##// END OF EJS Templates
transaction: document startgroup and endgroup...
Pierre-Yves David -
r23250:8919dc7f default
parent child Browse files
Show More
@@ -1,422 +1,430 b''
1 1 # transaction.py - simple journaling scheme for mercurial
2 2 #
3 3 # This transaction scheme is intended to gracefully handle program
4 4 # errors and interruptions. More serious failures like system crashes
5 5 # can be recovered with an fsck-like tool. As the whole repository is
6 6 # effectively log-structured, this should amount to simply truncating
7 7 # anything that isn't referenced in the changelog.
8 8 #
9 9 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
10 10 #
11 11 # This software may be used and distributed according to the terms of the
12 12 # GNU General Public License version 2 or any later version.
13 13
from i18n import _
import errno
import functools
import error, util
17 17
18 18 version = 1
19 19
def active(func):
    """Decorator: only allow `func` to run on a live transaction.

    Raises error.Abort if the transaction has already been
    committed or aborted (i.e. its reference count dropped to zero).
    """
    @functools.wraps(func)  # keep the wrapped method's name/docstring
    def _active(self, *args, **kwds):
        if self.count == 0:
            raise error.Abort(_(
                'cannot use transaction when it is already committed/aborted'))
        return func(self, *args, **kwds)
    return _active
27 27
def _playback(journal, report, opener, entries, backupentries, unlink=True):
    """Undo a partially-applied transaction.

    * `journal`: path (relative to `opener`) of the journal file to remove
      once playback succeeds
    * `report`: callable used to emit progress/error messages
    * `opener`: vfs-like object (callable to open files; also provides
      join/unlink/exists)
    * `entries`: list of (file, offset, data) triples; each file is
      truncated back to `offset`
    * `backupentries`: list of (file, backupfile) pairs; each file is
      restored from its backup copy
    * `unlink`: when True, an entry with offset 0 is removed outright
      instead of truncated
    """
    for f, o, _ignore in entries:
        if o or not unlink:
            try:
                fp = opener(f, 'a')
                fp.truncate(o)
                fp.close()
            except IOError:
                report(_("failed to truncate %s\n") % f)
                raise
        else:
            # offset 0 and unlink requested: the file did not exist before
            # the transaction, so remove it entirely
            try:
                opener.unlink(f)
            except (IOError, OSError) as inst:
                # already gone is fine; anything else is a real failure
                if inst.errno != errno.ENOENT:
                    raise

    backupfiles = []
    for f, b in backupentries:
        filepath = opener.join(f)
        backuppath = opener.join(b)
        try:
            util.copyfile(backuppath, filepath)
            backupfiles.append(b)
        except IOError:
            report(_("failed to recover %s\n") % f)
            raise

    # everything restored: remove the journal, the backup journal, and the
    # backup copies themselves
    opener.unlink(journal)
    backuppath = "%s.backupfiles" % journal
    if opener.exists(backuppath):
        opener.unlink(backuppath)
    for f in backupfiles:
        opener.unlink(f)
62 62
class transaction(object):
    def __init__(self, report, opener, journal, after=None, createmode=None,
                 onclose=None, onabort=None):
        """Begin a new transaction

        Begins a new transaction that allows rolling back writes in the event of
        an exception.

        * `after`: called after the transaction has been committed
        * `createmode`: the mode of the journal file that will be created
        * `onclose`: called as the transaction is closing, but before it is
        closed
        * `onabort`: called as the transaction is aborting, but before any files
        have been truncated
        """
        # reference count: nest() increments, close()/release() decrement
        self.count = 1
        self.usages = 1
        self.report = report
        self.opener = opener
        self.after = after
        self.onclose = onclose
        self.onabort = onabort
        self.entries = []
        self.map = {}
        # a list of ('path', 'backuppath') entries.
        self._backupentries = []
        self._backupmap = {}
        self.journal = journal
        # stack of ([entries], [backupentries]) used by startgroup/endgroup
        self._queue = []
        # a dict of arguments to be passed to hooks
        self.hookargs = {}

        self._backupjournal = "%s.backupfiles" % journal
        self.file = opener.open(self.journal, "w")
        self._backupsfile = opener.open(self._backupjournal, 'w')
        self._backupsfile.write('%d\n' % version)
        if createmode is not None:
            # 0o666 (py3-compatible octal literal, same value as legacy 0666)
            opener.chmod(self.journal, createmode & 0o666)
            opener.chmod(self._backupjournal, createmode & 0o666)

        # hold file generations to be performed on commit
        self._filegenerators = {}
        # holds callbacks to write pending data for hooks
        self._pendingcallback = {}
        # True if any pending data have ever been written
        self._anypending = False
        # holds callbacks to call when writing the transaction
        self._finalizecallback = {}
        # holds callbacks for post transaction close
        self._postclosecallback = {}

    def __del__(self):
        # a journal still set means the transaction was never closed
        if self.journal:
            self._abort()

    @active
    def startgroup(self):
        """delay registration of file entry

        This is used by strip to delay vision of strip offset. The transaction
        sees either none or all of the strip actions to be done."""
        self._queue.append(([], []))

    @active
    def endgroup(self):
        """apply delayed registration of file entry.

        This is used by strip to delay vision of strip offset. The transaction
        sees either none or all of the strip actions to be done."""
        q = self._queue.pop()
        self.entries.extend(q[0])
        self._backupentries.extend(q[1])

        offsets = []
        backups = []
        for f, o, _data in q[0]:
            offsets.append((f, o))

        for f, b in q[1]:
            backups.append((f, b))

        # flush the whole group to the journal in one write so it is
        # recorded atomically (all-or-nothing, see docstring)
        d = ''.join(['%s\0%d\n' % (f, o) for f, o in offsets])
        self.file.write(d)
        self.file.flush()

        d = ''.join(['%s\0%s\n' % (f, b) for f, b in backups])
        self._backupsfile.write(d)
        self._backupsfile.flush()

    @active
    def add(self, file, offset, data=None):
        """record that `file` must be truncated back to `offset` on abort"""
        if file in self.map or file in self._backupmap:
            return
        if self._queue:
            # inside a startgroup/endgroup pair: defer journal write
            self._queue[-1][0].append((file, offset, data))
            return

        self.entries.append((file, offset, data))
        self.map[file] = len(self.entries) - 1
        # add enough data to the journal to do the truncate
        self.file.write("%s\0%d\n" % (file, offset))
        self.file.flush()

    @active
    def addbackup(self, file, hardlink=True, vfs=None):
        """Adds a backup of the file to the transaction

        Calling addbackup() creates a hardlink backup of the specified file
        that is used to recover the file in the event of the transaction
        aborting.

        * `file`: the file path, relative to .hg/store
        * `hardlink`: use a hardlink to quickly create the backup
        """

        if file in self.map or file in self._backupmap:
            return
        backupfile = "%s.backup.%s" % (self.journal, file)
        if vfs is None:
            vfs = self.opener
        if vfs.exists(file):
            filepath = vfs.join(file)
            backuppath = self.opener.join(backupfile)
            util.copyfiles(filepath, backuppath, hardlink=hardlink)
        else:
            # file does not exist yet: a truncate-to-zero entry suffices
            self.add(file, 0)
            return

        if self._queue:
            # inside a startgroup/endgroup pair: defer journal write
            self._queue[-1][1].append((file, backupfile))
            return

        self._backupentries.append((file, backupfile))
        self._backupmap[file] = len(self._backupentries) - 1
        self._backupsfile.write("%s\0%s\n" % (file, backupfile))
        self._backupsfile.flush()

    @active
    def addfilegenerator(self, genid, filenames, genfunc, order=0, vfs=None):
        """add a function to generates some files at transaction commit

        The `genfunc` argument is a function capable of generating proper
        content of each entry in the `filename` tuple.

        At transaction close time, `genfunc` will be called with one file
        object argument per entries in `filenames`.

        The transaction itself is responsible for the backup, creation and
        final write of such file.

        The `genid` argument is used to ensure the same set of file is only
        generated once. Call to `addfilegenerator` for a `genid` already
        present will overwrite the old entry.

        The `order` argument may be used to control the order in which multiple
        generator will be executed.
        """
        # For now, we are unable to do proper backup and restore of custom vfs
        # but for bookmarks that are handled outside this mechanism.
        assert vfs is None or filenames == ('bookmarks',)
        self._filegenerators[genid] = (order, filenames, genfunc, vfs)

    def _generatefiles(self):
        # write files registered for generation
        for entry in sorted(self._filegenerators.values()):
            order, filenames, genfunc, vfs = entry
            if vfs is None:
                vfs = self.opener
            files = []
            try:
                for name in filenames:
                    # Some files are already backed up when creating the
                    # localrepo. Until this is properly fixed we disable the
                    # backup for them.
                    if name not in ('phaseroots', 'bookmarks'):
                        self.addbackup(name)
                    files.append(vfs(name, 'w', atomictemp=True))
                genfunc(*files)
            finally:
                for f in files:
                    f.close()

    @active
    def find(self, file):
        """return the journal or backup entry recorded for `file`, if any"""
        if file in self.map:
            return self.entries[self.map[file]]
        if file in self._backupmap:
            return self._backupentries[self._backupmap[file]]
        return None

    @active
    def replace(self, file, offset, data=None):
        '''
        replace can only replace already committed entries
        that are not pending in the queue
        '''

        if file not in self.map:
            raise KeyError(file)
        index = self.map[file]
        self.entries[index] = (file, offset, data)
        self.file.write("%s\0%d\n" % (file, offset))
        self.file.flush()

    @active
    def nest(self):
        """enter a nested scope; release()/close() must balance this"""
        self.count += 1
        self.usages += 1
        return self

    def release(self):
        """leave one usage scope without committing"""
        if self.count > 0:
            self.usages -= 1
        # if the transaction scopes are left without being closed, fail
        if self.count > 0 and self.usages == 0:
            self._abort()

    def running(self):
        return self.count > 0

    def addpending(self, category, callback):
        """add a callback to be called when the transaction is pending

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._pendingcallback[category] = callback

    @active
    def writepending(self):
        '''write pending file to temporary version

        This is used to allow hooks to view a transaction before commit'''
        categories = sorted(self._pendingcallback)
        for cat in categories:
            # remove callback since the data will have been flushed
            # (renamed local: the old name shadowed the builtin `any`)
            anypending = self._pendingcallback.pop(cat)()
            self._anypending = self._anypending or anypending
        return self._anypending

    @active
    def addfinalize(self, category, callback):
        """add a callback to be called when the transaction is closed

        Category is a unique identifier to allow overwriting old callbacks with
        newer callbacks.
        """
        self._finalizecallback[category] = callback

    @active
    def addpostclose(self, category, callback):
        """add a callback to be called after the transaction is closed

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._postclosecallback[category] = callback

    @active
    def close(self):
        '''commit the transaction'''
        if self.count == 1 and self.onclose is not None:
            self._generatefiles()
            categories = sorted(self._finalizecallback)
            for cat in categories:
                self._finalizecallback[cat]()
            self.onclose()

        self.count -= 1
        if self.count != 0:
            # still nested: outermost close() does the real work
            return
        self.file.close()
        self._backupsfile.close()
        self.entries = []
        if self.after:
            self.after()
        if self.opener.isfile(self.journal):
            self.opener.unlink(self.journal)
        if self.opener.isfile(self._backupjournal):
            self.opener.unlink(self._backupjournal)
        for _f, b in self._backupentries:
            self.opener.unlink(b)
        self._backupentries = []
        self.journal = None
        # run post close action
        categories = sorted(self._postclosecallback)
        for cat in categories:
            self._postclosecallback[cat]()

    @active
    def abort(self):
        '''abort the transaction (generally called on error, or when the
        transaction is not explicitly committed before going out of
        scope)'''
        self._abort()

    def _abort(self):
        self.count = 0
        self.usages = 0
        self.file.close()
        self._backupsfile.close()

        if self.onabort is not None:
            self.onabort()

        try:
            if not self.entries and not self._backupentries:
                # nothing was written: just clean up the journal files
                if self.journal:
                    self.opener.unlink(self.journal)
                if self._backupjournal:
                    self.opener.unlink(self._backupjournal)
                return

            self.report(_("transaction abort!\n"))

            try:
                _playback(self.journal, self.report, self.opener,
                          self.entries, self._backupentries, False)
                self.report(_("rollback completed\n"))
            except Exception:
                self.report(_("rollback failed - please run hg recover\n"))
        finally:
            self.journal = None
378 386
379 387
def rollback(opener, file, report):
    """Rolls back the transaction contained in the given file

    Reads the entries in the specified file, and the corresponding
    '*.backupfiles' file, to recover from an incomplete transaction.

    * `file`: a file containing a list of entries, specifying where
    to truncate each file. The file should contain a list of
    file\0offset pairs, delimited by newlines. The corresponding
    '*.backupfiles' file should contain a list of file\0backupfile
    pairs, delimited by \0.
    """
    entries = []
    backupentries = []

    fp = opener.open(file)
    lines = fp.readlines()
    fp.close()
    for l in lines:
        try:
            f, o = l.split('\0')
            entries.append((f, int(o), None))
        except ValueError:
            report(_("couldn't read journal entry %r!\n") % l)

    backupjournal = "%s.backupfiles" % file
    if opener.exists(backupjournal):
        fp = opener.open(backupjournal)
        lines = fp.readlines()
        # close the backup journal handle; it was previously leaked
        fp.close()
        if lines:
            # first line is the on-disk format version
            ver = lines[0][:-1]
            if ver == str(version):
                for line in lines[1:]:
                    if line:
                        # Shave off the trailing newline
                        line = line[:-1]
                        f, b = line.split('\0')
                        backupentries.append((f, b))
            else:
                report(_("journal was created by a newer version of "
                         "Mercurial"))

    _playback(file, report, opener, entries, backupentries)
General Comments 0
You need to be logged in to leave comments. Login now