##// END OF EJS Templates
style: remove namespace class...
Pierre-Yves David -
r29297:50fef825 default
parent child Browse files
Show More
@@ -1,599 +1,598
1 1 # transaction.py - simple journaling scheme for mercurial
2 2 #
3 3 # This transaction scheme is intended to gracefully handle program
4 4 # errors and interruptions. More serious failures like system crashes
5 5 # can be recovered with an fsck-like tool. As the whole repository is
6 6 # effectively log-structured, this should amount to simply truncating
7 7 # anything that isn't referenced in the changelog.
8 8 #
9 9 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
10 10 #
11 11 # This software may be used and distributed according to the terms of the
12 12 # GNU General Public License version 2 or any later version.
13 13
14 14 from __future__ import absolute_import
15 15
16 16 import errno
17 17
18 18 from .i18n import _
19 19 from . import (
20 20 error,
21 21 util,
22 22 )
23 23
24 24 version = 2
25 25
# File generators that must only run after the finalizers have completed,
# because they consume the finalizers' output (e.g. the changelog must be
# written before bookmarks or dirstate are generated).
postfinalizegenerators = {
    'bookmarks',
    'dirstate',
}
33 33
# Generation-group markers for transaction._generatefiles(): run every
# registered file generator, only the ones that must run before the
# finalizers, or only the ones that must run after them (see
# postfinalizegenerators above).
gengroupall='all'
gengroupprefinalize='prefinalize'
gengrouppostfinalize='postfinalize'
38 37
def active(func):
    """Decorator for transaction methods.

    Refuses to run the wrapped method once the transaction has been
    committed or aborted (i.e. its reference count dropped to zero).
    """
    def wrapper(self, *args, **kwargs):
        if self.count == 0:
            raise error.Abort(_(
                'cannot use transaction when it is already committed/aborted'))
        return func(self, *args, **kwargs)
    return wrapper
46 45
def _playback(journal, report, opener, vfsmap, entries, backupentries,
              unlink=True):
    """Undo a partially-applied transaction by replaying its journal.

    * `entries`: (file, offset, data) triples; each file is truncated back
      to `offset`, or removed entirely when offset is 0 and `unlink` is True.
    * `backupentries`: (location, file, backupfile, cache) tuples; full
      backup copies are restored over the live files, and temporary files
      (empty `file`) are deleted.

    Once everything is restored, the journal, its '.backupfiles' companion
    and the now-consumed backup copies are removed.
    """
    # first pass: truncate (or unlink) append-only files back to their
    # recorded pre-transaction length
    for f, o, _ignore in entries:
        if o or not unlink:
            try:
                fp = opener(f, 'a')
                fp.truncate(o)
                fp.close()
            except IOError:
                report(_("failed to truncate %s\n") % f)
                raise
        else:
            try:
                opener.unlink(f)
            except (IOError, OSError) as inst:
                # the file never made it to disk; nothing to undo
                if inst.errno != errno.ENOENT:
                    raise

    # second pass: restore full backup copies and drop temporary files
    backupfiles = []
    for l, f, b, c in backupentries:
        if l not in vfsmap and c:
            report("couldn't handle %s: unknown cache location %s\n"
                   % (b, l))
        # NOTE(review): unlike transaction.close(), there is no 'continue'
        # after the report above, so an unknown location still reaches
        # vfsmap[l] below and raises KeyError -- confirm this is intended.
        vfs = vfsmap[l]
        try:
            if f and b:
                # a backup copy exists: restore it over the live file
                filepath = vfs.join(f)
                backuppath = vfs.join(b)
                try:
                    util.copyfile(backuppath, filepath)
                    backupfiles.append(b)
                except IOError:
                    report(_("failed to recover %s\n") % f)
            else:
                # no backup copy: the file (or temp file) must be removed
                target = f or b
                try:
                    vfs.unlink(target)
                except (IOError, OSError) as inst:
                    if inst.errno != errno.ENOENT:
                        raise
        except (IOError, OSError, error.Abort) as inst:
            # cache files (c set) are best-effort; anything else is fatal
            if not c:
                raise

    # final pass: all content is restored, delete the journal machinery
    backuppath = "%s.backupfiles" % journal
    if opener.exists(backuppath):
        opener.unlink(backuppath)
    opener.unlink(journal)
    try:
        for f in backupfiles:
            if opener.exists(f):
                opener.unlink(f)
    except (IOError, OSError, error.Abort) as inst:
        # only pure backup files remain; it is safe to ignore any error
        pass
102 101
class transaction(object):
    def __init__(self, report, opener, vfsmap, journalname, undoname=None,
                 after=None, createmode=None, validator=None, releasefn=None):
        """Begin a new transaction

        Begins a new transaction that allows rolling back writes in the event of
        an exception.

        * `after`: called after the transaction has been committed
        * `createmode`: the mode of the journal file that will be created
        * `releasefn`: called after releasing (with transaction and result)
        """
        self.count = 1
        self.usages = 1
        self.report = report
        # a vfs to the store content
        self.opener = opener
        # a map to access file in various {location -> vfs}
        vfsmap = vfsmap.copy()
        vfsmap[''] = opener # set default value
        self._vfsmap = vfsmap
        self.after = after
        self.entries = []
        self.map = {}
        self.journal = journalname
        self.undoname = undoname
        self._queue = []
        # A callback to validate transaction content before closing it.
        # It should raise an exception if anything is wrong.
        # target user is repository hooks.
        if validator is None:
            validator = lambda tr: None
        self.validator = validator
        # A callback to do something just after releasing transaction.
        if releasefn is None:
            releasefn = lambda tr, success: None
        self.releasefn = releasefn

        # a dict of arguments to be passed to hooks
        self.hookargs = {}
        self.file = opener.open(self.journal, "w")

        # a list of ('location', 'path', 'backuppath', cache) entries.
        # - if 'backuppath' is empty, no file existed at backup time
        # - if 'path' is empty, this is a temporary transaction file
        # - if 'location' is not empty, the path is outside main opener reach.
        #   use 'location' value as a key in a vfsmap to find the right 'vfs'
        # (cache is currently unused)
        self._backupentries = []
        self._backupmap = {}
        self._backupjournal = "%s.backupfiles" % self.journal
        self._backupsfile = opener.open(self._backupjournal, 'w')
        self._backupsfile.write('%d\n' % version)

        if createmode is not None:
            opener.chmod(self.journal, createmode & 0o666)
            opener.chmod(self._backupjournal, createmode & 0o666)

        # hold file generations to be performed on commit
        self._filegenerators = {}
        # hold callback to write pending data for hooks
        self._pendingcallback = {}
        # True if any pending data have ever been written
        self._anypending = False
        # holds callback to call when writing the transaction
        self._finalizecallback = {}
        # hold callback for post transaction close
        self._postclosecallback = {}
        # holds callbacks to call during abort
        self._abortcallback = {}

    def __del__(self):
        # a journal still set means the transaction was neither closed nor
        # aborted explicitly; roll it back rather than leave a live journal
        if self.journal:
            self._abort()

    @active
    def startgroup(self):
        """delay registration of file entry

        This is used by strip to delay vision of strip offset. The transaction
        sees either none or all of the strip actions to be done."""
        self._queue.append([])

    @active
    def endgroup(self):
        """apply delayed registration of file entry.

        This is used by strip to delay vision of strip offset. The transaction
        sees either none or all of the strip actions to be done."""
        q = self._queue.pop()
        for f, o, data in q:
            self._addentry(f, o, data)

    @active
    def add(self, file, offset, data=None):
        """record the state of an append-only file before update"""
        if file in self.map or file in self._backupmap:
            return
        if self._queue:
            # inside a group: defer until endgroup() flushes the queue
            self._queue[-1].append((file, offset, data))
            return

        self._addentry(file, offset, data)

    def _addentry(self, file, offset, data):
        """add an append-only entry to memory and on-disk state"""
        if file in self.map or file in self._backupmap:
            return
        self.entries.append((file, offset, data))
        self.map[file] = len(self.entries) - 1
        # add enough data to the journal to do the truncate
        self.file.write("%s\0%d\n" % (file, offset))
        self.file.flush()

    @active
    def addbackup(self, file, hardlink=True, location=''):
        """Adds a backup of the file to the transaction

        Calling addbackup() creates a hardlink backup of the specified file
        that is used to recover the file in the event of the transaction
        aborting.

        * `file`: the file path, relative to .hg/store
        * `hardlink`: use a hardlink to quickly create the backup
        """
        if self._queue:
            msg = 'cannot use transaction.addbackup inside "group"'
            raise RuntimeError(msg)

        if file in self.map or file in self._backupmap:
            return
        vfs = self._vfsmap[location]
        dirname, filename = vfs.split(file)
        backupfilename = "%s.backup.%s" % (self.journal, filename)
        backupfile = vfs.reljoin(dirname, backupfilename)
        if vfs.exists(file):
            filepath = vfs.join(file)
            backuppath = vfs.join(backupfile)
            util.copyfile(filepath, backuppath, hardlink=hardlink)
        else:
            # empty 'backuppath' records that no file existed at backup time
            backupfile = ''

        self._addbackupentry((location, file, backupfile, False))

    def _addbackupentry(self, entry):
        """register a new backup entry and write it to disk"""
        self._backupentries.append(entry)
        self._backupmap[entry[1]] = len(self._backupentries) - 1
        self._backupsfile.write("%s\0%s\0%s\0%d\n" % entry)
        self._backupsfile.flush()

    @active
    def registertmp(self, tmpfile, location=''):
        """register a temporary transaction file

        Such files will be deleted when the transaction exits (on both
        failure and success).
        """
        self._addbackupentry((location, '', tmpfile, False))

    @active
    def addfilegenerator(self, genid, filenames, genfunc, order=0,
                         location=''):
        """add a function to generate some files at transaction commit

        The `genfunc` argument is a function capable of generating proper
        content of each entry in the `filename` tuple.

        At transaction close time, `genfunc` will be called with one file
        object argument per entries in `filenames`.

        The transaction itself is responsible for the backup, creation and
        final write of such file.

        The `genid` argument is used to ensure the same set of file is only
        generated once. Call to `addfilegenerator` for a `genid` already
        present will overwrite the old entry.

        The `order` argument may be used to control the order in which multiple
        generator will be executed.

        The `location` arguments may be used to indicate the files are located
        outside of the standard directory for transaction. It should match
        one of the key of the `transaction.vfsmap` dictionary.
        """
        # For now, we are unable to do proper backup and restore of custom vfs
        # but for bookmarks that are handled outside this mechanism.
        self._filegenerators[genid] = (order, filenames, genfunc, location)

    def _generatefiles(self, suffix='', group=gengroupall):
        """run registered file generators, returning True if any ran

        `group` selects which generators run: all of them, only the
        pre-finalize ones, or only the post-finalize ones (see
        postfinalizegenerators).  A non-empty `suffix` writes to temporary
        '<name><suffix>' files instead of the real targets.
        """
        # write files registered for generation
        any = False  # note: shadows the builtin any()
        for id, entry in sorted(self._filegenerators.iteritems()):
            any = True
            order, filenames, genfunc, location = entry

            # for generation at closing, check if it's before or after finalize
            postfinalize = group == gengrouppostfinalize
            if (group != gengroupall and
                (id in postfinalizegenerators) != (postfinalize)):
                continue

            vfs = self._vfsmap[location]
            files = []
            try:
                for name in filenames:
                    name += suffix
                    if suffix:
                        # pending file: clean it up on transaction exit
                        self.registertmp(name, location=location)
                    else:
                        # real file: back it up so abort can restore it
                        self.addbackup(name, location=location)
                    files.append(vfs(name, 'w', atomictemp=True))
                genfunc(*files)
            finally:
                for f in files:
                    f.close()
        return any

    @active
    def find(self, file):
        """look up the journal or backup entry recorded for `file`"""
        if file in self.map:
            return self.entries[self.map[file]]
        if file in self._backupmap:
            return self._backupentries[self._backupmap[file]]
        return None

    @active
    def replace(self, file, offset, data=None):
        '''
        replace can only replace already committed entries
        that are not pending in the queue
        '''

        if file not in self.map:
            raise KeyError(file)
        index = self.map[file]
        self.entries[index] = (file, offset, data)
        self.file.write("%s\0%d\n" % (file, offset))
        self.file.flush()

    @active
    def nest(self):
        """enter a nested transaction scope; balanced by release()"""
        self.count += 1
        self.usages += 1
        return self

    def release(self):
        """drop one usage reference, aborting if scopes were left open"""
        if self.count > 0:
            self.usages -= 1
        # if the transaction scopes are left without being closed, fail
        if self.count > 0 and self.usages == 0:
            self._abort()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # commit only on clean exit; always release the scope
        try:
            if exc_type is None:
                self.close()
        finally:
            self.release()

    def running(self):
        # True while at least one transaction scope is still open
        return self.count > 0

    def addpending(self, category, callback):
        """add a callback to be called when the transaction is pending

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._pendingcallback[category] = callback

    @active
    def writepending(self):
        '''write pending file to temporary version

        This is used to allow hooks to view a transaction before commit'''
        categories = sorted(self._pendingcallback)
        for cat in categories:
            # remove callback since the data will have been flushed
            any = self._pendingcallback.pop(cat)(self)
            self._anypending = self._anypending or any
        self._anypending |= self._generatefiles(suffix='.pending')
        return self._anypending

    @active
    def addfinalize(self, category, callback):
        """add a callback to be called when the transaction is closed

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting old callbacks with
        newer callbacks.
        """
        self._finalizecallback[category] = callback

    @active
    def addpostclose(self, category, callback):
        """add a callback to be called after the transaction is closed

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._postclosecallback[category] = callback

    @active
    def addabort(self, category, callback):
        """add a callback to be called when the transaction is aborted.

        The transaction will be given as the first argument to the callback.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._abortcallback[category] = callback

    @active
    def close(self):
        '''commit the transaction'''
        if self.count == 1:
            self.validator(self)  # will raise exception if needed
            self._generatefiles(group=gengroupprefinalize)
            categories = sorted(self._finalizecallback)
            for cat in categories:
                self._finalizecallback[cat](self)
            # Prevent double usage and help clear cycles.
            self._finalizecallback = None
            self._generatefiles(group=gengrouppostfinalize)

        self.count -= 1
        if self.count != 0:
            # nested scope: outermost close() does the real work
            return
        self.file.close()
        self._backupsfile.close()
        # cleanup temporary files
        for l, f, b, c in self._backupentries:
            if l not in self._vfsmap and c:
                self.report("couldn't remove %s: unknown cache location %s\n"
                            % (b, l))
                continue
            vfs = self._vfsmap[l]
            if not f and b and vfs.exists(b):
                try:
                    vfs.unlink(b)
                except (IOError, OSError, error.Abort) as inst:
                    if not c:
                        raise
                    # Abort may be raised by a read-only opener
                    self.report("couldn't remove %s: %s\n"
                                % (vfs.join(b), inst))
        self.entries = []
        self._writeundo()
        if self.after:
            self.after()
        if self.opener.isfile(self._backupjournal):
            self.opener.unlink(self._backupjournal)
        if self.opener.isfile(self.journal):
            self.opener.unlink(self.journal)
        for l, _f, b, c in self._backupentries:
            if l not in self._vfsmap and c:
                self.report("couldn't remove %s: unknown cache location"
                            "%s\n" % (b, l))
                continue
            vfs = self._vfsmap[l]
            if b and vfs.exists(b):
                try:
                    vfs.unlink(b)
                except (IOError, OSError, error.Abort) as inst:
                    if not c:
                        raise
                    # Abort may be raised by a read-only opener
                    self.report("couldn't remove %s: %s\n"
                                % (vfs.join(b), inst))
        self._backupentries = []
        self.journal = None

        self.releasefn(self, True)  # notify success of closing transaction

        # run post close action
        categories = sorted(self._postclosecallback)
        for cat in categories:
            self._postclosecallback[cat](self)
        # Prevent double usage and help clear cycles.
        self._postclosecallback = None

    @active
    def abort(self):
        '''abort the transaction (generally called on error, or when the
        transaction is not explicitly committed before going out of
        scope)'''
        self._abort()

    def _writeundo(self):
        """write transaction data for possible future undo call"""
        if self.undoname is None:
            return
        undobackupfile = self.opener.open("%s.backupfiles" % self.undoname, 'w')
        undobackupfile.write('%d\n' % version)
        for l, f, b, c in self._backupentries:
            if not f:  # temporary file
                continue
            if not b:
                # no backup copy existed; record an empty undo path
                u = ''
            else:
                if l not in self._vfsmap and c:
                    self.report("couldn't remove %s: unknown cache location"
                                "%s\n" % (b, l))
                    continue
                vfs = self._vfsmap[l]
                base, name = vfs.split(b)
                assert name.startswith(self.journal), name
                uname = name.replace(self.journal, self.undoname, 1)
                u = vfs.reljoin(base, uname)
                util.copyfile(vfs.join(b), vfs.join(u), hardlink=True)
            undobackupfile.write("%s\0%s\0%s\0%d\n" % (l, f, u, c))
        undobackupfile.close()


    def _abort(self):
        # mark the transaction dead before touching disk so re-entrant
        # calls (e.g. from __del__) are no-ops
        self.count = 0
        self.usages = 0
        self.file.close()
        self._backupsfile.close()

        try:
            if not self.entries and not self._backupentries:
                # nothing was written: just discard the empty journals
                if self._backupjournal:
                    self.opener.unlink(self._backupjournal)
                if self.journal:
                    self.opener.unlink(self.journal)
                return

            self.report(_("transaction abort!\n"))

            try:
                for cat in sorted(self._abortcallback):
                    self._abortcallback[cat](self)
                # Prevent double usage and help clear cycles.
                self._abortcallback = None
                _playback(self.journal, self.report, self.opener, self._vfsmap,
                          self.entries, self._backupentries, False)
                self.report(_("rollback completed\n"))
            except BaseException:
                self.report(_("rollback failed - please run hg recover\n"))
        finally:
            self.journal = None
            self.releasefn(self, False)  # notify failure of transaction
556 555
def rollback(opener, vfsmap, file, report):
    """Rolls back the transaction contained in the given file

    Reads the entries in the specified file, and the corresponding
    '*.backupfiles' file, to recover from an incomplete transaction.

    * `file`: a file containing a list of entries, specifying where
    to truncate each file.  The file should contain a list of
    file\0offset pairs, delimited by newlines. The corresponding
    '*.backupfiles' file should contain a list of file\0backupfile
    pairs, delimited by \0.
    """
    entries = []
    backupentries = []

    # parse the main journal: one "<file>\0<offset>" record per line
    fp = opener.open(file)
    journallines = fp.readlines()
    fp.close()
    for record in journallines:
        try:
            name, offset = record.split('\0')
            entries.append((name, int(offset), None))
        except ValueError:
            report(_("couldn't read journal entry %r!\n") % record)

    # parse the companion backup journal, if one was written
    backupjournal = "%s.backupfiles" % file
    if opener.exists(backupjournal):
        fp = opener.open(backupjournal)
        backuplines = fp.readlines()
        if backuplines:
            ver = backuplines[0][:-1]
            if ver != str(version):
                report(_("journal was created by a different version of "
                        "Mercurial\n"))
            else:
                for line in backuplines[1:]:
                    if not line:
                        continue
                    # shave off the trailing newline before splitting
                    l, f, b, c = line[:-1].split('\0')
                    backupentries.append((l, f, b, bool(c)))

    _playback(file, report, opener, vfsmap, entries, backupentries)
General Comments 0
You need to be logged in to leave comments. Login now