##// END OF EJS Templates
transaction: add clarifying comment about why ignoring some error is fine...
marmoute -
r51233:70ca1f09 stable
parent child Browse files
Show More
@@ -1,899 +1,909 b''
1 1 # transaction.py - simple journaling scheme for mercurial
2 2 #
3 3 # This transaction scheme is intended to gracefully handle program
4 4 # errors and interruptions. More serious failures like system crashes
5 5 # can be recovered with an fsck-like tool. As the whole repository is
6 6 # effectively log-structured, this should amount to simply truncating
7 7 # anything that isn't referenced in the changelog.
8 8 #
9 9 # Copyright 2005, 2006 Olivia Mackall <olivia@selenic.com>
10 10 #
11 11 # This software may be used and distributed according to the terms of the
12 12 # GNU General Public License version 2 or any later version.
13 13
14 14 import errno
15 15 import os
16 16
17 17 from .i18n import _
18 18 from . import (
19 19 error,
20 20 pycompat,
21 21 util,
22 22 )
23 23 from .utils import stringutil
24 24
# on-disk format version of the '*.backupfiles' journal; written as the
# first line of the file and validated by `read_backup_files`
version = 2

# group identifiers selecting which file generators `_generatefiles` runs:
# everything, or only those registered to run before/after finalization
GEN_GROUP_ALL = b'all'
GEN_GROUP_PRE_FINALIZE = b'prefinalize'
GEN_GROUP_POST_FINALIZE = b'postfinalize'
30 30
31 31
def active(func):
    """Decorator restricting a transaction method to a live transaction.

    Raises ProgrammingError when the transaction has already been
    committed or aborted (i.e. its usage count dropped to zero).
    """

    def _active(self, *args, **kwds):
        if not self._count:
            msg = b'cannot use transaction when it is already committed/aborted'
            raise error.ProgrammingError(msg)
        return func(self, *args, **kwds)

    return _active
41 41
42 42
# template for the backup-listing file associated with an undo prefix
UNDO_BACKUP = b'%s.backupfiles'

# (vfs-location, path-template) pairs of undo files that may need removal;
# see `cleanup_undo_files`
UNDO_FILES_MAY_NEED_CLEANUP = [
    # legacy entries that might exist on disk from previous versions:
    (b'store', b'%s.narrowspec'),
    (b'plain', b'%s.narrowspec.dirstate'),
    (b'plain', b'%s.branch'),
    (b'plain', b'%s.bookmarks'),
    (b'store', b'%s.phaseroots'),
    (b'plain', b'%s.dirstate'),
    # files actually in use today:
    (b'plain', b'%s.desc'),
    # Always delete undo last to make sure we detect that a clean up is needed if
    # the process is interrupted.
    (b'store', b'%s'),
]
59 59
60 60
def cleanup_undo_files(report, vfsmap, undo_prefix=b'undo'):
    """remove "undo" files used by the rollback logic

    This is useful to prevent rollback running in situations where it does
    not make sense. For example after a strip.

    * `report`: callable used to emit (bytes) warning messages
    * `vfsmap`: {location -> vfs} mapping; must contain a b'store' entry
    * `undo_prefix`: base name of the undo files to remove
    """
    backup_listing = UNDO_BACKUP % undo_prefix

    backup_entries = []
    undo_files = []
    svfs = vfsmap[b'store']
    try:
        with svfs(backup_listing) as f:
            backup_entries = read_backup_files(report, f)
    except FileNotFoundError:
        # no backup listing: nothing was recorded, so nothing extra to clean
        pass
    except OSError as e:
        msg = _(b'could not read %s: %s\n')
        msg %= (svfs.join(backup_listing), stringutil.forcebytestr(e))
        report(msg)

    # backups recorded in the listing, resolved through their vfs location
    for location, f, backup_path, c in backup_entries:
        if location in vfsmap and backup_path:
            undo_files.append((vfsmap[location], backup_path))

    undo_files.append((svfs, backup_listing))
    for location, undo_path in UNDO_FILES_MAY_NEED_CLEANUP:
        undo_files.append((vfsmap[location], undo_path % undo_prefix))
    for undovfs, undofile in undo_files:
        try:
            undovfs.unlink(undofile)
        except FileNotFoundError:
            # already absent: the goal (file removed) is achieved
            pass
        except OSError as e:
            msg = _(b'error removing %s: %s\n')
            msg %= (undovfs.join(undofile), stringutil.forcebytestr(e))
            report(msg)
96 96
97 97
def _playback(
    journal,
    report,
    opener,
    vfsmap,
    entries,
    backupentries,
    unlink=True,
    checkambigfiles=None,
):
    """replay a journal to undo the effects of an interrupted transaction

    * `journal`: name of the journal file (used to derive the backup listing)
    * `report`: callable used to emit (bytes) progress/warning messages
    * `opener`: vfs used for the journal and default-location files
    * `vfsmap`: {location -> vfs} mapping used to resolve backup entries
    * `entries`: (file, offset) pairs; each file is truncated back to offset
    * `backupentries`: (location, file, backuppath, cache) tuples to restore
    * `unlink`: when True, files recorded with offset 0 are removed entirely
    * `checkambigfiles`: set of (path, location) pairs for which file stat
      ambiguity must be avoided when restoring
    """
    # first, roll append-only files back to their recorded length
    for f, o in sorted(dict(entries).items()):
        if o or not unlink:
            checkambig = checkambigfiles and (f, b'') in checkambigfiles
            try:
                fp = opener(f, b'a', checkambig=checkambig)
                if fp.tell() < o:
                    raise error.Abort(
                        _(
                            b"attempted to truncate %s to %d bytes, but it was "
                            b"already %d bytes\n"
                        )
                        % (f, o, fp.tell())
                    )
                fp.truncate(o)
                fp.close()
            except IOError:
                report(_(b"failed to truncate %s\n") % f)
                raise
        else:
            try:
                opener.unlink(f)
            except FileNotFoundError:
                pass

    # second, restore (or remove) files from their backups
    backupfiles = []
    for l, f, b, c in backupentries:
        if l not in vfsmap and c:
            report(b"couldn't handle %s: unknown cache location %s\n" % (b, l))
            # we cannot resolve a vfs for this entry; skip it instead of
            # crashing on the lookup below (mirrors transaction.close())
            continue
        vfs = vfsmap[l]
        try:
            if f and b:
                filepath = vfs.join(f)
                backuppath = vfs.join(b)
                checkambig = checkambigfiles and (f, l) in checkambigfiles
                try:
                    util.copyfile(backuppath, filepath, checkambig=checkambig)
                    backupfiles.append((vfs, b))
                except IOError as exc:
                    e_msg = stringutil.forcebytestr(exc)
                    report(_(b"failed to recover %s (%s)\n") % (f, e_msg))
            else:
                target = f or b
                try:
                    vfs.unlink(target)
                except FileNotFoundError:
                    # This is fine because
                    #
                    # either we are trying to delete the main file, and it is
                    # already deleted.
                    #
                    # or we are trying to delete a temporary file and it is
                    # already deleted.
                    #
                    # in both cases, our target result (delete the file) is
                    # already achieved.
                    pass
        except (IOError, OSError, error.Abort):
            if not c:
                raise

    # finally, drop the journal itself and the now-applied backups
    backuppath = b"%s.backupfiles" % journal
    if opener.exists(backuppath):
        opener.unlink(backuppath)
    opener.unlink(journal)
    try:
        for vfs, f in backupfiles:
            if vfs.exists(f):
                vfs.unlink(f)
    except (IOError, OSError, error.Abort):
        # only pure backup files remain, it is safe to ignore any error
        pass
169 179
170 180
class transaction(util.transactional):
    """journal-backed transaction over a set of repository files

    Tracks append-only file offsets, whole-file backups, temporary files,
    file generators and assorted callbacks, so that all changes can either
    be committed together (`close`) or rolled back (`abort`).
    """

    def __init__(
        self,
        report,
        opener,
        vfsmap,
        journalname,
        undoname=None,
        after=None,
        createmode=None,
        validator=None,
        releasefn=None,
        checkambigfiles=None,
        name='<unnamed>',
    ):
        """Begin a new transaction

        Begins a new transaction that allows rolling back writes in the event of
        an exception.

        * `after`: called after the transaction has been committed
        * `createmode`: the mode of the journal file that will be created
        * `releasefn`: called after releasing (with transaction and result)

        `checkambigfiles` is a set of (path, vfs-location) tuples,
        which determine whether file stat ambiguity should be avoided
        for corresponded files.
        """
        # usage counters: _count tracks nesting depth, _usages open scopes
        self._count = 1
        self._usages = 1
        self._report = report
        # a vfs to the store content
        self._opener = opener
        # a map to access file in various {location -> vfs}
        vfsmap = vfsmap.copy()
        vfsmap[b''] = opener  # set default value
        self._vfsmap = vfsmap
        self._after = after
        # {file -> truncation offset} for pre-existing append-only files
        self._offsetmap = {}
        # files first created within this transaction (offset 0)
        self._newfiles = set()
        self._journal = journalname
        self._journal_files = []
        self._undoname = undoname
        # stack of delayed entry registrations (see startgroup/endgroup)
        self._queue = []
        # A callback to do something just after releasing transaction.
        if releasefn is None:
            releasefn = lambda tr, success: None
        self._releasefn = releasefn

        self._checkambigfiles = set()
        if checkambigfiles:
            self._checkambigfiles.update(checkambigfiles)

        self._names = [name]

        # A dict dedicated to precisely tracking the changes introduced in the
        # transaction.
        self.changes = {}

        # a dict of arguments to be passed to hooks
        self.hookargs = {}
        self._file = opener.open(self._journal, b"w+")

        # a list of ('location', 'path', 'backuppath', cache) entries.
        # - if 'backuppath' is empty, no file existed at backup time
        # - if 'path' is empty, this is a temporary transaction file
        # - if 'location' is not empty, the path is outside main opener reach.
        # use 'location' value as a key in a vfsmap to find the right 'vfs'
        # (cache is currently unused)
        self._backupentries = []
        self._backupmap = {}
        self._backupjournal = b"%s.backupfiles" % self._journal
        self._backupsfile = opener.open(self._backupjournal, b'w')
        self._backupsfile.write(b'%d\n' % version)

        if createmode is not None:
            opener.chmod(self._journal, createmode & 0o666)
            opener.chmod(self._backupjournal, createmode & 0o666)

        # hold file generations to be performed on commit
        self._filegenerators = {}
        # hold callback to write pending data for hooks
        self._pendingcallback = {}
        # True if any pending data has ever been written
        self._anypending = False
        # holds callback to call when writing the transaction
        self._finalizecallback = {}
        # holds callback to call when validating the transaction
        # should raise exception if anything is wrong
        self._validatecallback = {}
        if validator is not None:
            self._validatecallback[b'001-userhooks'] = validator
        # hold callback for post transaction close
        self._postclosecallback = {}
        # holds callbacks to call during abort
        self._abortcallback = {}

    def __repr__(self):
        name = '/'.join(self._names)
        return '<transaction name=%s, count=%d, usages=%d>' % (
            name,
            self._count,
            self._usages,
        )

    def __del__(self):
        # a still-set journal at collection time means the transaction was
        # never closed: roll it back rather than leave a half-written state
        if self._journal:
            self._abort()

    @property
    def finalized(self):
        # finalize callbacks are cleared (set to None) at the end of close();
        # their absence marks the transaction as finalized
        return self._finalizecallback is None

    @active
    def startgroup(self):
        """delay registration of file entry

        This is used by strip to delay vision of strip offset. The transaction
        sees either none or all of the strip actions to be done."""
        self._queue.append([])

    @active
    def endgroup(self):
        """apply delayed registration of file entry.

        This is used by strip to delay vision of strip offset. The transaction
        sees either none or all of the strip actions to be done."""
        q = self._queue.pop()
        for f, o in q:
            self._addentry(f, o)

    @active
    def add(self, file, offset):
        """record the state of an append-only file before update"""
        if (
            file in self._newfiles
            or file in self._offsetmap
            or file in self._backupmap
        ):
            return
        if self._queue:
            self._queue[-1].append((file, offset))
            return

        self._addentry(file, offset)

    def _addentry(self, file, offset):
        """add an append-only entry to memory and on-disk state"""
        if (
            file in self._newfiles
            or file in self._offsetmap
            or file in self._backupmap
        ):
            return
        if offset:
            self._offsetmap[file] = offset
        else:
            self._newfiles.add(file)
        # add enough data to the journal to do the truncate
        self._file.write(b"%s\0%d\n" % (file, offset))
        self._file.flush()

    @active
    def addbackup(self, file, hardlink=True, location=b''):
        """Adds a backup of the file to the transaction

        Calling addbackup() creates a hardlink backup of the specified file
        that is used to recover the file in the event of the transaction
        aborting.

        * `file`: the file path, relative to .hg/store
        * `hardlink`: use a hardlink to quickly create the backup
        """
        if self._queue:
            msg = b'cannot use transaction.addbackup inside "group"'
            raise error.ProgrammingError(msg)

        if (
            file in self._newfiles
            or file in self._offsetmap
            or file in self._backupmap
        ):
            return
        vfs = self._vfsmap[location]
        dirname, filename = vfs.split(file)
        backupfilename = b"%s.backup.%s" % (self._journal, filename)
        backupfile = vfs.reljoin(dirname, backupfilename)
        if vfs.exists(file):
            filepath = vfs.join(file)
            backuppath = vfs.join(backupfile)
            util.copyfile(filepath, backuppath, hardlink=hardlink)
        else:
            # empty backup path records that the file did not exist yet
            backupfile = b''

        self._addbackupentry((location, file, backupfile, False))

    def _addbackupentry(self, entry):
        """register a new backup entry and write it to disk"""
        self._backupentries.append(entry)
        self._backupmap[entry[1]] = len(self._backupentries) - 1
        self._backupsfile.write(b"%s\0%s\0%s\0%d\n" % entry)
        self._backupsfile.flush()

    @active
    def registertmp(self, tmpfile, location=b''):
        """register a temporary transaction file

        Such files will be deleted when the transaction exits (on both
        failure and success).
        """
        self._addbackupentry((location, b'', tmpfile, False))

    @active
    def addfilegenerator(
        self,
        genid,
        filenames,
        genfunc,
        order=0,
        location=b'',
        post_finalize=False,
    ):
        """add a function to generate some files at transaction commit

        The `genfunc` argument is a function capable of generating proper
        content of each entry in the `filename` tuple.

        At transaction close time, `genfunc` will be called with one file
        object argument per entries in `filenames`.

        The transaction itself is responsible for the backup, creation and
        final write of such file.

        The `genid` argument is used to ensure the same set of file is only
        generated once. Call to `addfilegenerator` for a `genid` already
        present will overwrite the old entry.

        The `order` argument may be used to control the order in which multiple
        generator will be executed.

        The `location` arguments may be used to indicate the files are located
        outside of the standard directory for transaction. It should match
        one of the key of the `transaction.vfsmap` dictionary.

        The `post_finalize` argument can be set to `True` for file generation
        that must be run after the transaction has been finalized.
        """
        # For now, we are unable to do proper backup and restore of custom vfs
        # but for bookmarks that are handled outside this mechanism.
        entry = (order, filenames, genfunc, location, post_finalize)
        self._filegenerators[genid] = entry

    @active
    def removefilegenerator(self, genid):
        """reverse of addfilegenerator, remove a file generator function"""
        if genid in self._filegenerators:
            del self._filegenerators[genid]

    def _generatefiles(self, suffix=b'', group=GEN_GROUP_ALL):
        # write files registered for generation; returns True if any
        # generator was registered (whether or not it was run for `group`)
        any = False

        if group == GEN_GROUP_ALL:
            skip_post = skip_pre = False
        else:
            skip_pre = group == GEN_GROUP_POST_FINALIZE
            skip_post = group == GEN_GROUP_PRE_FINALIZE

        for id, entry in sorted(self._filegenerators.items()):
            any = True
            order, filenames, genfunc, location, post_finalize = entry

            # for generation at closing, check if it's before or after finalize
            if skip_post and post_finalize:
                continue
            elif skip_pre and not post_finalize:
                continue

            vfs = self._vfsmap[location]
            files = []
            try:
                for name in filenames:
                    name += suffix
                    if suffix:
                        # suffixed files are previews (e.g. '.pending') and
                        # must be cleaned up whatever the outcome
                        self.registertmp(name, location=location)
                        checkambig = False
                    else:
                        self.addbackup(name, location=location)
                        checkambig = (name, location) in self._checkambigfiles
                    files.append(
                        vfs(name, b'w', atomictemp=True, checkambig=checkambig)
                    )
                genfunc(*files)
                for f in files:
                    f.close()
                # skip discard() loop since we're sure no open file remains
                del files[:]
            finally:
                for f in files:
                    f.discard()
        return any

    @active
    def findoffset(self, file):
        # return the recorded truncation offset for `file`: 0 for files
        # created by this transaction, None if the file is untracked
        if file in self._newfiles:
            return 0
        return self._offsetmap.get(file)

    @active
    def readjournal(self):
        # re-read the on-disk journal and return its (file, offset) entries
        self._file.seek(0)
        entries = []
        for l in self._file.readlines():
            file, troffset = l.split(b'\0')
            entries.append((file, int(troffset)))
        return entries

    @active
    def replace(self, file, offset):
        """
        replace can only replace already committed entries
        that are not pending in the queue
        """
        if file in self._newfiles:
            if not offset:
                return
            self._newfiles.remove(file)
            self._offsetmap[file] = offset
        elif file in self._offsetmap:
            if not offset:
                del self._offsetmap[file]
                self._newfiles.add(file)
            else:
                self._offsetmap[file] = offset
        else:
            raise KeyError(file)
        self._file.write(b"%s\0%d\n" % (file, offset))
        self._file.flush()

    @active
    def nest(self, name='<unnamed>'):
        """enter a nested scope of this same transaction (reference counted)"""
        self._count += 1
        self._usages += 1
        self._names.append(name)
        return self

    def release(self):
        """drop one usage of the transaction"""
        if self._count > 0:
            self._usages -= 1
        if self._names:
            self._names.pop()
        # if the transaction scopes are left without being closed, fail
        if self._count > 0 and self._usages == 0:
            self._abort()

    def running(self):
        # True while the transaction is neither fully closed nor aborted
        return self._count > 0

    def addpending(self, category, callback):
        """add a callback to be called when the transaction is pending

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._pendingcallback[category] = callback

    @active
    def writepending(self):
        """write pending file to temporary version

        This is used to allow hooks to view a transaction before commit"""
        categories = sorted(self._pendingcallback)
        for cat in categories:
            # remove callback since the data will have been flushed
            any = self._pendingcallback.pop(cat)(self)
            self._anypending = self._anypending or any
        self._anypending |= self._generatefiles(suffix=b'.pending')
        return self._anypending

    @active
    def hasfinalize(self, category):
        """check if a callback already exists for a category"""
        return category in self._finalizecallback

    @active
    def addfinalize(self, category, callback):
        """add a callback to be called when the transaction is closed

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting old callbacks with
        newer callbacks.
        """
        self._finalizecallback[category] = callback

    @active
    def addpostclose(self, category, callback):
        """add or replace a callback to be called after the transaction closed

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._postclosecallback[category] = callback

    @active
    def getpostclose(self, category):
        """return a postclose callback added before, or None"""
        return self._postclosecallback.get(category, None)

    @active
    def addabort(self, category, callback):
        """add a callback to be called when the transaction is aborted.

        The transaction will be given as the first argument to the callback.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._abortcallback[category] = callback

    @active
    def addvalidator(self, category, callback):
        """adds a callback to be called when validating the transaction.

        The transaction will be given as the first argument to the callback.

        callback should raise an exception to abort the transaction"""
        self._validatecallback[category] = callback

    @active
    def close(self):
        '''commit the transaction'''
        if self._count == 1:
            for category in sorted(self._validatecallback):
                self._validatecallback[category](self)
            self._validatecallback = None  # Help prevent cycles.
            self._generatefiles(group=GEN_GROUP_PRE_FINALIZE)
            # finalize callbacks may register further finalize callbacks,
            # hence the loop until the dict stays empty
            while self._finalizecallback:
                callbacks = self._finalizecallback
                self._finalizecallback = {}
                categories = sorted(callbacks)
                for cat in categories:
                    callbacks[cat](self)
            # Prevent double usage and help clear cycles.
            self._finalizecallback = None
            self._generatefiles(group=GEN_GROUP_POST_FINALIZE)

        self._count -= 1
        if self._count != 0:
            return
        self._file.close()
        self._backupsfile.close()
        # cleanup temporary files
        for l, f, b, c in self._backupentries:
            if l not in self._vfsmap and c:
                self._report(
                    b"couldn't remove %s: unknown cache location %s\n" % (b, l)
                )
                continue
            vfs = self._vfsmap[l]
            if not f and b and vfs.exists(b):
                try:
                    vfs.unlink(b)
                except (IOError, OSError, error.Abort) as inst:
                    if not c:
                        raise
                    # Abort may be raised by a read-only opener
                    self._report(
                        b"couldn't remove %s: %s\n" % (vfs.join(b), inst)
                    )
        self._offsetmap = {}
        self._newfiles = set()
        self._writeundo()
        if self._after:
            self._after()
            self._after = None  # Help prevent cycles.
        if self._opener.isfile(self._backupjournal):
            self._opener.unlink(self._backupjournal)
        if self._opener.isfile(self._journal):
            self._opener.unlink(self._journal)
        for l, _f, b, c in self._backupentries:
            if l not in self._vfsmap and c:
                self._report(
                    b"couldn't remove %s: unknown cache location"
                    b"%s\n" % (b, l)
                )
                continue
            vfs = self._vfsmap[l]
            if b and vfs.exists(b):
                try:
                    vfs.unlink(b)
                except (IOError, OSError, error.Abort) as inst:
                    if not c:
                        raise
                    # Abort may be raised by a read-only opener
                    self._report(
                        b"couldn't remove %s: %s\n" % (vfs.join(b), inst)
                    )
        self._backupentries = []
        self._journal = None

        self._releasefn(self, True)  # notify success of closing transaction
        self._releasefn = None  # Help prevent cycles.

        # run post close action
        categories = sorted(self._postclosecallback)
        for cat in categories:
            self._postclosecallback[cat](self)
        # Prevent double usage and help clear cycles.
        self._postclosecallback = None

    @active
    def abort(self):
        """abort the transaction (generally called on error, or when the
        transaction is not explicitly committed before going out of
        scope)"""
        self._abort()

    @active
    def add_journal(self, vfs_id, path):
        # record an extra journal file to be renamed to its undo name
        # by _writeundo at close time
        self._journal_files.append((vfs_id, path))

    def _writeundo(self):
        """write transaction data for possible future undo call"""
        if self._undoname is None:
            return
        cleanup_undo_files(
            self._report,
            self._vfsmap,
            undo_prefix=self._undoname,
        )

        def undoname(fn: bytes) -> bytes:
            # map a journal-prefixed name to its undo-prefixed counterpart
            base, name = os.path.split(fn)
            assert name.startswith(self._journal)
            new_name = name.replace(self._journal, self._undoname, 1)
            return os.path.join(base, new_name)

        undo_backup_path = b"%s.backupfiles" % self._undoname
        undobackupfile = self._opener.open(undo_backup_path, b'w')
        undobackupfile.write(b'%d\n' % version)
        for l, f, b, c in self._backupentries:
            if not f:  # temporary file
                continue
            if not b:
                u = b''
            else:
                if l not in self._vfsmap and c:
                    self._report(
                        b"couldn't remove %s: unknown cache location"
                        b"%s\n" % (b, l)
                    )
                    continue
                vfs = self._vfsmap[l]
                u = undoname(b)
                util.copyfile(vfs.join(b), vfs.join(u), hardlink=True)
            undobackupfile.write(b"%s\0%s\0%s\0%d\n" % (l, f, u, c))
        undobackupfile.close()
        for vfs, src in self._journal_files:
            dest = undoname(src)
            # if src and dest refer to a same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except FileNotFoundError:  # journal file does not yet exist
                pass

    def _abort(self):
        # core rollback logic shared by abort(), release() and __del__()
        entries = self.readjournal()
        self._count = 0
        self._usages = 0
        self._file.close()
        self._backupsfile.close()

        quick = self._can_quick_abort(entries)
        try:
            if not quick:
                self._report(_(b"transaction abort!\n"))
            for cat in sorted(self._abortcallback):
                self._abortcallback[cat](self)
            # Prevent double usage and help clear cycles.
            self._abortcallback = None
            if quick:
                self._do_quick_abort(entries)
            else:
                self._do_full_abort(entries)
        finally:
            self._journal = None
            self._releasefn(self, False)  # notify failure of transaction
            self._releasefn = None  # Help prevent cycles.

    def _can_quick_abort(self, entries):
        """False if any semantic content have been written on disk

        True if nothing, except temporary files has been written on disk."""
        if entries:
            return False
        for e in self._backupentries:
            if e[1]:
                return False
        return True

    def _do_quick_abort(self, entries):
        """(Silently) do a quick cleanup (see _can_quick_abort)"""
        assert self._can_quick_abort(entries)
        tmp_files = [e for e in self._backupentries if not e[1]]
        for vfs_id, old_path, tmp_path, xxx in tmp_files:
            vfs = self._vfsmap[vfs_id]
            try:
                vfs.unlink(tmp_path)
            except FileNotFoundError:
                pass
        if self._backupjournal:
            self._opener.unlink(self._backupjournal)
        if self._journal:
            self._opener.unlink(self._journal)

    def _do_full_abort(self, entries):
        """(Noisily) rollback all the change introduced by the transaction"""
        try:
            _playback(
                self._journal,
                self._report,
                self._opener,
                self._vfsmap,
                entries,
                self._backupentries,
                False,
                checkambigfiles=self._checkambigfiles,
            )
            self._report(_(b"rollback completed\n"))
        except BaseException as exc:
            self._report(_(b"rollback failed - please run hg recover\n"))
            self._report(
                _(b"(failure reason: %s)\n") % stringutil.forcebytestr(exc)
            )
813 823
814 824
# warning emitted when a backup journal's recorded format version does not
# match the `version` constant this module writes (see read_backup_files)
BAD_VERSION_MSG = _(
    b"journal was created by a different version of Mercurial\n"
)
818 828
819 829
def read_backup_files(report, fp):
    """parse an (already open) backup file and return contained backup entries

    entries are in the form: (location, file, backupfile, xxx)

    :location: the vfs identifier (vfsmap's key)
    :file: original file path (in the vfs)
    :backupfile: path of the backup (in the vfs)
    :cache: a boolean currently always set to False
    """
    lines = fp.readlines()
    if not lines:
        return []
    # the first line carries the on-disk format version
    if lines[0][:-1] != (b'%d' % version):
        report(BAD_VERSION_MSG)
        return []
    backupentries = []
    for raw in lines[1:]:
        if not raw:
            continue
        # drop the trailing newline before splitting the four NUL fields
        l, f, b, c = raw[:-1].split(b'\0')
        backupentries.append((l, f, b, bool(c)))
    return backupentries
844 854
845 855
def rollback(
    opener,
    vfsmap,
    file,
    report,
    checkambigfiles=None,
    skip_journal_pattern=None,
):
    """Rolls back the transaction contained in the given file

    Reads the entries in the specified file, and the corresponding
    '*.backupfiles' file, to recover from an incomplete transaction.

    * `file`: a file containing a list of entries, specifying where
    to truncate each file. The file should contain a list of
    file\0offset pairs, delimited by newlines. The corresponding
    '*.backupfiles' file should contain a list of file\0backupfile
    pairs, delimited by \0.

    `checkambigfiles` is a set of (path, vfs-location) tuples,
    which determine whether file stat ambiguity should be avoided at
    restoring corresponded files.
    """
    entries = []
    backupentries = []

    with opener.open(file) as fp:
        journal_lines = fp.readlines()
    for raw in journal_lines:
        try:
            # both the split and the offset conversion may fail on a
            # truncated/corrupt line; report and keep going
            name, offset = raw.split(b'\0')
            entries.append((name, int(offset)))
        except ValueError:
            report(
                _(b"couldn't read journal entry %r!\n") % pycompat.bytestr(raw)
            )

    backupjournal = b"%s.backupfiles" % file
    if opener.exists(backupjournal):
        with opener.open(backupjournal) as fp:
            backupentries = read_backup_files(report, fp)
        if skip_journal_pattern is not None:
            backupentries = [
                e
                for e in backupentries
                if not skip_journal_pattern.match(e[1])
            ]

    _playback(
        file,
        report,
        opener,
        vfsmap,
        entries,
        backupentries,
        checkambigfiles=checkambigfiles,
    )
General Comments 0
You need to be logged in to leave comments. Login now