transaction: tr._names are actually bytes, use byte string to join them...
av6 - r51343:392e2f31 default
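Note on the change at line 317: under Python 3, str.join() refuses a sequence of byte strings, so joining the byte-string entries of tr._names requires a bytes separator. A minimal sketch of the behavior this changeset fixes (the names below are hypothetical):

# Python 3 semantics that motivate this changeset.
names = [b'outer', b'nested']  # tr._names holds byte strings

try:
    '/'.join(names)  # str separator + bytes items
except TypeError as exc:
    print(exc)  # sequence item 0: expected str instance, bytes found

print(b'/'.join(names))  # b'outer/nested'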
@@ -1,955 +1,955 b''
1 1 # transaction.py - simple journaling scheme for mercurial
2 2 #
3 3 # This transaction scheme is intended to gracefully handle program
4 4 # errors and interruptions. More serious failures like system crashes
5 5 # can be recovered with an fsck-like tool. As the whole repository is
6 6 # effectively log-structured, this should amount to simply truncating
7 7 # anything that isn't referenced in the changelog.
8 8 #
9 9 # Copyright 2005, 2006 Olivia Mackall <olivia@selenic.com>
10 10 #
11 11 # This software may be used and distributed according to the terms of the
12 12 # GNU General Public License version 2 or any later version.
13 13
14 14 import errno
15 15 import os
16 16
17 17 from .i18n import _
18 18 from . import (
19 19 error,
20 20 pycompat,
21 21 util,
22 22 )
23 23 from .utils import stringutil
24 24
25 25 version = 2
26 26
27 27 GEN_GROUP_ALL = b'all'
28 28 GEN_GROUP_PRE_FINALIZE = b'prefinalize'
29 29 GEN_GROUP_POST_FINALIZE = b'postfinalize'
30 30
31 31
32 32 def active(func):
33 33 def _active(self, *args, **kwds):
34 34 if self._count == 0:
35 35 raise error.ProgrammingError(
36 36 b'cannot use transaction when it is already committed/aborted'
37 37 )
38 38 return func(self, *args, **kwds)
39 39
40 40 return _active
41 41
42 42
43 43 UNDO_BACKUP = b'%s.backupfiles'
44 44
45 45 UNDO_FILES_MAY_NEED_CLEANUP = [
46 46 # legacy entries that might exist on disk from previous versions:
47 47 (b'store', b'%s.narrowspec'),
48 48 (b'plain', b'%s.narrowspec.dirstate'),
49 49 (b'plain', b'%s.branch'),
50 50 (b'plain', b'%s.bookmarks'),
51 51 (b'store', b'%s.phaseroots'),
52 52 (b'plain', b'%s.dirstate'),
53 53 # files actually in use today:
54 54 (b'plain', b'%s.desc'),
55 55 # Always delete undo last to make sure we detect that a cleanup is needed if
56 56 # the process is interrupted.
57 57 (b'store', b'%s'),
58 58 ]
59 59
60 60
61 61 def cleanup_undo_files(report, vfsmap, undo_prefix=b'undo'):
62 62 """remove "undo" files used by the rollback logic
63 63
64 64 This is useful to prevent rollback from running in situations where it does not
65 65 make sense, for example after a strip.
66 66 """
67 67 backup_listing = UNDO_BACKUP % undo_prefix
68 68
69 69 backup_entries = []
70 70 undo_files = []
71 71 svfs = vfsmap[b'store']
72 72 try:
73 73 with svfs(backup_listing) as f:
74 74 backup_entries = read_backup_files(report, f)
75 75 except OSError as e:
76 76 if e.errno != errno.ENOENT:
77 77 msg = _(b'could not read %s: %s\n')
78 78 msg %= (svfs.join(backup_listing), stringutil.forcebytestr(e))
79 79 report(msg)
80 80
81 81 for location, f, backup_path, c in backup_entries:
82 82 if location in vfsmap and backup_path:
83 83 undo_files.append((vfsmap[location], backup_path))
84 84
85 85 undo_files.append((svfs, backup_listing))
86 86 for location, undo_path in UNDO_FILES_MAY_NEED_CLEANUP:
87 87 undo_files.append((vfsmap[location], undo_path % undo_prefix))
88 88 for undovfs, undofile in undo_files:
89 89 try:
90 90 undovfs.unlink(undofile)
91 91 except OSError as e:
92 92 if e.errno != errno.ENOENT:
93 93 msg = _(b'error removing %s: %s\n')
94 94 msg %= (undovfs.join(undofile), stringutil.forcebytestr(e))
95 95 report(msg)
96 96
97 97
98 98 def _playback(
99 99 journal,
100 100 report,
101 101 opener,
102 102 vfsmap,
103 103 entries,
104 104 backupentries,
105 105 unlink=True,
106 106 checkambigfiles=None,
107 107 ):
108 108 """rollback a transaction :
109 109 - truncate files that have been appended to
110 110 - restore file backups
111 111 - delete temporary files
112 112 """
113 113 backupfiles = []
114 114
115 115 def restore_one_backup(vfs, f, b, checkambig):
116 116 filepath = vfs.join(f)
117 117 backuppath = vfs.join(b)
118 118 try:
119 119 util.copyfile(backuppath, filepath, checkambig=checkambig)
120 120 backupfiles.append((vfs, b))
121 121 except IOError as exc:
122 122 e_msg = stringutil.forcebytestr(exc)
123 123 report(_(b"failed to recover %s (%s)\n") % (f, e_msg))
124 124 raise
125 125
126 126 # gather all backup files that impact the store
127 127 # (we need this to detect files that are both backed up and truncated)
128 128 store_backup = {}
129 129 for entry in backupentries:
130 130 location, file_path, backup_path, cache = entry
131 131 vfs = vfsmap[location]
132 132 is_store = vfs.join(b'') == opener.join(b'')
133 133 if is_store and file_path and backup_path:
134 134 store_backup[file_path] = entry
135 135 copy_done = set()
136 136
137 137 # truncate each file `f` to offset `o`
138 138 for f, o in sorted(dict(entries).items()):
139 139 # if we have a backup for `f`, we should restore it first and truncate
140 140 # the restored file
141 141 bck_entry = store_backup.get(f)
142 142 if bck_entry is not None:
143 143 location, file_path, backup_path, cache = bck_entry
144 144 checkambig = False
145 145 if checkambigfiles:
146 146 checkambig = (file_path, location) in checkambigfiles
147 147 restore_one_backup(opener, file_path, backup_path, checkambig)
148 148 copy_done.add(bck_entry)
149 149 # truncate the file to its pre-transaction size
150 150 if o or not unlink:
151 151 checkambig = checkambigfiles and (f, b'') in checkambigfiles
152 152 try:
153 153 fp = opener(f, b'a', checkambig=checkambig)
154 154 if fp.tell() < o:
155 155 raise error.Abort(
156 156 _(
157 157 b"attempted to truncate %s to %d bytes, but it was "
158 158 b"already %d bytes\n"
159 159 )
160 160 % (f, o, fp.tell())
161 161 )
162 162 fp.truncate(o)
163 163 fp.close()
164 164 except IOError:
165 165 report(_(b"failed to truncate %s\n") % f)
166 166 raise
167 167 else:
168 168 # delete empty file
169 169 try:
170 170 opener.unlink(f)
171 171 except FileNotFoundError:
172 172 pass
173 173 # restore backed up files and clean up temporary files
174 174 for entry in backupentries:
175 175 if entry in copy_done:
176 176 continue
177 177 l, f, b, c = entry
178 178 if l not in vfsmap and c:
179 179 report(b"couldn't handle %s: unknown cache location %s\n" % (b, l))
180 180 vfs = vfsmap[l]
181 181 try:
182 182 checkambig = checkambigfiles and (f, l) in checkambigfiles
183 183 if f and b:
184 184 restore_one_backup(vfs, f, b, checkambig)
185 185 else:
186 186 target = f or b
187 187 try:
188 188 vfs.unlink(target)
189 189 except FileNotFoundError:
190 190 # This is fine because
191 191 #
192 192 # either we are trying to delete the main file, and it is
193 193 # already deleted.
194 194 #
195 195 # or we are trying to delete a temporary file and it is
196 196 # already deleted.
197 197 #
198 198 # in both cases, our target result (deleting the file) is
199 199 # already achieved.
200 200 pass
201 201 except (IOError, OSError, error.Abort):
202 202 if not c:
203 203 raise
204 204
205 205 # cleanup transaction state file and the backups file
206 206 backuppath = b"%s.backupfiles" % journal
207 207 if opener.exists(backuppath):
208 208 opener.unlink(backuppath)
209 209 opener.unlink(journal)
210 210 try:
211 211 for vfs, f in backupfiles:
212 212 if vfs.exists(f):
213 213 vfs.unlink(f)
214 214 except (IOError, OSError, error.Abort):
215 215 # only pure backup files remain, it is safe to ignore any error
216 216 pass
217 217
218 218
219 219 class transaction(util.transactional):
220 220 def __init__(
221 221 self,
222 222 report,
223 223 opener,
224 224 vfsmap,
225 225 journalname,
226 226 undoname=None,
227 227 after=None,
228 228 createmode=None,
229 229 validator=None,
230 230 releasefn=None,
231 231 checkambigfiles=None,
232 232 name='<unnamed>',
233 233 ):
234 234 """Begin a new transaction
235 235
236 236 Begins a new transaction that allows rolling back writes in the event of
237 237 an exception.
238 238
239 239 * `after`: called after the transaction has been committed
240 240 * `createmode`: the mode of the journal file that will be created
241 241 * `releasefn`: called after releasing (with transaction and result)
242 242
243 243 `checkambigfiles` is a set of (path, vfs-location) tuples,
244 244 which determine whether file stat ambiguity should be avoided
245 245 for the corresponding files.
246 246 """
247 247 self._count = 1
248 248 self._usages = 1
249 249 self._report = report
250 250 # a vfs to the store content
251 251 self._opener = opener
252 252 # a map to access files in various locations {location -> vfs}
253 253 vfsmap = vfsmap.copy()
254 254 vfsmap[b''] = opener # set default value
255 255 self._vfsmap = vfsmap
256 256 self._after = after
257 257 self._offsetmap = {}
258 258 self._newfiles = set()
259 259 self._journal = journalname
260 260 self._journal_files = []
261 261 self._undoname = undoname
262 262 self._queue = []
263 263 # A callback to do something just after releasing transaction.
264 264 if releasefn is None:
265 265 releasefn = lambda tr, success: None
266 266 self._releasefn = releasefn
267 267
268 268 self._checkambigfiles = set()
269 269 if checkambigfiles:
270 270 self._checkambigfiles.update(checkambigfiles)
271 271
272 272 self._names = [name]
273 273
274 274 # A dict dedicated to precisely tracking the changes introduced in the
275 275 # transaction.
276 276 self.changes = {}
277 277
278 278 # a dict of arguments to be passed to hooks
279 279 self.hookargs = {}
280 280 self._file = opener.open(self._journal, b"w+")
281 281
282 282 # a list of ('location', 'path', 'backuppath', cache) entries.
283 283 # - if 'backuppath' is empty, no file existed at backup time
284 284 # - if 'path' is empty, this is a temporary transaction file
285 285 # - if 'location' is not empty, the path is outside the main opener's reach.
286 286 # use 'location' value as a key in a vfsmap to find the right 'vfs'
287 287 # (cache is currently unused)
288 288 self._backupentries = []
289 289 self._backupmap = {}
290 290 self._backupjournal = b"%s.backupfiles" % self._journal
291 291 self._backupsfile = opener.open(self._backupjournal, b'w')
292 292 self._backupsfile.write(b'%d\n' % version)
293 293
294 294 if createmode is not None:
295 295 opener.chmod(self._journal, createmode & 0o666)
296 296 opener.chmod(self._backupjournal, createmode & 0o666)
297 297
298 298 # hold file generations to be performed on commit
299 299 self._filegenerators = {}
300 300 # hold callback to write pending data for hooks
301 301 self._pendingcallback = {}
302 302 # True if any pending data has ever been written
303 303 self._anypending = False
304 304 # holds callback to call when writing the transaction
305 305 self._finalizecallback = {}
306 306 # holds callback to call when validating the transaction
307 307 # should raise exception if anything is wrong
308 308 self._validatecallback = {}
309 309 if validator is not None:
310 310 self._validatecallback[b'001-userhooks'] = validator
311 311 # hold callback for post transaction close
312 312 self._postclosecallback = {}
313 313 # holds callbacks to call during abort
314 314 self._abortcallback = {}
315 315
316 316 def __repr__(self):
317 name = '/'.join(self._names)
317 name = b'/'.join(self._names)
318 318 return '<transaction name=%s, count=%d, usages=%d>' % (
319 319 name,
320 320 self._count,
321 321 self._usages,
322 322 )
323 323
324 324 def __del__(self):
325 325 if self._journal:
326 326 self._abort()
327 327
328 328 @property
329 329 def finalized(self):
330 330 return self._finalizecallback is None
331 331
332 332 @active
333 333 def startgroup(self):
334 334 """delay registration of file entry
335 335
336 336 This is used by strip to delay vision of strip offset. The transaction
337 337 sees either none or all of the strip actions to be done."""
338 338 self._queue.append([])
339 339
340 340 @active
341 341 def endgroup(self):
342 342 """apply delayed registration of file entry.
343 343
344 344 This is used by strip to delay vision of strip offset. The transaction
345 345 sees either none or all of the strip actions to be done."""
346 346 q = self._queue.pop()
347 347 for f, o in q:
348 348 self._addentry(f, o)
349 349
350 350 @active
351 351 def add(self, file, offset):
352 352 """record the state of an append-only file before update"""
353 353 if (
354 354 file in self._newfiles
355 355 or file in self._offsetmap
356 356 or file in self._backupmap
357 357 ):
358 358 return
359 359 if self._queue:
360 360 self._queue[-1].append((file, offset))
361 361 return
362 362
363 363 self._addentry(file, offset)
364 364
365 365 def _addentry(self, file, offset):
366 366 """add a append-only entry to memory and on-disk state"""
367 367 if (
368 368 file in self._newfiles
369 369 or file in self._offsetmap
370 370 or file in self._backupmap
371 371 ):
372 372 return
373 373 if offset:
374 374 self._offsetmap[file] = offset
375 375 else:
376 376 self._newfiles.add(file)
377 377 # add enough data to the journal to do the truncate
378 378 self._file.write(b"%s\0%d\n" % (file, offset))
379 379 self._file.flush()
380 380
381 381 @active
382 382 def addbackup(self, file, hardlink=True, location=b'', for_offset=False):
383 383 """Adds a backup of the file to the transaction
384 384
385 385 Calling addbackup() creates a hardlink backup of the specified file
386 386 that is used to recover the file in the event of the transaction
387 387 aborting.
388 388
389 389 * `file`: the file path, relative to .hg/store
390 390 * `hardlink`: use a hardlink to quickly create the backup
391 391
392 392 If `for_offset` is set, we expect an offset for this file to have been previously recorded
393 393 """
394 394 if self._queue:
395 395 msg = b'cannot use transaction.addbackup inside "group"'
396 396 raise error.ProgrammingError(msg)
397 397
398 398 if file in self._newfiles or file in self._backupmap:
399 399 return
400 400 elif file in self._offsetmap and not for_offset:
401 401 return
402 402 elif for_offset and file not in self._offsetmap:
403 403 msg = (
404 404 'calling `addbackup` with `for_offset=True`, '
405 405 'but no offset recorded: [%r] %r'
406 406 )
407 407 msg %= (location, file)
408 408 raise error.ProgrammingError(msg)
409 409
410 410 vfs = self._vfsmap[location]
411 411 dirname, filename = vfs.split(file)
412 412 backupfilename = b"%s.backup.%s" % (self._journal, filename)
413 413 backupfile = vfs.reljoin(dirname, backupfilename)
414 414 if vfs.exists(file):
415 415 filepath = vfs.join(file)
416 416 backuppath = vfs.join(backupfile)
417 417 util.copyfile(filepath, backuppath, hardlink=hardlink)
418 418 else:
419 419 backupfile = b''
420 420
421 421 self._addbackupentry((location, file, backupfile, False))
422 422
423 423 def _addbackupentry(self, entry):
424 424 """register a new backup entry and write it to disk"""
425 425 self._backupentries.append(entry)
426 426 self._backupmap[entry[1]] = len(self._backupentries) - 1
427 427 self._backupsfile.write(b"%s\0%s\0%s\0%d\n" % entry)
428 428 self._backupsfile.flush()
429 429
430 430 @active
431 431 def registertmp(self, tmpfile, location=b''):
432 432 """register a temporary transaction file
433 433
434 434 Such files will be deleted when the transaction exits (on both
435 435 failure and success).
436 436 """
437 437 self._addbackupentry((location, b'', tmpfile, False))
438 438
439 439 @active
440 440 def addfilegenerator(
441 441 self,
442 442 genid,
443 443 filenames,
444 444 genfunc,
445 445 order=0,
446 446 location=b'',
447 447 post_finalize=False,
448 448 ):
449 449 """add a function to generates some files at transaction commit
450 450
451 451 The `genfunc` argument is a function capable of generating proper
452 452 content for each entry in the `filenames` tuple.
453 453
454 454 At transaction close time, `genfunc` will be called with one file
455 455 object argument per entry in `filenames`.
456 456
457 457 The transaction itself is responsible for the backup, creation and
458 458 final write of such files.
459 459
460 460 The `genid` argument is used to ensure the same set of files is only
461 461 generated once. A call to `addfilegenerator` for a `genid` already
462 462 present will overwrite the old entry.
463 463
464 464 The `order` argument may be used to control the order in which multiple
465 465 generators will be executed.
466 466
467 467 The `location` argument may be used to indicate the files are located
468 468 outside of the standard directory for the transaction. It should match
469 469 one of the keys of the `transaction.vfsmap` dictionary.
470 470
471 471 The `post_finalize` argument can be set to `True` for file generation
472 472 that must be run after the transaction has been finalized.
473 473 """
474 474 # For now, we are unable to do a proper backup and restore of custom vfs,
475 475 # except for bookmarks, which are handled outside this mechanism.
476 476 entry = (order, filenames, genfunc, location, post_finalize)
477 477 self._filegenerators[genid] = entry
478 478
479 479 @active
480 480 def removefilegenerator(self, genid):
481 481 """reverse of addfilegenerator, remove a file generator function"""
482 482 if genid in self._filegenerators:
483 483 del self._filegenerators[genid]
484 484
485 485 def _generatefiles(self, suffix=b'', group=GEN_GROUP_ALL):
486 486 # write files registered for generation
487 487 any = False
488 488
489 489 if group == GEN_GROUP_ALL:
490 490 skip_post = skip_pre = False
491 491 else:
492 492 skip_pre = group == GEN_GROUP_POST_FINALIZE
493 493 skip_post = group == GEN_GROUP_PRE_FINALIZE
494 494
495 495 for id, entry in sorted(self._filegenerators.items()):
496 496 any = True
497 497 order, filenames, genfunc, location, post_finalize = entry
498 498
499 499 # for generation at closing, check if it's before or after finalize
500 500 if skip_post and post_finalize:
501 501 continue
502 502 elif skip_pre and not post_finalize:
503 503 continue
504 504
505 505 vfs = self._vfsmap[location]
506 506 files = []
507 507 try:
508 508 for name in filenames:
509 509 name += suffix
510 510 if suffix:
511 511 self.registertmp(name, location=location)
512 512 checkambig = False
513 513 else:
514 514 self.addbackup(name, location=location)
515 515 checkambig = (name, location) in self._checkambigfiles
516 516 files.append(
517 517 vfs(name, b'w', atomictemp=True, checkambig=checkambig)
518 518 )
519 519 genfunc(*files)
520 520 for f in files:
521 521 f.close()
522 522 # skip discard() loop since we're sure no open file remains
523 523 del files[:]
524 524 finally:
525 525 for f in files:
526 526 f.discard()
527 527 return any
528 528
529 529 @active
530 530 def findoffset(self, file):
531 531 if file in self._newfiles:
532 532 return 0
533 533 return self._offsetmap.get(file)
534 534
535 535 @active
536 536 def readjournal(self):
537 537 self._file.seek(0)
538 538 entries = []
539 539 for l in self._file.readlines():
540 540 file, troffset = l.split(b'\0')
541 541 entries.append((file, int(troffset)))
542 542 return entries
543 543
544 544 @active
545 545 def replace(self, file, offset):
546 546 """
547 547 replace can only replace already committed entries
548 548 that are not pending in the queue
549 549 """
550 550 if file in self._newfiles:
551 551 if not offset:
552 552 return
553 553 self._newfiles.remove(file)
554 554 self._offsetmap[file] = offset
555 555 elif file in self._offsetmap:
556 556 if not offset:
557 557 del self._offsetmap[file]
558 558 self._newfiles.add(file)
559 559 else:
560 560 self._offsetmap[file] = offset
561 561 else:
562 562 raise KeyError(file)
563 563 self._file.write(b"%s\0%d\n" % (file, offset))
564 564 self._file.flush()
565 565
566 566 @active
567 567 def nest(self, name='<unnamed>'):
568 568 self._count += 1
569 569 self._usages += 1
570 570 self._names.append(name)
571 571 return self
572 572
573 573 def release(self):
574 574 if self._count > 0:
575 575 self._usages -= 1
576 576 if self._names:
577 577 self._names.pop()
578 578 # if the transaction scopes are left without being closed, fail
579 579 if self._count > 0 and self._usages == 0:
580 580 self._abort()
581 581
582 582 def running(self):
583 583 return self._count > 0
584 584
585 585 def addpending(self, category, callback):
586 586 """add a callback to be called when the transaction is pending
587 587
588 588 The transaction will be given as callback's first argument.
589 589
590 590 Category is a unique identifier to allow overwriting an old callback
591 591 with a newer callback.
592 592 """
593 593 self._pendingcallback[category] = callback
594 594
595 595 @active
596 596 def writepending(self):
597 597 """write pending file to temporary version
598 598
599 599 This is used to allow hooks to view a transaction before commit"""
600 600 categories = sorted(self._pendingcallback)
601 601 for cat in categories:
602 602 # remove callback since the data will have been flushed
603 603 any = self._pendingcallback.pop(cat)(self)
604 604 self._anypending = self._anypending or any
605 605 self._anypending |= self._generatefiles(suffix=b'.pending')
606 606 return self._anypending
607 607
608 608 @active
609 609 def hasfinalize(self, category):
610 610 """check is a callback already exist for a category"""
611 611 return category in self._finalizecallback
612 612
613 613 @active
614 614 def addfinalize(self, category, callback):
615 615 """add a callback to be called when the transaction is closed
616 616
617 617 The transaction will be given as callback's first argument.
618 618
619 619 Category is a unique identifier to allow overwriting old callbacks with
620 620 newer callbacks.
621 621 """
622 622 self._finalizecallback[category] = callback
623 623
624 624 @active
625 625 def addpostclose(self, category, callback):
626 626 """add or replace a callback to be called after the transaction closed
627 627
628 628 The transaction will be given as callback's first argument.
629 629
630 630 Category is a unique identifier to allow overwriting an old callback
631 631 with a newer callback.
632 632 """
633 633 self._postclosecallback[category] = callback
634 634
635 635 @active
636 636 def getpostclose(self, category):
637 637 """return a postclose callback added before, or None"""
638 638 return self._postclosecallback.get(category, None)
639 639
640 640 @active
641 641 def addabort(self, category, callback):
642 642 """add a callback to be called when the transaction is aborted.
643 643
644 644 The transaction will be given as the first argument to the callback.
645 645
646 646 Category is a unique identifier to allow overwriting an old callback
647 647 with a newer callback.
648 648 """
649 649 self._abortcallback[category] = callback
650 650
651 651 @active
652 652 def addvalidator(self, category, callback):
653 653 """adds a callback to be called when validating the transaction.
654 654
655 655 The transaction will be given as the first argument to the callback.
656 656
657 657 The callback should raise an exception to abort the transaction."""
658 658 self._validatecallback[category] = callback
659 659
660 660 @active
661 661 def close(self):
662 662 '''commit the transaction'''
663 663 if self._count == 1:
664 664 for category in sorted(self._validatecallback):
665 665 self._validatecallback[category](self)
666 666 self._validatecallback = None # Help prevent cycles.
667 667 self._generatefiles(group=GEN_GROUP_PRE_FINALIZE)
668 668 while self._finalizecallback:
669 669 callbacks = self._finalizecallback
670 670 self._finalizecallback = {}
671 671 categories = sorted(callbacks)
672 672 for cat in categories:
673 673 callbacks[cat](self)
674 674 # Prevent double usage and help clear cycles.
675 675 self._finalizecallback = None
676 676 self._generatefiles(group=GEN_GROUP_POST_FINALIZE)
677 677
678 678 self._count -= 1
679 679 if self._count != 0:
680 680 return
681 681 self._file.close()
682 682 self._backupsfile.close()
683 683 # cleanup temporary files
684 684 for l, f, b, c in self._backupentries:
685 685 if l not in self._vfsmap and c:
686 686 self._report(
687 687 b"couldn't remove %s: unknown cache location %s\n" % (b, l)
688 688 )
689 689 continue
690 690 vfs = self._vfsmap[l]
691 691 if not f and b and vfs.exists(b):
692 692 try:
693 693 vfs.unlink(b)
694 694 except (IOError, OSError, error.Abort) as inst:
695 695 if not c:
696 696 raise
697 697 # Abort may be raised by a read-only opener
698 698 self._report(
699 699 b"couldn't remove %s: %s\n" % (vfs.join(b), inst)
700 700 )
701 701 self._offsetmap = {}
702 702 self._newfiles = set()
703 703 self._writeundo()
704 704 if self._after:
705 705 self._after()
706 706 self._after = None # Help prevent cycles.
707 707 if self._opener.isfile(self._backupjournal):
708 708 self._opener.unlink(self._backupjournal)
709 709 if self._opener.isfile(self._journal):
710 710 self._opener.unlink(self._journal)
711 711 for l, _f, b, c in self._backupentries:
712 712 if l not in self._vfsmap and c:
713 713 self._report(
714 714 b"couldn't remove %s: unknown cache location"
715 715 b"%s\n" % (b, l)
716 716 )
717 717 continue
718 718 vfs = self._vfsmap[l]
719 719 if b and vfs.exists(b):
720 720 try:
721 721 vfs.unlink(b)
722 722 except (IOError, OSError, error.Abort) as inst:
723 723 if not c:
724 724 raise
725 725 # Abort may be raised by a read-only opener
726 726 self._report(
727 727 b"couldn't remove %s: %s\n" % (vfs.join(b), inst)
728 728 )
729 729 self._backupentries = []
730 730 self._journal = None
731 731
732 732 self._releasefn(self, True) # notify success of closing transaction
733 733 self._releasefn = None # Help prevent cycles.
734 734
735 735 # run post close action
736 736 categories = sorted(self._postclosecallback)
737 737 for cat in categories:
738 738 self._postclosecallback[cat](self)
739 739 # Prevent double usage and help clear cycles.
740 740 self._postclosecallback = None
741 741
742 742 @active
743 743 def abort(self):
744 744 """abort the transaction (generally called on error, or when the
745 745 transaction is not explicitly committed before going out of
746 746 scope)"""
747 747 self._abort()
748 748
749 749 @active
750 750 def add_journal(self, vfs_id, path):
751 751 self._journal_files.append((vfs_id, path))
752 752
753 753 def _writeundo(self):
754 754 """write transaction data for possible future undo call"""
755 755 if self._undoname is None:
756 756 return
757 757 cleanup_undo_files(
758 758 self._report,
759 759 self._vfsmap,
760 760 undo_prefix=self._undoname,
761 761 )
762 762
763 763 def undoname(fn: bytes) -> bytes:
764 764 base, name = os.path.split(fn)
765 765 assert name.startswith(self._journal)
766 766 new_name = name.replace(self._journal, self._undoname, 1)
767 767 return os.path.join(base, new_name)
768 768
769 769 undo_backup_path = b"%s.backupfiles" % self._undoname
770 770 undobackupfile = self._opener.open(undo_backup_path, b'w')
771 771 undobackupfile.write(b'%d\n' % version)
772 772 for l, f, b, c in self._backupentries:
773 773 if not f: # temporary file
774 774 continue
775 775 if not b:
776 776 u = b''
777 777 else:
778 778 if l not in self._vfsmap and c:
779 779 self._report(
780 780 b"couldn't remove %s: unknown cache location"
781 781 b"%s\n" % (b, l)
782 782 )
783 783 continue
784 784 vfs = self._vfsmap[l]
785 785 u = undoname(b)
786 786 util.copyfile(vfs.join(b), vfs.join(u), hardlink=True)
787 787 undobackupfile.write(b"%s\0%s\0%s\0%d\n" % (l, f, u, c))
788 788 undobackupfile.close()
789 789 for vfs, src in self._journal_files:
790 790 dest = undoname(src)
791 791 # if src and dest refer to the same file, vfs.rename is a no-op,
792 792 # leaving both src and dest on disk. delete dest to make sure
793 793 # the rename cannot be such a no-op.
794 794 vfs.tryunlink(dest)
795 795 try:
796 796 vfs.rename(src, dest)
797 797 except FileNotFoundError: # journal file does not yet exist
798 798 pass
799 799
800 800 def _abort(self):
801 801 entries = self.readjournal()
802 802 self._count = 0
803 803 self._usages = 0
804 804 self._file.close()
805 805 self._backupsfile.close()
806 806
807 807 quick = self._can_quick_abort(entries)
808 808 try:
809 809 if not quick:
810 810 self._report(_(b"transaction abort!\n"))
811 811 for cat in sorted(self._abortcallback):
812 812 self._abortcallback[cat](self)
813 813 # Prevent double usage and help clear cycles.
814 814 self._abortcallback = None
815 815 if quick:
816 816 self._do_quick_abort(entries)
817 817 else:
818 818 self._do_full_abort(entries)
819 819 finally:
820 820 self._journal = None
821 821 self._releasefn(self, False) # notify failure of transaction
822 822 self._releasefn = None # Help prevent cycles.
823 823
824 824 def _can_quick_abort(self, entries):
825 825 """False if any semantic content have been written on disk
826 826
827 827 True if nothing, except temporary files has been writen on disk."""
828 828 if entries:
829 829 return False
830 830 for e in self._backupentries:
831 831 if e[1]:
832 832 return False
833 833 return True
834 834
835 835 def _do_quick_abort(self, entries):
836 836 """(Silently) do a quick cleanup (see _can_quick_abort)"""
837 837 assert self._can_quick_abort(entries)
838 838 tmp_files = [e for e in self._backupentries if not e[1]]
839 839 for vfs_id, old_path, tmp_path, xxx in tmp_files:
840 840 vfs = self._vfsmap[vfs_id]
841 841 try:
842 842 vfs.unlink(tmp_path)
843 843 except FileNotFoundError:
844 844 pass
845 845 if self._backupjournal:
846 846 self._opener.unlink(self._backupjournal)
847 847 if self._journal:
848 848 self._opener.unlink(self._journal)
849 849
850 850 def _do_full_abort(self, entries):
851 851 """(Noisily) rollback all the change introduced by the transaction"""
852 852 try:
853 853 _playback(
854 854 self._journal,
855 855 self._report,
856 856 self._opener,
857 857 self._vfsmap,
858 858 entries,
859 859 self._backupentries,
860 860 False,
861 861 checkambigfiles=self._checkambigfiles,
862 862 )
863 863 self._report(_(b"rollback completed\n"))
864 864 except BaseException as exc:
865 865 self._report(_(b"rollback failed - please run hg recover\n"))
866 866 self._report(
867 867 _(b"(failure reason: %s)\n") % stringutil.forcebytestr(exc)
868 868 )
869 869
870 870
871 871 BAD_VERSION_MSG = _(
872 872 b"journal was created by a different version of Mercurial\n"
873 873 )
874 874
875 875
876 876 def read_backup_files(report, fp):
877 877 """parse an (already open) backup file an return contained backup entries
878 878
879 879 entries are in the form: (location, file, backupfile, cache)
880 880
881 881 :location: the vfs identifier (vfsmap's key)
882 882 :file: original file path (in the vfs)
883 883 :backupfile: path of the backup (in the vfs)
884 884 :cache: a boolean currently always set to False
885 885 """
886 886 lines = fp.readlines()
887 887 backupentries = []
888 888 if lines:
889 889 ver = lines[0][:-1]
890 890 if ver != (b'%d' % version):
891 891 report(BAD_VERSION_MSG)
892 892 else:
893 893 for line in lines[1:]:
894 894 if line:
895 895 # Shave off the trailing newline
896 896 line = line[:-1]
897 897 l, f, b, c = line.split(b'\0')
898 898 backupentries.append((l, f, b, bool(c)))
899 899 return backupentries
900 900
901 901
902 902 def rollback(
903 903 opener,
904 904 vfsmap,
905 905 file,
906 906 report,
907 907 checkambigfiles=None,
908 908 skip_journal_pattern=None,
909 909 ):
910 910 """Rolls back the transaction contained in the given file
911 911
912 912 Reads the entries in the specified file, and the corresponding
913 913 '*.backupfiles' file, to recover from an incomplete transaction.
914 914
915 915 * `file`: a file containing a list of entries, specifying where
916 916 to truncate each file. The file should contain a list of
917 917 file\0offset pairs, delimited by newlines. The corresponding
918 918 '*.backupfiles' file should contain a list of file\0backupfile
919 919 pairs, delimited by \0.
920 920
921 921 `checkambigfiles` is a set of (path, vfs-location) tuples,
922 922 which determine whether file stat ambiguity should be avoided when
923 923 restoring the corresponding files.
924 924 """
925 925 entries = []
926 926 backupentries = []
927 927
928 928 with opener.open(file) as fp:
929 929 lines = fp.readlines()
930 930 for l in lines:
931 931 try:
932 932 f, o = l.split(b'\0')
933 933 entries.append((f, int(o)))
934 934 except ValueError:
935 935 report(
936 936 _(b"couldn't read journal entry %r!\n") % pycompat.bytestr(l)
937 937 )
938 938
939 939 backupjournal = b"%s.backupfiles" % file
940 940 if opener.exists(backupjournal):
941 941 with opener.open(backupjournal) as fp:
942 942 backupentries = read_backup_files(report, fp)
943 943 if skip_journal_pattern is not None:
944 944 keep = lambda x: not skip_journal_pattern.match(x[1])
945 945 backupentries = [x for x in backupentries if keep(x)]
946 946
947 947 _playback(
948 948 file,
949 949 report,
950 950 opener,
951 951 vfsmap,
952 952 entries,
953 953 backupentries,
954 954 checkambigfiles=checkambigfiles,
955 955 )