##// END OF EJS Templates
transaction: fix __repr__() and make the default name bytes...
Matt Harbison -
r51761:0a4efb65 stable
parent child Browse files
Show More
@@ -1,965 +1,966 b''
1 1 # transaction.py - simple journaling scheme for mercurial
2 2 #
3 3 # This transaction scheme is intended to gracefully handle program
4 4 # errors and interruptions. More serious failures like system crashes
5 5 # can be recovered with an fsck-like tool. As the whole repository is
6 6 # effectively log-structured, this should amount to simply truncating
7 7 # anything that isn't referenced in the changelog.
8 8 #
9 9 # Copyright 2005, 2006 Olivia Mackall <olivia@selenic.com>
10 10 #
11 11 # This software may be used and distributed according to the terms of the
12 12 # GNU General Public License version 2 or any later version.
13 13
14 14 import errno
15 15 import os
16 16
17 17 from .i18n import _
18 18 from . import (
19 encoding,
19 20 error,
20 21 pycompat,
21 22 util,
22 23 )
23 24 from .utils import stringutil
24 25
# on-disk format version of the journal / backup-listing files
version = 2

# group identifiers used by `_generatefiles` to select which file
# generators run relative to transaction finalization
GEN_GROUP_ALL = b'all'
GEN_GROUP_PRE_FINALIZE = b'prefinalize'
GEN_GROUP_POST_FINALIZE = b'postfinalize'
30 31
31 32
def active(func):
    """Decorator: only allow `func` while the transaction is running.

    Raises a ProgrammingError when the wrapped method is invoked after
    the transaction has been committed or aborted (``_count`` is 0).
    """

    def wrapper(self, *args, **kwargs):
        # guard clause: a finished transaction must not be reused
        if self._count != 0:
            return func(self, *args, **kwargs)
        raise error.ProgrammingError(
            b'cannot use transaction when it is already committed/aborted'
        )

    return wrapper
41 42
42 43
# template for the file that lists the backups taken for a transaction;
# `%s` is the journal/undo prefix
UNDO_BACKUP = b'%s.backupfiles'

# (vfs-location, path-template) pairs of undo files that
# `cleanup_undo_files` may have to delete
UNDO_FILES_MAY_NEED_CLEANUP = [
    # legacy entries that might exist on disk from previous versions:
    (b'store', b'%s.narrowspec'),
    (b'plain', b'%s.narrowspec.dirstate'),
    (b'plain', b'%s.branch'),
    (b'plain', b'%s.bookmarks'),
    (b'store', b'%s.phaseroots'),
    (b'plain', b'%s.dirstate'),
    # files actually in use today:
    (b'plain', b'%s.desc'),
    # Always delete undo last to make sure we detect that a clean up is needed if
    # the process is interrupted.
    (b'store', b'%s'),
]
59 60
60 61
def cleanup_undo_files(report, vfsmap, undo_prefix=b'undo'):
    """remove "undo" files used by the rollback logic

    This is useful to prevent rollback running in situations where it does
    not make sense. For example after a strip.

    * `report`: callable used to emit warning messages (takes bytes)
    * `vfsmap`: {location -> vfs} map used to resolve file locations
    * `undo_prefix`: filename prefix of the undo files to delete
    """
    backup_listing = UNDO_BACKUP % undo_prefix

    backup_entries = []
    undo_files = []
    svfs = vfsmap[b'store']
    try:
        with svfs(backup_listing) as f:
            backup_entries = read_backup_files(report, f)
    except FileNotFoundError:
        # no backup listing on disk: nothing extra to clean up from it
        pass
    except OSError as e:
        # best-effort: report unreadable listing, keep cleaning legacy files
        msg = _(b'could not read %s: %s\n')
        msg %= (svfs.join(backup_listing), stringutil.forcebytestr(e))
        report(msg)

    # schedule removal of every backup file referenced by the listing
    for location, f, backup_path, c in backup_entries:
        if location in vfsmap and backup_path:
            undo_files.append((vfsmap[location], backup_path))

    undo_files.append((svfs, backup_listing))
    for location, undo_path in UNDO_FILES_MAY_NEED_CLEANUP:
        undo_files.append((vfsmap[location], undo_path % undo_prefix))
    for undovfs, undofile in undo_files:
        try:
            undovfs.unlink(undofile)
        except FileNotFoundError:
            # already gone: the desired end state is achieved
            pass
        except OSError as e:
            msg = _(b'error removing %s: %s\n')
            msg %= (undovfs.join(undofile), stringutil.forcebytestr(e))
            report(msg)
97 98
def _playback(
    journal,
    report,
    opener,
    vfsmap,
    entries,
    backupentries,
    unlink=True,
    checkambigfiles=None,
):
    """rollback a transaction :
    - truncate files that have been appended to
    - restore file backups
    - delete temporary files

    * `journal`: path (in `opener`) of the journal file to remove at the end
    * `report`: callable used to emit warning messages (takes bytes)
    * `opener`: vfs to the store content
    * `vfsmap`: {location -> vfs} map used to resolve backup locations
    * `entries`: list of (file, offset) pairs; each file is truncated back
      to its recorded offset
    * `backupentries`: list of (location, file, backup, cache) entries
    * `unlink`: when True, files recorded at offset 0 are deleted outright
    * `checkambigfiles`: set of (path, vfs-location) tuples for which file
      stat ambiguity must be avoided when restoring/truncating
    """
    backupfiles = []

    def restore_one_backup(vfs, f, b, checkambig):
        # copy the backup `b` back over the damaged file `f`
        filepath = vfs.join(f)
        backuppath = vfs.join(b)
        try:
            util.copyfile(backuppath, filepath, checkambig=checkambig)
            backupfiles.append((vfs, b))
        except IOError as exc:
            e_msg = stringutil.forcebytestr(exc)
            report(_(b"failed to recover %s (%s)\n") % (f, e_msg))
            raise

    # gather all backup files that impact the store
    # (we need this to detect files that are both backed up and truncated)
    store_backup = {}
    for entry in backupentries:
        location, file_path, backup_path, cache = entry
        vfs = vfsmap[location]
        is_store = vfs.join(b'') == opener.join(b'')
        if is_store and file_path and backup_path:
            store_backup[file_path] = entry
    copy_done = set()

    # truncate all file `f` to offset `o`
    for f, o in sorted(dict(entries).items()):
        # if we have a backup for `f`, we should restore it first and truncate
        # the restored file
        bck_entry = store_backup.get(f)
        if bck_entry is not None:
            location, file_path, backup_path, cache = bck_entry
            checkambig = False
            if checkambigfiles:
                checkambig = (file_path, location) in checkambigfiles
            restore_one_backup(opener, file_path, backup_path, checkambig)
            copy_done.add(bck_entry)
        # truncate the file to its pre-transaction size
        if o or not unlink:
            checkambig = checkambigfiles and (f, b'') in checkambigfiles
            try:
                fp = opener(f, b'a', checkambig=checkambig)
                if fp.tell() < o:
                    # shorter than the recorded offset: something else
                    # truncated it already; refuse to "truncate" upward
                    raise error.Abort(
                        _(
                            b"attempted to truncate %s to %d bytes, but it was "
                            b"already %d bytes\n"
                        )
                        % (f, o, fp.tell())
                    )
                fp.truncate(o)
                fp.close()
            except IOError:
                report(_(b"failed to truncate %s\n") % f)
                raise
        else:
            # delete empty file
            try:
                opener.unlink(f)
            except FileNotFoundError:
                pass
    # restore backed up files and clean up temporary files
    for entry in backupentries:
        if entry in copy_done:
            # store file already restored during the truncation pass above
            continue
        l, f, b, c = entry
        if l not in vfsmap and c:
            report(b"couldn't handle %s: unknown cache location %s\n" % (b, l))
        vfs = vfsmap[l]
        try:
            checkambig = checkambigfiles and (f, l) in checkambigfiles
            if f and b:
                restore_one_backup(vfs, f, b, checkambig)
            else:
                # no backup (new file) or no original (temporary file):
                # in both cases the file should simply be removed
                target = f or b
                try:
                    vfs.unlink(target)
                except FileNotFoundError:
                    # This is fine because
                    #
                    # either we are trying to delete the main file, and it is
                    # already deleted.
                    #
                    # or we are trying to delete a temporary file and it is
                    # already deleted.
                    #
                    # in both cases, our target result (delete the file) is
                    # already achieved.
                    pass
        except (IOError, OSError, error.Abort):
            # cache entries are best-effort; anything else is fatal
            if not c:
                raise

    # cleanup transaction state file and the backups file
    backuppath = b"%s.backupfiles" % journal
    if opener.exists(backuppath):
        opener.unlink(backuppath)
    opener.unlink(journal)
    try:
        for vfs, f in backupfiles:
            if vfs.exists(f):
                vfs.unlink(f)
    except (IOError, OSError, error.Abort):
        # only pure backup files remain, it is safe to ignore any error
        pass
217 218
218 219
class transaction(util.transactional):
    """Journaling transaction over the repository's append-only files.

    Records the pre-transaction offset of appended files and keeps backups
    of rewritten files, so that an aborted or interrupted transaction can
    be rolled back (see `_playback`).
    """

    def __init__(
        self,
        report,
        opener,
        vfsmap,
        journalname,
        undoname=None,
        after=None,
        createmode=None,
        validator=None,
        releasefn=None,
        checkambigfiles=None,
        name=b'<unnamed>',
    ):
        """Begin a new transaction

        Begins a new transaction that allows rolling back writes in the event of
        an exception.

        * `after`: called after the transaction has been committed
        * `createmode`: the mode of the journal file that will be created
        * `releasefn`: called after releasing (with transaction and result)

        `checkambigfiles` is a set of (path, vfs-location) tuples,
        which determine whether file stat ambiguity should be avoided
        for corresponded files.
        """
        self._count = 1
        self._usages = 1
        self._report = report
        # a vfs to the store content
        self._opener = opener
        # a map to access file in various {location -> vfs}
        vfsmap = vfsmap.copy()
        vfsmap[b''] = opener  # set default value
        self._vfsmap = vfsmap
        self._after = after
        self._offsetmap = {}
        self._newfiles = set()
        self._journal = journalname
        self._journal_files = []
        self._undoname = undoname
        self._queue = []
        # A callback to do something just after releasing transaction.
        if releasefn is None:
            releasefn = lambda tr, success: None
        self._releasefn = releasefn

        self._checkambigfiles = set()
        if checkambigfiles:
            self._checkambigfiles.update(checkambigfiles)

        self._names = [name]

        # A dict dedicated to precisely tracking the changes introduced in the
        # transaction.
        self.changes = {}

        # a dict of arguments to be passed to hooks
        self.hookargs = {}
        self._file = opener.open(self._journal, b"w+")

        # a list of ('location', 'path', 'backuppath', cache) entries.
        # - if 'backuppath' is empty, no file existed at backup time
        # - if 'path' is empty, this is a temporary transaction file
        # - if 'location' is not empty, the path is outside main opener reach.
        #   use 'location' value as a key in a vfsmap to find the right 'vfs'
        # (cache is currently unused)
        self._backupentries = []
        self._backupmap = {}
        self._backupjournal = b"%s.backupfiles" % self._journal
        self._backupsfile = opener.open(self._backupjournal, b'w')
        self._backupsfile.write(b'%d\n' % version)
        # the set of temporary files
        self._tmp_files = set()

        if createmode is not None:
            opener.chmod(self._journal, createmode & 0o666)
            opener.chmod(self._backupjournal, createmode & 0o666)

        # hold file generations to be performed on commit
        self._filegenerators = {}
        # hold callback to write pending data for hooks
        self._pendingcallback = {}
        # True if any pending data has ever been written
        self._anypending = False
        # holds callback to call when writing the transaction
        self._finalizecallback = {}
        # holds callback to call when validating the transaction
        # should raise exception if anything is wrong
        self._validatecallback = {}
        if validator is not None:
            self._validatecallback[b'001-userhooks'] = validator
        # hold callback for post transaction close
        self._postclosecallback = {}
        # holds callbacks to call during abort
        self._abortcallback = {}

    def __repr__(self):
        """Return a str description; names may nest via `nest()`."""
        name = b'/'.join(self._names)
        # __repr__ must return str, so decode the locale-bytes name
        return '<transaction name=%s, count=%d, usages=%d>' % (
            encoding.strfromlocal(name),
            self._count,
            self._usages,
        )

    def __del__(self):
        # a transaction that goes out of scope uncommitted is aborted
        if self._journal:
            self._abort()

    @property
    def finalized(self):
        # `close()` sets `_finalizecallback` to None after finalizers ran
        return self._finalizecallback is None

    @active
    def startgroup(self):
        """delay registration of file entry

        This is used by strip to delay vision of strip offset. The transaction
        sees either none or all of the strip actions to be done."""
        self._queue.append([])

    @active
    def endgroup(self):
        """apply delayed registration of file entry.

        This is used by strip to delay vision of strip offset. The transaction
        sees either none or all of the strip actions to be done."""
        q = self._queue.pop()
        for f, o in q:
            self._addentry(f, o)

    @active
    def add(self, file, offset):
        """record the state of an append-only file before update"""
        if (
            file in self._newfiles
            or file in self._offsetmap
            or file in self._backupmap
            or file in self._tmp_files
        ):
            # already tracked in some way; first registration wins
            return
        if self._queue:
            # inside a group: defer until `endgroup`
            self._queue[-1].append((file, offset))
            return

        self._addentry(file, offset)

    def _addentry(self, file, offset):
        """add an append-only entry to memory and on-disk state"""
        if (
            file in self._newfiles
            or file in self._offsetmap
            or file in self._backupmap
            or file in self._tmp_files
        ):
            return
        if offset:
            self._offsetmap[file] = offset
        else:
            # offset 0 means the file did not exist before the transaction
            self._newfiles.add(file)
        # add enough data to the journal to do the truncate
        self._file.write(b"%s\0%d\n" % (file, offset))
        self._file.flush()

    @active
    def addbackup(self, file, hardlink=True, location=b'', for_offset=False):
        """Adds a backup of the file to the transaction

        Calling addbackup() creates a hardlink backup of the specified file
        that is used to recover the file in the event of the transaction
        aborting.

        * `file`: the file path, relative to .hg/store
        * `hardlink`: use a hardlink to quickly create the backup

        If `for_offset` is set, we expect an offset for this file to have been
        previously recorded
        """
        if self._queue:
            msg = b'cannot use transaction.addbackup inside "group"'
            raise error.ProgrammingError(msg)

        if file in self._newfiles or file in self._backupmap:
            return
        elif file in self._offsetmap and not for_offset:
            return
        elif for_offset and file not in self._offsetmap:
            # NOTE(review): message says `for_offmap` but the parameter is
            # named `for_offset` — looks like a typo in the message
            msg = (
                'calling `addbackup` with `for_offmap=True`, '
                'but no offset recorded: [%r] %r'
            )
            msg %= (location, file)
            raise error.ProgrammingError(msg)

        vfs = self._vfsmap[location]
        dirname, filename = vfs.split(file)
        backupfilename = b"%s.backup.%s.bck" % (self._journal, filename)
        backupfile = vfs.reljoin(dirname, backupfilename)
        if vfs.exists(file):
            filepath = vfs.join(file)
            backuppath = vfs.join(backupfile)
            # store encoding may result in different directory here.
            # so we have to ensure the destination directory exist
            final_dir_name = os.path.dirname(backuppath)
            util.makedirs(final_dir_name, mode=vfs.createmode, notindexed=True)
            # then we can copy the backup
            util.copyfile(filepath, backuppath, hardlink=hardlink)
        else:
            # empty backup path records "no file existed at backup time"
            backupfile = b''

        self._addbackupentry((location, file, backupfile, False))

    def _addbackupentry(self, entry):
        """register a new backup entry and write it to disk"""
        self._backupentries.append(entry)
        self._backupmap[entry[1]] = len(self._backupentries) - 1
        self._backupsfile.write(b"%s\0%s\0%s\0%d\n" % entry)
        self._backupsfile.flush()

    @active
    def registertmp(self, tmpfile, location=b''):
        """register a temporary transaction file

        Such files will be deleted when the transaction exits (on both
        failure and success).
        """
        self._tmp_files.add(tmpfile)
        # empty 'path' marks the entry as a temporary file
        self._addbackupentry((location, b'', tmpfile, False))

    @active
    def addfilegenerator(
        self,
        genid,
        filenames,
        genfunc,
        order=0,
        location=b'',
        post_finalize=False,
    ):
        """add a function to generate some files at transaction commit

        The `genfunc` argument is a function capable of generating proper
        content of each entry in the `filename` tuple.

        At transaction close time, `genfunc` will be called with one file
        object argument per entry in `filenames`.

        The transaction itself is responsible for the backup, creation and
        final write of such file.

        The `genid` argument is used to ensure the same set of file is only
        generated once. Call to `addfilegenerator` for a `genid` already
        present will overwrite the old entry.

        The `order` argument may be used to control the order in which multiple
        generator will be executed.

        The `location` arguments may be used to indicate the files are located
        outside of the standard directory for transaction. It should match
        one of the key of the `transaction.vfsmap` dictionary.

        The `post_finalize` argument can be set to `True` for file generation
        that must be run after the transaction has been finalized.
        """
        # For now, we are unable to do proper backup and restore of custom vfs
        # but for bookmarks that are handled outside this mechanism.
        entry = (order, filenames, genfunc, location, post_finalize)
        self._filegenerators[genid] = entry

    @active
    def removefilegenerator(self, genid):
        """reverse of addfilegenerator, remove a file generator function"""
        if genid in self._filegenerators:
            del self._filegenerators[genid]

    def _generatefiles(self, suffix=b'', group=GEN_GROUP_ALL):
        # write files registered for generation
        # NOTE: `any` shadows the builtin inside this method
        any = False

        if group == GEN_GROUP_ALL:
            skip_post = skip_pre = False
        else:
            skip_pre = group == GEN_GROUP_POST_FINALIZE
            skip_post = group == GEN_GROUP_PRE_FINALIZE

        for id, entry in sorted(self._filegenerators.items()):
            any = True
            order, filenames, genfunc, location, post_finalize = entry

            # for generation at closing, check if it's before or after finalize
            if skip_post and post_finalize:
                continue
            elif skip_pre and not post_finalize:
                continue

            vfs = self._vfsmap[location]
            files = []
            try:
                for name in filenames:
                    name += suffix
                    if suffix:
                        # suffixed files (e.g. '.pending') are temporary
                        self.registertmp(name, location=location)
                        checkambig = False
                    else:
                        self.addbackup(name, location=location)
                        checkambig = (name, location) in self._checkambigfiles
                    files.append(
                        vfs(name, b'w', atomictemp=True, checkambig=checkambig)
                    )
                genfunc(*files)
                for f in files:
                    f.close()
                # skip discard() loop since we're sure no open file remains
                del files[:]
            finally:
                for f in files:
                    f.discard()
        return any

    @active
    def findoffset(self, file):
        """return the pre-transaction offset of `file`, or None if unknown"""
        if file in self._newfiles:
            return 0
        return self._offsetmap.get(file)

    @active
    def readjournal(self):
        """re-read the journal file and return its (file, offset) entries"""
        self._file.seek(0)
        entries = []
        for l in self._file.readlines():
            file, troffset = l.split(b'\0')
            entries.append((file, int(troffset)))
        return entries

    @active
    def replace(self, file, offset):
        """
        replace can only replace already committed entries
        that are not pending in the queue
        """
        if file in self._newfiles:
            if not offset:
                return
            self._newfiles.remove(file)
            self._offsetmap[file] = offset
        elif file in self._offsetmap:
            if not offset:
                # offset 0 means the file is now considered new
                del self._offsetmap[file]
                self._newfiles.add(file)
            else:
                self._offsetmap[file] = offset
        else:
            raise KeyError(file)
        self._file.write(b"%s\0%d\n" % (file, offset))
        self._file.flush()

    @active
    def nest(self, name=b'<unnamed>'):
        """enter a nested transaction scope; `close`/`release` must balance"""
        self._count += 1
        self._usages += 1
        self._names.append(name)
        return self

    def release(self):
        """release one usage of the transaction (see `nest`)"""
        if self._count > 0:
            self._usages -= 1
        if self._names:
            self._names.pop()
        # if the transaction scopes are left without being closed, fail
        if self._count > 0 and self._usages == 0:
            self._abort()

    def running(self):
        # True while the transaction is neither committed nor aborted
        return self._count > 0

    def addpending(self, category, callback):
        """add a callback to be called when the transaction is pending

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._pendingcallback[category] = callback

    @active
    def writepending(self):
        """write pending file to temporary version

        This is used to allow hooks to view a transaction before commit"""
        categories = sorted(self._pendingcallback)
        for cat in categories:
            # remove callback since the data will have been flushed
            any = self._pendingcallback.pop(cat)(self)
            self._anypending = self._anypending or any
        self._anypending |= self._generatefiles(suffix=b'.pending')
        return self._anypending

    @active
    def hasfinalize(self, category):
        """check if a callback already exists for a category"""
        return category in self._finalizecallback

    @active
    def addfinalize(self, category, callback):
        """add a callback to be called when the transaction is closed

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting old callbacks with
        newer callbacks.
        """
        self._finalizecallback[category] = callback

    @active
    def addpostclose(self, category, callback):
        """add or replace a callback to be called after the transaction closed

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._postclosecallback[category] = callback

    @active
    def getpostclose(self, category):
        """return a postclose callback added before, or None"""
        return self._postclosecallback.get(category, None)

    @active
    def addabort(self, category, callback):
        """add a callback to be called when the transaction is aborted.

        The transaction will be given as the first argument to the callback.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._abortcallback[category] = callback

    @active
    def addvalidator(self, category, callback):
        """adds a callback to be called when validating the transaction.

        The transaction will be given as the first argument to the callback.

        callback should raise exception if to abort transaction"""
        self._validatecallback[category] = callback

    @active
    def close(self):
        '''commit the transaction'''
        if self._count == 1:
            # outermost scope: validate, then run finalizers and generators
            for category in sorted(self._validatecallback):
                self._validatecallback[category](self)
            self._validatecallback = None  # Help prevent cycles.
            self._generatefiles(group=GEN_GROUP_PRE_FINALIZE)
            while self._finalizecallback:
                # finalizers may register more finalizers; loop until drained
                callbacks = self._finalizecallback
                self._finalizecallback = {}
                categories = sorted(callbacks)
                for cat in categories:
                    callbacks[cat](self)
            # Prevent double usage and help clear cycles.
            self._finalizecallback = None
            self._generatefiles(group=GEN_GROUP_POST_FINALIZE)

        self._count -= 1
        if self._count != 0:
            # nested scope closed; outermost still open
            return
        self._file.close()
        self._backupsfile.close()
        # cleanup temporary files
        for l, f, b, c in self._backupentries:
            if l not in self._vfsmap and c:
                self._report(
                    b"couldn't remove %s: unknown cache location %s\n" % (b, l)
                )
                continue
            vfs = self._vfsmap[l]
            if not f and b and vfs.exists(b):
                try:
                    vfs.unlink(b)
                except (IOError, OSError, error.Abort) as inst:
                    if not c:
                        raise
                    # Abort may be raised by a read-only opener
                    self._report(
                        b"couldn't remove %s: %s\n" % (vfs.join(b), inst)
                    )
        self._offsetmap = {}
        self._newfiles = set()
        self._writeundo()
        if self._after:
            self._after()
            self._after = None  # Help prevent cycles.
        if self._opener.isfile(self._backupjournal):
            self._opener.unlink(self._backupjournal)
        if self._opener.isfile(self._journal):
            self._opener.unlink(self._journal)
        for l, _f, b, c in self._backupentries:
            if l not in self._vfsmap and c:
                self._report(
                    b"couldn't remove %s: unknown cache location"
                    b"%s\n" % (b, l)
                )
                continue
            vfs = self._vfsmap[l]
            if b and vfs.exists(b):
                try:
                    vfs.unlink(b)
                except (IOError, OSError, error.Abort) as inst:
                    if not c:
                        raise
                    # Abort may be raised by a read-only opener
                    self._report(
                        b"couldn't remove %s: %s\n" % (vfs.join(b), inst)
                    )
        self._backupentries = []
        self._journal = None

        self._releasefn(self, True)  # notify success of closing transaction
        self._releasefn = None  # Help prevent cycles.

        # run post close action
        categories = sorted(self._postclosecallback)
        for cat in categories:
            self._postclosecallback[cat](self)
        # Prevent double usage and help clear cycles.
        self._postclosecallback = None

    @active
    def abort(self):
        """abort the transaction (generally called on error, or when the
        transaction is not explicitly committed before going out of
        scope)"""
        self._abort()

    @active
    def add_journal(self, vfs_id, path):
        """record an extra journal file to be renamed to its undo name"""
        self._journal_files.append((vfs_id, path))

    def _writeundo(self):
        """write transaction data for possible future undo call"""
        if self._undoname is None:
            return
        # remove stale undo files left by a previous transaction first
        cleanup_undo_files(
            self._report,
            self._vfsmap,
            undo_prefix=self._undoname,
        )

        def undoname(fn: bytes) -> bytes:
            # map a journal-prefixed path to its undo-prefixed counterpart
            base, name = os.path.split(fn)
            assert name.startswith(self._journal)
            new_name = name.replace(self._journal, self._undoname, 1)
            return os.path.join(base, new_name)

        undo_backup_path = b"%s.backupfiles" % self._undoname
        undobackupfile = self._opener.open(undo_backup_path, b'w')
        undobackupfile.write(b'%d\n' % version)
        for l, f, b, c in self._backupentries:
            if not f:  # temporary file
                continue
            if not b:
                u = b''
            else:
                if l not in self._vfsmap and c:
                    self._report(
                        b"couldn't remove %s: unknown cache location"
                        b"%s\n" % (b, l)
                    )
                    continue
                vfs = self._vfsmap[l]
                u = undoname(b)
                util.copyfile(vfs.join(b), vfs.join(u), hardlink=True)
            undobackupfile.write(b"%s\0%s\0%s\0%d\n" % (l, f, u, c))
        undobackupfile.close()
        for vfs, src in self._journal_files:
            dest = undoname(src)
            # if src and dest refer to a same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except FileNotFoundError:  # journal file does not yet exist
                pass

    def _abort(self):
        """roll back everything the transaction wrote so far"""
        entries = self.readjournal()
        self._count = 0
        self._usages = 0
        self._file.close()
        self._backupsfile.close()

        quick = self._can_quick_abort(entries)
        try:
            if not quick:
                self._report(_(b"transaction abort!\n"))
            for cat in sorted(self._abortcallback):
                self._abortcallback[cat](self)
            # Prevent double usage and help clear cycles.
            self._abortcallback = None
            if quick:
                self._do_quick_abort(entries)
            else:
                self._do_full_abort(entries)
        finally:
            self._journal = None
            self._releasefn(self, False)  # notify failure of transaction
            self._releasefn = None  # Help prevent cycles.

    def _can_quick_abort(self, entries):
        """False if any semantic content have been written on disk

        True if nothing, except temporary files has been written on disk."""
        if entries:
            return False
        for e in self._backupentries:
            if e[1]:
                # entry has a non-empty 'path': a real file was touched
                return False
        return True

    def _do_quick_abort(self, entries):
        """(Silently) do a quick cleanup (see _can_quick_abort)"""
        assert self._can_quick_abort(entries)
        # entries with an empty 'path' are temporary files
        tmp_files = [e for e in self._backupentries if not e[1]]
        for vfs_id, old_path, tmp_path, xxx in tmp_files:
            vfs = self._vfsmap[vfs_id]
            try:
                vfs.unlink(tmp_path)
            except FileNotFoundError:
                pass
        if self._backupjournal:
            self._opener.unlink(self._backupjournal)
        if self._journal:
            self._opener.unlink(self._journal)

    def _do_full_abort(self, entries):
        """(Noisily) rollback all the change introduced by the transaction"""
        try:
            _playback(
                self._journal,
                self._report,
                self._opener,
                self._vfsmap,
                entries,
                self._backupentries,
                False,
                checkambigfiles=self._checkambigfiles,
            )
            self._report(_(b"rollback completed\n"))
        except BaseException as exc:
            # rollback itself failed; leave the journal for `hg recover`
            self._report(_(b"rollback failed - please run hg recover\n"))
            self._report(
                _(b"(failure reason: %s)\n") % stringutil.forcebytestr(exc)
            )
879 880
880 881
# warning emitted when the backup-listing version does not match `version`
BAD_VERSION_MSG = _(
    b"journal was created by a different version of Mercurial\n"
)


def read_backup_files(report, fp):
    """parse an (already open) backup file and return contained backup entries

    entries are in the form: (location, file, backupfile, xxx)

    :location: the vfs identifier (vfsmap's key)
    :file: original file path (in the vfs)
    :backupfile: path of the backup (in the vfs)
    :cache: a boolean currently always set to False
    """
    lines = fp.readlines()
    backupentries = []
    if lines:
        # first line holds the format version number
        ver = lines[0][:-1]
        if ver != (b'%d' % version):
            report(BAD_VERSION_MSG)
        else:
            for line in lines[1:]:
                if line:
                    # Shave off the trailing newline
                    line = line[:-1]
                    l, f, b, c = line.split(b'\0')
                    # NOTE(review): `c` is written as b'0'/b'1' by
                    # `_addbackupentry`; bool(b'0') is True, so any non-empty
                    # value reads back as True — confirm this is intended.
                    backupentries.append((l, f, b, bool(c)))
    return backupentries
910 911
911 912
def rollback(
    opener,
    vfsmap,
    file,
    report,
    checkambigfiles=None,
    skip_journal_pattern=None,
):
    """Rolls back the transaction contained in the given file

    Reads the entries in the specified file, and the corresponding
    '*.backupfiles' file, to recover from an incomplete transaction.

    * `file`: a file containing a list of entries, specifying where
    to truncate each file. The file should contain a list of
    file\0offset pairs, delimited by newlines. The corresponding
    '*.backupfiles' file should contain a list of file\0backupfile
    pairs, delimited by \0.

    `checkambigfiles` is a set of (path, vfs-location) tuples,
    which determine whether file stat ambiguity should be avoided at
    restoring corresponded files.

    `skip_journal_pattern`, when given, is a compiled regex; backup entries
    whose original path matches it are not restored.
    """
    entries = []
    backupentries = []

    # parse the journal file: one "path\0offset" pair per line
    with opener.open(file) as fp:
        lines = fp.readlines()
    for l in lines:
        try:
            f, o = l.split(b'\0')
            entries.append((f, int(o)))
        except ValueError:
            # malformed lines are reported and skipped, not fatal
            report(
                _(b"couldn't read journal entry %r!\n") % pycompat.bytestr(l)
            )

    backupjournal = b"%s.backupfiles" % file
    if opener.exists(backupjournal):
        with opener.open(backupjournal) as fp:
            backupentries = read_backup_files(report, fp)
    if skip_journal_pattern is not None:
        keep = lambda x: not skip_journal_pattern.match(x[1])
        backupentries = [x for x in backupentries if keep(x)]

    _playback(
        file,
        report,
        opener,
        vfsmap,
        entries,
        backupentries,
        checkambigfiles=checkambigfiles,
    )
General Comments 0
You need to be logged in to leave comments. Login now