##// END OF EJS Templates
transaction: move the restoration of backup file in a small closure...
marmoute -
r51235:86dc9e09 stable
parent child Browse files
Show More
@@ -1,910 +1,914 b''
1 1 # transaction.py - simple journaling scheme for mercurial
2 2 #
3 3 # This transaction scheme is intended to gracefully handle program
4 4 # errors and interruptions. More serious failures like system crashes
5 5 # can be recovered with an fsck-like tool. As the whole repository is
6 6 # effectively log-structured, this should amount to simply truncating
7 7 # anything that isn't referenced in the changelog.
8 8 #
9 9 # Copyright 2005, 2006 Olivia Mackall <olivia@selenic.com>
10 10 #
11 11 # This software may be used and distributed according to the terms of the
12 12 # GNU General Public License version 2 or any later version.
13 13
14 14 import errno
15 15 import os
16 16
17 17 from .i18n import _
18 18 from . import (
19 19 error,
20 20 pycompat,
21 21 util,
22 22 )
23 23 from .utils import stringutil
24 24
25 25 version = 2
26 26
27 27 GEN_GROUP_ALL = b'all'
28 28 GEN_GROUP_PRE_FINALIZE = b'prefinalize'
29 29 GEN_GROUP_POST_FINALIZE = b'postfinalize'
30 30
31 31
32 32 def active(func):
33 33 def _active(self, *args, **kwds):
34 34 if self._count == 0:
35 35 raise error.ProgrammingError(
36 36 b'cannot use transaction when it is already committed/aborted'
37 37 )
38 38 return func(self, *args, **kwds)
39 39
40 40 return _active
41 41
42 42
43 43 UNDO_BACKUP = b'%s.backupfiles'
44 44
45 45 UNDO_FILES_MAY_NEED_CLEANUP = [
46 46 # legacy entries that might exist on disk from previous versions:
47 47 (b'store', b'%s.narrowspec'),
48 48 (b'plain', b'%s.narrowspec.dirstate'),
49 49 (b'plain', b'%s.branch'),
50 50 (b'plain', b'%s.bookmarks'),
51 51 (b'store', b'%s.phaseroots'),
52 52 (b'plain', b'%s.dirstate'),
53 53 # files actually in use today:
54 54 (b'plain', b'%s.desc'),
55 55 # Always delete undo last to make sure we detect that a clean up is needed if
56 56 # the process is interrupted.
57 57 (b'store', b'%s'),
58 58 ]
59 59
60 60
61 61 def cleanup_undo_files(report, vfsmap, undo_prefix=b'undo'):
62 62 """remove "undo" files used by the rollback logic
63 63
64 64 This is useful to prevent rollback running in situations where it does not
65 65 make sense. For example after a strip.
66 66 """
67 67 backup_listing = UNDO_BACKUP % undo_prefix
68 68
69 69 backup_entries = []
70 70 undo_files = []
71 71 svfs = vfsmap[b'store']
72 72 try:
73 73 with svfs(backup_listing) as f:
74 74 backup_entries = read_backup_files(report, f)
75 75 except OSError as e:
76 76 if e.errno != errno.ENOENT:
77 77 msg = _(b'could not read %s: %s\n')
78 78 msg %= (svfs.join(backup_listing), stringutil.forcebytestr(e))
79 79 report(msg)
80 80
81 81 for location, f, backup_path, c in backup_entries:
82 82 if location in vfsmap and backup_path:
83 83 undo_files.append((vfsmap[location], backup_path))
84 84
85 85 undo_files.append((svfs, backup_listing))
86 86 for location, undo_path in UNDO_FILES_MAY_NEED_CLEANUP:
87 87 undo_files.append((vfsmap[location], undo_path % undo_prefix))
88 88 for undovfs, undofile in undo_files:
89 89 try:
90 90 undovfs.unlink(undofile)
91 91 except OSError as e:
92 92 if e.errno != errno.ENOENT:
93 93 msg = _(b'error removing %s: %s\n')
94 94 msg %= (undovfs.join(undofile), stringutil.forcebytestr(e))
95 95 report(msg)
96 96
97 97
98 98 def _playback(
99 99 journal,
100 100 report,
101 101 opener,
102 102 vfsmap,
103 103 entries,
104 104 backupentries,
105 105 unlink=True,
106 106 checkambigfiles=None,
107 107 ):
108 backupfiles = []
109
110 def restore_one_backup(vfs, f, b, checkambig):
111 filepath = vfs.join(f)
112 backuppath = vfs.join(b)
113 try:
114 util.copyfile(backuppath, filepath, checkambig=checkambig)
115 backupfiles.append((vfs, b))
116 except IOError as exc:
117 e_msg = stringutil.forcebytestr(exc)
118 report(_(b"failed to recover %s (%s)\n") % (f, e_msg))
119 raise
120
108 121 for f, o in sorted(dict(entries).items()):
109 122 if o or not unlink:
110 123 checkambig = checkambigfiles and (f, b'') in checkambigfiles
111 124 try:
112 125 fp = opener(f, b'a', checkambig=checkambig)
113 126 if fp.tell() < o:
114 127 raise error.Abort(
115 128 _(
116 129 b"attempted to truncate %s to %d bytes, but it was "
117 130 b"already %d bytes\n"
118 131 )
119 132 % (f, o, fp.tell())
120 133 )
121 134 fp.truncate(o)
122 135 fp.close()
123 136 except IOError:
124 137 report(_(b"failed to truncate %s\n") % f)
125 138 raise
126 139 else:
127 140 try:
128 141 opener.unlink(f)
129 142 except FileNotFoundError:
130 143 pass
131 144
132 backupfiles = []
133 145 for l, f, b, c in backupentries:
134 146 if l not in vfsmap and c:
135 147 report(b"couldn't handle %s: unknown cache location %s\n" % (b, l))
136 148 vfs = vfsmap[l]
137 149 try:
150 checkambig = checkambigfiles and (f, l) in checkambigfiles
138 151 if f and b:
139 filepath = vfs.join(f)
140 backuppath = vfs.join(b)
141 checkambig = checkambigfiles and (f, l) in checkambigfiles
142 try:
143 util.copyfile(backuppath, filepath, checkambig=checkambig)
144 backupfiles.append((vfs, b))
145 except IOError as exc:
146 e_msg = stringutil.forcebytestr(exc)
147 report(_(b"failed to recover %s (%s)\n") % (f, e_msg))
148 raise
152 restore_one_backup(vfs, f, b, checkambig)
149 153 else:
150 154 target = f or b
151 155 try:
152 156 vfs.unlink(target)
153 157 except FileNotFoundError:
154 158 # This is fine because
155 159 #
156 160 # either we are trying to delete the main file, and it is
157 161 # already deleted.
158 162 #
159 163 # or we are trying to delete a temporary file and it is
160 164 # already deleted.
161 165 #
162 166 # in both case, our target result (delete the file) is
163 167 # already achieved.
164 168 pass
165 169 except (IOError, OSError, error.Abort):
166 170 if not c:
167 171 raise
168 172
169 173 backuppath = b"%s.backupfiles" % journal
170 174 if opener.exists(backuppath):
171 175 opener.unlink(backuppath)
172 176 opener.unlink(journal)
173 177 try:
174 178 for vfs, f in backupfiles:
175 179 if vfs.exists(f):
176 180 vfs.unlink(f)
177 181 except (IOError, OSError, error.Abort):
178 182 # only pure backup file remains, it is safe to ignore any error
179 183 pass
180 184
181 185
182 186 class transaction(util.transactional):
183 187 def __init__(
184 188 self,
185 189 report,
186 190 opener,
187 191 vfsmap,
188 192 journalname,
189 193 undoname=None,
190 194 after=None,
191 195 createmode=None,
192 196 validator=None,
193 197 releasefn=None,
194 198 checkambigfiles=None,
195 199 name='<unnamed>',
196 200 ):
197 201 """Begin a new transaction
198 202
199 203 Begins a new transaction that allows rolling back writes in the event of
200 204 an exception.
201 205
202 206 * `after`: called after the transaction has been committed
203 207 * `createmode`: the mode of the journal file that will be created
204 208 * `releasefn`: called after releasing (with transaction and result)
205 209
206 210 `checkambigfiles` is a set of (path, vfs-location) tuples,
207 211 which determine whether file stat ambiguity should be avoided
208 212 for corresponded files.
209 213 """
210 214 self._count = 1
211 215 self._usages = 1
212 216 self._report = report
213 217 # a vfs to the store content
214 218 self._opener = opener
215 219 # a map to access file in various {location -> vfs}
216 220 vfsmap = vfsmap.copy()
217 221 vfsmap[b''] = opener # set default value
218 222 self._vfsmap = vfsmap
219 223 self._after = after
220 224 self._offsetmap = {}
221 225 self._newfiles = set()
222 226 self._journal = journalname
223 227 self._journal_files = []
224 228 self._undoname = undoname
225 229 self._queue = []
226 230 # A callback to do something just after releasing transaction.
227 231 if releasefn is None:
228 232 releasefn = lambda tr, success: None
229 233 self._releasefn = releasefn
230 234
231 235 self._checkambigfiles = set()
232 236 if checkambigfiles:
233 237 self._checkambigfiles.update(checkambigfiles)
234 238
235 239 self._names = [name]
236 240
237 241 # A dict dedicated to precisely tracking the changes introduced in the
238 242 # transaction.
239 243 self.changes = {}
240 244
241 245 # a dict of arguments to be passed to hooks
242 246 self.hookargs = {}
243 247 self._file = opener.open(self._journal, b"w+")
244 248
245 249 # a list of ('location', 'path', 'backuppath', cache) entries.
246 250 # - if 'backuppath' is empty, no file existed at backup time
247 251 # - if 'path' is empty, this is a temporary transaction file
248 252 # - if 'location' is not empty, the path is outside main opener reach.
249 253 # use 'location' value as a key in a vfsmap to find the right 'vfs'
250 254 # (cache is currently unused)
251 255 self._backupentries = []
252 256 self._backupmap = {}
253 257 self._backupjournal = b"%s.backupfiles" % self._journal
254 258 self._backupsfile = opener.open(self._backupjournal, b'w')
255 259 self._backupsfile.write(b'%d\n' % version)
256 260
257 261 if createmode is not None:
258 262 opener.chmod(self._journal, createmode & 0o666)
259 263 opener.chmod(self._backupjournal, createmode & 0o666)
260 264
261 265 # hold file generations to be performed on commit
262 266 self._filegenerators = {}
263 267 # hold callback to write pending data for hooks
264 268 self._pendingcallback = {}
266 270 # True if any pending data have been written ever
266 270 self._anypending = False
267 271 # holds callback to call when writing the transaction
268 272 self._finalizecallback = {}
269 273 # holds callback to call when validating the transaction
270 274 # should raise exception if anything is wrong
271 275 self._validatecallback = {}
272 276 if validator is not None:
273 277 self._validatecallback[b'001-userhooks'] = validator
274 278 # hold callback for post transaction close
275 279 self._postclosecallback = {}
276 280 # holds callbacks to call during abort
277 281 self._abortcallback = {}
278 282
279 283 def __repr__(self):
280 284 name = '/'.join(self._names)
281 285 return '<transaction name=%s, count=%d, usages=%d>' % (
282 286 name,
283 287 self._count,
284 288 self._usages,
285 289 )
286 290
287 291 def __del__(self):
288 292 if self._journal:
289 293 self._abort()
290 294
291 295 @property
292 296 def finalized(self):
293 297 return self._finalizecallback is None
294 298
295 299 @active
296 300 def startgroup(self):
297 301 """delay registration of file entry
298 302
299 303 This is used by strip to delay vision of strip offset. The transaction
300 304 sees either none or all of the strip actions to be done."""
301 305 self._queue.append([])
302 306
303 307 @active
304 308 def endgroup(self):
305 309 """apply delayed registration of file entry.
306 310
307 311 This is used by strip to delay vision of strip offset. The transaction
308 312 sees either none or all of the strip actions to be done."""
309 313 q = self._queue.pop()
310 314 for f, o in q:
311 315 self._addentry(f, o)
312 316
313 317 @active
314 318 def add(self, file, offset):
315 319 """record the state of an append-only file before update"""
316 320 if (
317 321 file in self._newfiles
318 322 or file in self._offsetmap
319 323 or file in self._backupmap
320 324 ):
321 325 return
322 326 if self._queue:
323 327 self._queue[-1].append((file, offset))
324 328 return
325 329
326 330 self._addentry(file, offset)
327 331
328 332 def _addentry(self, file, offset):
329 333 """add an append-only entry to memory and on-disk state"""
330 334 if (
331 335 file in self._newfiles
332 336 or file in self._offsetmap
333 337 or file in self._backupmap
334 338 ):
335 339 return
336 340 if offset:
337 341 self._offsetmap[file] = offset
338 342 else:
339 343 self._newfiles.add(file)
340 344 # add enough data to the journal to do the truncate
341 345 self._file.write(b"%s\0%d\n" % (file, offset))
342 346 self._file.flush()
343 347
344 348 @active
345 349 def addbackup(self, file, hardlink=True, location=b''):
346 350 """Adds a backup of the file to the transaction
347 351
348 352 Calling addbackup() creates a hardlink backup of the specified file
349 353 that is used to recover the file in the event of the transaction
350 354 aborting.
351 355
352 356 * `file`: the file path, relative to .hg/store
353 357 * `hardlink`: use a hardlink to quickly create the backup
354 358 """
355 359 if self._queue:
356 360 msg = b'cannot use transaction.addbackup inside "group"'
357 361 raise error.ProgrammingError(msg)
358 362
359 363 if (
360 364 file in self._newfiles
361 365 or file in self._offsetmap
362 366 or file in self._backupmap
363 367 ):
364 368 return
365 369 vfs = self._vfsmap[location]
366 370 dirname, filename = vfs.split(file)
367 371 backupfilename = b"%s.backup.%s" % (self._journal, filename)
368 372 backupfile = vfs.reljoin(dirname, backupfilename)
369 373 if vfs.exists(file):
370 374 filepath = vfs.join(file)
371 375 backuppath = vfs.join(backupfile)
372 376 util.copyfile(filepath, backuppath, hardlink=hardlink)
373 377 else:
374 378 backupfile = b''
375 379
376 380 self._addbackupentry((location, file, backupfile, False))
377 381
378 382 def _addbackupentry(self, entry):
379 383 """register a new backup entry and write it to disk"""
380 384 self._backupentries.append(entry)
381 385 self._backupmap[entry[1]] = len(self._backupentries) - 1
382 386 self._backupsfile.write(b"%s\0%s\0%s\0%d\n" % entry)
383 387 self._backupsfile.flush()
384 388
385 389 @active
386 390 def registertmp(self, tmpfile, location=b''):
387 391 """register a temporary transaction file
388 392
389 393 Such files will be deleted when the transaction exits (on both
390 394 failure and success).
391 395 """
392 396 self._addbackupentry((location, b'', tmpfile, False))
393 397
394 398 @active
395 399 def addfilegenerator(
396 400 self,
397 401 genid,
398 402 filenames,
399 403 genfunc,
400 404 order=0,
401 405 location=b'',
402 406 post_finalize=False,
403 407 ):
404 408 """add a function to generate some files at transaction commit
405 409
406 410 The `genfunc` argument is a function capable of generating proper
407 411 content of each entry in the `filename` tuple.
408 412
409 413 At transaction close time, `genfunc` will be called with one file
410 414 object argument per entry in `filenames`.
411 415
412 416 The transaction itself is responsible for the backup, creation and
413 417 final write of such file.
414 418
415 419 The `genid` argument is used to ensure the same set of files is only
416 420 generated once. Call to `addfilegenerator` for a `genid` already
417 421 present will overwrite the old entry.
418 422
419 423 The `order` argument may be used to control the order in which multiple
420 424 generators will be executed.
421 425
422 426 The `location` argument may be used to indicate the files are located
423 427 outside of the standard directory for transaction. It should match
424 428 one of the key of the `transaction.vfsmap` dictionary.
425 429
426 430 The `post_finalize` argument can be set to `True` for file generation
427 431 that must be run after the transaction has been finalized.
428 432 """
429 433 # For now, we are unable to do proper backup and restore of custom vfs
430 434 # but for bookmarks that are handled outside this mechanism.
431 435 entry = (order, filenames, genfunc, location, post_finalize)
432 436 self._filegenerators[genid] = entry
433 437
434 438 @active
435 439 def removefilegenerator(self, genid):
436 440 """reverse of addfilegenerator, remove a file generator function"""
437 441 if genid in self._filegenerators:
438 442 del self._filegenerators[genid]
439 443
440 444 def _generatefiles(self, suffix=b'', group=GEN_GROUP_ALL):
441 445 # write files registered for generation
442 446 any = False
443 447
444 448 if group == GEN_GROUP_ALL:
445 449 skip_post = skip_pre = False
446 450 else:
447 451 skip_pre = group == GEN_GROUP_POST_FINALIZE
448 452 skip_post = group == GEN_GROUP_PRE_FINALIZE
449 453
450 454 for id, entry in sorted(self._filegenerators.items()):
451 455 any = True
452 456 order, filenames, genfunc, location, post_finalize = entry
453 457
454 458 # for generation at closing, check if it's before or after finalize
455 459 if skip_post and post_finalize:
456 460 continue
457 461 elif skip_pre and not post_finalize:
458 462 continue
459 463
460 464 vfs = self._vfsmap[location]
461 465 files = []
462 466 try:
463 467 for name in filenames:
464 468 name += suffix
465 469 if suffix:
466 470 self.registertmp(name, location=location)
467 471 checkambig = False
468 472 else:
469 473 self.addbackup(name, location=location)
470 474 checkambig = (name, location) in self._checkambigfiles
471 475 files.append(
472 476 vfs(name, b'w', atomictemp=True, checkambig=checkambig)
473 477 )
474 478 genfunc(*files)
475 479 for f in files:
476 480 f.close()
477 481 # skip discard() loop since we're sure no open file remains
478 482 del files[:]
479 483 finally:
480 484 for f in files:
481 485 f.discard()
482 486 return any
483 487
484 488 @active
485 489 def findoffset(self, file):
486 490 if file in self._newfiles:
487 491 return 0
488 492 return self._offsetmap.get(file)
489 493
490 494 @active
491 495 def readjournal(self):
492 496 self._file.seek(0)
493 497 entries = []
494 498 for l in self._file.readlines():
495 499 file, troffset = l.split(b'\0')
496 500 entries.append((file, int(troffset)))
497 501 return entries
498 502
499 503 @active
500 504 def replace(self, file, offset):
501 505 """
502 506 replace can only replace already committed entries
503 507 that are not pending in the queue
504 508 """
505 509 if file in self._newfiles:
506 510 if not offset:
507 511 return
508 512 self._newfiles.remove(file)
509 513 self._offsetmap[file] = offset
510 514 elif file in self._offsetmap:
511 515 if not offset:
512 516 del self._offsetmap[file]
513 517 self._newfiles.add(file)
514 518 else:
515 519 self._offsetmap[file] = offset
516 520 else:
517 521 raise KeyError(file)
518 522 self._file.write(b"%s\0%d\n" % (file, offset))
519 523 self._file.flush()
520 524
521 525 @active
522 526 def nest(self, name='<unnamed>'):
523 527 self._count += 1
524 528 self._usages += 1
525 529 self._names.append(name)
526 530 return self
527 531
528 532 def release(self):
529 533 if self._count > 0:
530 534 self._usages -= 1
531 535 if self._names:
532 536 self._names.pop()
533 537 # if the transaction scopes are left without being closed, fail
534 538 if self._count > 0 and self._usages == 0:
535 539 self._abort()
536 540
537 541 def running(self):
538 542 return self._count > 0
539 543
540 544 def addpending(self, category, callback):
541 545 """add a callback to be called when the transaction is pending
542 546
543 547 The transaction will be given as callback's first argument.
544 548
545 549 Category is a unique identifier to allow overwriting an old callback
546 550 with a newer callback.
547 551 """
548 552 self._pendingcallback[category] = callback
549 553
550 554 @active
551 555 def writepending(self):
552 556 """write pending file to temporary version
553 557
554 558 This is used to allow hooks to view a transaction before commit"""
555 559 categories = sorted(self._pendingcallback)
556 560 for cat in categories:
557 561 # remove callback since the data will have been flushed
558 562 any = self._pendingcallback.pop(cat)(self)
559 563 self._anypending = self._anypending or any
560 564 self._anypending |= self._generatefiles(suffix=b'.pending')
561 565 return self._anypending
562 566
563 567 @active
564 568 def hasfinalize(self, category):
565 569 """check if a callback already exists for a category"""
566 570 return category in self._finalizecallback
567 571
568 572 @active
569 573 def addfinalize(self, category, callback):
570 574 """add a callback to be called when the transaction is closed
571 575
572 576 The transaction will be given as callback's first argument.
573 577
574 578 Category is a unique identifier to allow overwriting old callbacks with
575 579 newer callbacks.
576 580 """
577 581 self._finalizecallback[category] = callback
578 582
579 583 @active
580 584 def addpostclose(self, category, callback):
581 585 """add or replace a callback to be called after the transaction closed
582 586
583 587 The transaction will be given as callback's first argument.
584 588
585 589 Category is a unique identifier to allow overwriting an old callback
586 590 with a newer callback.
587 591 """
588 592 self._postclosecallback[category] = callback
589 593
590 594 @active
591 595 def getpostclose(self, category):
592 596 """return a postclose callback added before, or None"""
593 597 return self._postclosecallback.get(category, None)
594 598
595 599 @active
596 600 def addabort(self, category, callback):
597 601 """add a callback to be called when the transaction is aborted.
598 602
599 603 The transaction will be given as the first argument to the callback.
600 604
601 605 Category is a unique identifier to allow overwriting an old callback
602 606 with a newer callback.
603 607 """
604 608 self._abortcallback[category] = callback
605 609
606 610 @active
607 611 def addvalidator(self, category, callback):
608 612 """adds a callback to be called when validating the transaction.
609 613
610 614 The transaction will be given as the first argument to the callback.
611 615
612 616 callback should raise exception if to abort transaction"""
613 617 self._validatecallback[category] = callback
614 618
615 619 @active
616 620 def close(self):
617 621 '''commit the transaction'''
618 622 if self._count == 1:
619 623 for category in sorted(self._validatecallback):
620 624 self._validatecallback[category](self)
621 625 self._validatecallback = None # Help prevent cycles.
622 626 self._generatefiles(group=GEN_GROUP_PRE_FINALIZE)
623 627 while self._finalizecallback:
624 628 callbacks = self._finalizecallback
625 629 self._finalizecallback = {}
626 630 categories = sorted(callbacks)
627 631 for cat in categories:
628 632 callbacks[cat](self)
629 633 # Prevent double usage and help clear cycles.
630 634 self._finalizecallback = None
631 635 self._generatefiles(group=GEN_GROUP_POST_FINALIZE)
632 636
633 637 self._count -= 1
634 638 if self._count != 0:
635 639 return
636 640 self._file.close()
637 641 self._backupsfile.close()
638 642 # cleanup temporary files
639 643 for l, f, b, c in self._backupentries:
640 644 if l not in self._vfsmap and c:
641 645 self._report(
642 646 b"couldn't remove %s: unknown cache location %s\n" % (b, l)
643 647 )
644 648 continue
645 649 vfs = self._vfsmap[l]
646 650 if not f and b and vfs.exists(b):
647 651 try:
648 652 vfs.unlink(b)
649 653 except (IOError, OSError, error.Abort) as inst:
650 654 if not c:
651 655 raise
652 656 # Abort may be raised by a read-only opener
653 657 self._report(
654 658 b"couldn't remove %s: %s\n" % (vfs.join(b), inst)
655 659 )
656 660 self._offsetmap = {}
657 661 self._newfiles = set()
658 662 self._writeundo()
659 663 if self._after:
660 664 self._after()
661 665 self._after = None # Help prevent cycles.
662 666 if self._opener.isfile(self._backupjournal):
663 667 self._opener.unlink(self._backupjournal)
664 668 if self._opener.isfile(self._journal):
665 669 self._opener.unlink(self._journal)
666 670 for l, _f, b, c in self._backupentries:
667 671 if l not in self._vfsmap and c:
668 672 self._report(
669 673 b"couldn't remove %s: unknown cache location"
670 674 b"%s\n" % (b, l)
671 675 )
672 676 continue
673 677 vfs = self._vfsmap[l]
674 678 if b and vfs.exists(b):
675 679 try:
676 680 vfs.unlink(b)
677 681 except (IOError, OSError, error.Abort) as inst:
678 682 if not c:
679 683 raise
680 684 # Abort may be raised by a read-only opener
681 685 self._report(
682 686 b"couldn't remove %s: %s\n" % (vfs.join(b), inst)
683 687 )
684 688 self._backupentries = []
685 689 self._journal = None
686 690
687 691 self._releasefn(self, True) # notify success of closing transaction
688 692 self._releasefn = None # Help prevent cycles.
689 693
690 694 # run post close action
691 695 categories = sorted(self._postclosecallback)
692 696 for cat in categories:
693 697 self._postclosecallback[cat](self)
694 698 # Prevent double usage and help clear cycles.
695 699 self._postclosecallback = None
696 700
697 701 @active
698 702 def abort(self):
699 703 """abort the transaction (generally called on error, or when the
700 704 transaction is not explicitly committed before going out of
701 705 scope)"""
702 706 self._abort()
703 707
704 708 @active
705 709 def add_journal(self, vfs_id, path):
706 710 self._journal_files.append((vfs_id, path))
707 711
708 712 def _writeundo(self):
709 713 """write transaction data for possible future undo call"""
710 714 if self._undoname is None:
711 715 return
712 716 cleanup_undo_files(
713 717 self._report,
714 718 self._vfsmap,
715 719 undo_prefix=self._undoname,
716 720 )
717 721
718 722 def undoname(fn: bytes) -> bytes:
719 723 base, name = os.path.split(fn)
720 724 assert name.startswith(self._journal)
721 725 new_name = name.replace(self._journal, self._undoname, 1)
722 726 return os.path.join(base, new_name)
723 727
724 728 undo_backup_path = b"%s.backupfiles" % self._undoname
725 729 undobackupfile = self._opener.open(undo_backup_path, b'w')
726 730 undobackupfile.write(b'%d\n' % version)
727 731 for l, f, b, c in self._backupentries:
728 732 if not f: # temporary file
729 733 continue
730 734 if not b:
731 735 u = b''
732 736 else:
733 737 if l not in self._vfsmap and c:
734 738 self._report(
735 739 b"couldn't remove %s: unknown cache location"
736 740 b"%s\n" % (b, l)
737 741 )
738 742 continue
739 743 vfs = self._vfsmap[l]
740 744 u = undoname(b)
741 745 util.copyfile(vfs.join(b), vfs.join(u), hardlink=True)
742 746 undobackupfile.write(b"%s\0%s\0%s\0%d\n" % (l, f, u, c))
743 747 undobackupfile.close()
744 748 for vfs, src in self._journal_files:
745 749 dest = undoname(src)
746 750 # if src and dest refer to a same file, vfs.rename is a no-op,
747 751 # leaving both src and dest on disk. delete dest to make sure
748 752 # the rename couldn't be such a no-op.
749 753 vfs.tryunlink(dest)
750 754 try:
751 755 vfs.rename(src, dest)
752 756 except FileNotFoundError: # journal file does not yet exist
753 757 pass
754 758
755 759 def _abort(self):
756 760 entries = self.readjournal()
757 761 self._count = 0
758 762 self._usages = 0
759 763 self._file.close()
760 764 self._backupsfile.close()
761 765
762 766 quick = self._can_quick_abort(entries)
763 767 try:
764 768 if not quick:
765 769 self._report(_(b"transaction abort!\n"))
766 770 for cat in sorted(self._abortcallback):
767 771 self._abortcallback[cat](self)
768 772 # Prevent double usage and help clear cycles.
769 773 self._abortcallback = None
770 774 if quick:
771 775 self._do_quick_abort(entries)
772 776 else:
773 777 self._do_full_abort(entries)
774 778 finally:
775 779 self._journal = None
776 780 self._releasefn(self, False) # notify failure of transaction
777 781 self._releasefn = None # Help prevent cycles.
778 782
779 783 def _can_quick_abort(self, entries):
780 784 """False if any semantic content have been written on disk
781 785
782 786 True if nothing, except temporary files, has been written on disk."""
783 787 if entries:
784 788 return False
785 789 for e in self._backupentries:
786 790 if e[1]:
787 791 return False
788 792 return True
789 793
790 794 def _do_quick_abort(self, entries):
791 795 """(Silently) do a quick cleanup (see _can_quick_abort)"""
792 796 assert self._can_quick_abort(entries)
793 797 tmp_files = [e for e in self._backupentries if not e[1]]
794 798 for vfs_id, old_path, tmp_path, xxx in tmp_files:
795 799 vfs = self._vfsmap[vfs_id]
796 800 try:
797 801 vfs.unlink(tmp_path)
798 802 except FileNotFoundError:
799 803 pass
800 804 if self._backupjournal:
801 805 self._opener.unlink(self._backupjournal)
802 806 if self._journal:
803 807 self._opener.unlink(self._journal)
804 808
805 809 def _do_full_abort(self, entries):
806 810 """(Noisily) rollback all the change introduced by the transaction"""
807 811 try:
808 812 _playback(
809 813 self._journal,
810 814 self._report,
811 815 self._opener,
812 816 self._vfsmap,
813 817 entries,
814 818 self._backupentries,
815 819 False,
816 820 checkambigfiles=self._checkambigfiles,
817 821 )
818 822 self._report(_(b"rollback completed\n"))
819 823 except BaseException as exc:
820 824 self._report(_(b"rollback failed - please run hg recover\n"))
821 825 self._report(
822 826 _(b"(failure reason: %s)\n") % stringutil.forcebytestr(exc)
823 827 )
824 828
825 829
826 830 BAD_VERSION_MSG = _(
827 831 b"journal was created by a different version of Mercurial\n"
828 832 )
829 833
830 834
831 835 def read_backup_files(report, fp):
832 836 """parse an (already open) backup file and return contained backup entries
833 837
834 838 entries are in the form: (location, file, backupfile, xxx)
835 839
836 840 :location: the vfs identifier (vfsmap's key)
837 841 :file: original file path (in the vfs)
838 842 :backupfile: path of the backup (in the vfs)
839 843 :cache: a boolean currently always set to False
840 844 """
841 845 lines = fp.readlines()
842 846 backupentries = []
843 847 if lines:
844 848 ver = lines[0][:-1]
845 849 if ver != (b'%d' % version):
846 850 report(BAD_VERSION_MSG)
847 851 else:
848 852 for line in lines[1:]:
849 853 if line:
850 854 # Shave off the trailing newline
851 855 line = line[:-1]
852 856 l, f, b, c = line.split(b'\0')
853 857 backupentries.append((l, f, b, bool(c)))
854 858 return backupentries
855 859
856 860
857 861 def rollback(
858 862 opener,
859 863 vfsmap,
860 864 file,
861 865 report,
862 866 checkambigfiles=None,
863 867 skip_journal_pattern=None,
864 868 ):
865 869 """Rolls back the transaction contained in the given file
866 870
867 871 Reads the entries in the specified file, and the corresponding
868 872 '*.backupfiles' file, to recover from an incomplete transaction.
869 873
870 874 * `file`: a file containing a list of entries, specifying where
871 875 to truncate each file. The file should contain a list of
872 876 file\0offset pairs, delimited by newlines. The corresponding
873 877 '*.backupfiles' file should contain a list of file\0backupfile
874 878 pairs, delimited by \0.
875 879
876 880 `checkambigfiles` is a set of (path, vfs-location) tuples,
877 881 which determine whether file stat ambiguity should be avoided at
878 882 restoring corresponded files.
879 883 """
880 884 entries = []
881 885 backupentries = []
882 886
883 887 with opener.open(file) as fp:
884 888 lines = fp.readlines()
885 889 for l in lines:
886 890 try:
887 891 f, o = l.split(b'\0')
888 892 entries.append((f, int(o)))
889 893 except ValueError:
890 894 report(
891 895 _(b"couldn't read journal entry %r!\n") % pycompat.bytestr(l)
892 896 )
893 897
894 898 backupjournal = b"%s.backupfiles" % file
895 899 if opener.exists(backupjournal):
896 900 with opener.open(backupjournal) as fp:
897 901 backupentries = read_backup_files(report, fp)
898 902 if skip_journal_pattern is not None:
899 903 keep = lambda x: not skip_journal_pattern.match(x[1])
900 904 backupentries = [x for x in backupentries if keep(x)]
901 905
902 906 _playback(
903 907 file,
904 908 report,
905 909 opener,
906 910 vfsmap,
907 911 entries,
908 912 backupentries,
909 913 checkambigfiles=checkambigfiles,
910 914 )
General Comments 0
You need to be logged in to leave comments. Login now