##// END OF EJS Templates
transaction: add a way to know a transaction has been finalized...
marmoute -
r49525:3f618484 stable
parent child Browse files
Show More
@@ -1,764 +1,768 b''
1 1 # transaction.py - simple journaling scheme for mercurial
2 2 #
3 3 # This transaction scheme is intended to gracefully handle program
4 4 # errors and interruptions. More serious failures like system crashes
5 5 # can be recovered with an fsck-like tool. As the whole repository is
6 6 # effectively log-structured, this should amount to simply truncating
7 7 # anything that isn't referenced in the changelog.
8 8 #
9 9 # Copyright 2005, 2006 Olivia Mackall <olivia@selenic.com>
10 10 #
11 11 # This software may be used and distributed according to the terms of the
12 12 # GNU General Public License version 2 or any later version.
13 13
14 14 from __future__ import absolute_import
15 15
16 16 import errno
17 17
18 18 from .i18n import _
19 19 from . import (
20 20 error,
21 21 pycompat,
22 22 util,
23 23 )
24 24 from .utils import stringutil
25 25
# Version number written at the top of both the transaction journal and its
# companion '.backupfiles' journal; bumped whenever the on-disk format changes.
version = 2

# These are the file generators that should only be executed after the
# finalizers are done, since they rely on the output of the finalizers (like
# the changelog having been written).
postfinalizegenerators = {b'bookmarks', b'dirstate'}

# Group identifiers used by `_generatefiles` to select which file generators
# run relative to the finalization step.
GEN_GROUP_ALL = b'all'
GEN_GROUP_PRE_FINALIZE = b'prefinalize'
GEN_GROUP_POST_FINALIZE = b'postfinalize'
36 36
37 37
def active(func):
    """Decorator restricting a transaction method to an open transaction.

    The wrapped method raises ``error.ProgrammingError`` when called on a
    transaction whose count has dropped to zero, i.e. one that has already
    been committed or aborted.
    """

    def _active(self, *args, **kwds):
        # guard clause: only a live transaction may proceed
        if self._count != 0:
            return func(self, *args, **kwds)
        raise error.ProgrammingError(
            b'cannot use transaction when it is already committed/aborted'
        )

    return _active
47 47
48 48
49 49 def _playback(
50 50 journal,
51 51 report,
52 52 opener,
53 53 vfsmap,
54 54 entries,
55 55 backupentries,
56 56 unlink=True,
57 57 checkambigfiles=None,
58 58 ):
59 59 for f, o in sorted(dict(entries).items()):
60 60 if o or not unlink:
61 61 checkambig = checkambigfiles and (f, b'') in checkambigfiles
62 62 try:
63 63 fp = opener(f, b'a', checkambig=checkambig)
64 64 if fp.tell() < o:
65 65 raise error.Abort(
66 66 _(
67 67 b"attempted to truncate %s to %d bytes, but it was "
68 68 b"already %d bytes\n"
69 69 )
70 70 % (f, o, fp.tell())
71 71 )
72 72 fp.truncate(o)
73 73 fp.close()
74 74 except IOError:
75 75 report(_(b"failed to truncate %s\n") % f)
76 76 raise
77 77 else:
78 78 try:
79 79 opener.unlink(f)
80 80 except (IOError, OSError) as inst:
81 81 if inst.errno != errno.ENOENT:
82 82 raise
83 83
84 84 backupfiles = []
85 85 for l, f, b, c in backupentries:
86 86 if l not in vfsmap and c:
87 87 report(b"couldn't handle %s: unknown cache location %s\n" % (b, l))
88 88 vfs = vfsmap[l]
89 89 try:
90 90 if f and b:
91 91 filepath = vfs.join(f)
92 92 backuppath = vfs.join(b)
93 93 checkambig = checkambigfiles and (f, l) in checkambigfiles
94 94 try:
95 95 util.copyfile(backuppath, filepath, checkambig=checkambig)
96 96 backupfiles.append(b)
97 97 except IOError as exc:
98 98 e_msg = stringutil.forcebytestr(exc)
99 99 report(_(b"failed to recover %s (%s)\n") % (f, e_msg))
100 100 else:
101 101 target = f or b
102 102 try:
103 103 vfs.unlink(target)
104 104 except (IOError, OSError) as inst:
105 105 if inst.errno != errno.ENOENT:
106 106 raise
107 107 except (IOError, OSError, error.Abort):
108 108 if not c:
109 109 raise
110 110
111 111 backuppath = b"%s.backupfiles" % journal
112 112 if opener.exists(backuppath):
113 113 opener.unlink(backuppath)
114 114 opener.unlink(journal)
115 115 try:
116 116 for f in backupfiles:
117 117 if opener.exists(f):
118 118 opener.unlink(f)
119 119 except (IOError, OSError, error.Abort):
120 120 # only pure backup file remains, it is sage to ignore any error
121 121 pass
122 122
123 123
class transaction(util.transactional):
    """A journaled transaction over repository files.

    Every file touched while the transaction is active is first recorded in
    an on-disk journal (an offset for append-only files, or a full backup
    copy), so an abort — explicit or triggered by an exception — can restore
    the pre-transaction state via `_playback`.
    """

    def __init__(
        self,
        report,
        opener,
        vfsmap,
        journalname,
        undoname=None,
        after=None,
        createmode=None,
        validator=None,
        releasefn=None,
        checkambigfiles=None,
        name='<unnamed>',
    ):
        """Begin a new transaction

        Begins a new transaction that allows rolling back writes in the event of
        an exception.

        * `report`: callable used to emit user-visible messages
        * `opener`: vfs used for the journal files and store content
        * `vfsmap`: {location -> vfs} map for files outside the main opener
        * `journalname`: path of the journal file to create
        * `undoname`: base name for undo files (no undo data kept if None)
        * `after`: called after the transaction has been committed
        * `createmode`: the mode of the journal file that will be created
        * `validator`: callback run first when the transaction is closed
        * `releasefn`: called after releasing (with transaction and result)
        * `name`: label shown in `repr()` for debugging

        `checkambigfiles` is a set of (path, vfs-location) tuples,
        which determine whether file stat ambiguity should be avoided
        for corresponded files.
        """
        # `_count` tracks nested scopes; `_usages` tracks release() debts
        self._count = 1
        self._usages = 1
        self._report = report
        # a vfs to the store content
        self._opener = opener
        # a map to access file in various {location -> vfs}
        vfsmap = vfsmap.copy()
        vfsmap[b''] = opener  # set default value
        self._vfsmap = vfsmap
        self._after = after
        # pre-transaction offsets of modified append-only files
        self._offsetmap = {}
        # files created by this transaction (offset 0)
        self._newfiles = set()
        self._journal = journalname
        self._undoname = undoname
        # pending (file, offset) entries delayed by startgroup/endgroup
        self._queue = []
        # A callback to do something just after releasing transaction.
        if releasefn is None:
            releasefn = lambda tr, success: None
        self._releasefn = releasefn

        self._checkambigfiles = set()
        if checkambigfiles:
            self._checkambigfiles.update(checkambigfiles)

        self._names = [name]

        # A dict dedicated to precisely tracking the changes introduced in the
        # transaction.
        self.changes = {}

        # a dict of arguments to be passed to hooks
        self.hookargs = {}
        self._file = opener.open(self._journal, b"w+")

        # a list of ('location', 'path', 'backuppath', cache) entries.
        # - if 'backuppath' is empty, no file existed at backup time
        # - if 'path' is empty, this is a temporary transaction file
        # - if 'location' is not empty, the path is outside main opener reach.
        #   use 'location' value as a key in a vfsmap to find the right 'vfs'
        # (cache is currently unused)
        self._backupentries = []
        self._backupmap = {}
        self._backupjournal = b"%s.backupfiles" % self._journal
        self._backupsfile = opener.open(self._backupjournal, b'w')
        self._backupsfile.write(b'%d\n' % version)

        if createmode is not None:
            opener.chmod(self._journal, createmode & 0o666)
            opener.chmod(self._backupjournal, createmode & 0o666)

        # hold file generations to be performed on commit
        self._filegenerators = {}
        # hold callback to write pending data for hooks
        self._pendingcallback = {}
        # True if any pending data has ever been written
        self._anypending = False
        # holds callback to call when writing the transaction
        self._finalizecallback = {}
        # holds callback to call when validating the transaction
        # should raise exception if anything is wrong
        self._validatecallback = {}
        if validator is not None:
            self._validatecallback[b'001-userhooks'] = validator
        # hold callback for post transaction close
        self._postclosecallback = {}
        # holds callbacks to call during abort
        self._abortcallback = {}

    def __repr__(self):
        # join nested names so nested scopes are visible in debug output
        name = '/'.join(self._names)
        return '<transaction name=%s, count=%d, usages=%d>' % (
            name,
            self._count,
            self._usages,
        )

    def __del__(self):
        # a transaction garbage-collected while still open was never
        # committed: roll it back rather than leave a partial write behind
        if self._journal:
            self._abort()

    @property
    def finalized(self):
        """True once all finalize callbacks have been run by `close`."""
        # `close` sets `_finalizecallback` to None after running finalizers;
        # that sentinel doubles as the "finalized" marker
        return self._finalizecallback is None

    @active
    def startgroup(self):
        """delay registration of file entry

        This is used by strip to delay vision of strip offset. The transaction
        sees either none or all of the strip actions to be done."""
        self._queue.append([])

    @active
    def endgroup(self):
        """apply delayed registration of file entry.

        This is used by strip to delay vision of strip offset. The transaction
        sees either none or all of the strip actions to be done."""
        q = self._queue.pop()
        for f, o in q:
            self._addentry(f, o)

    @active
    def add(self, file, offset):
        """record the state of an append-only file before update"""
        if (
            file in self._newfiles
            or file in self._offsetmap
            or file in self._backupmap
        ):
            return
        if self._queue:
            # inside a group: defer until endgroup()
            self._queue[-1].append((file, offset))
            return

        self._addentry(file, offset)

    def _addentry(self, file, offset):
        """add an append-only entry to memory and on-disk state"""
        if (
            file in self._newfiles
            or file in self._offsetmap
            or file in self._backupmap
        ):
            return
        if offset:
            self._offsetmap[file] = offset
        else:
            self._newfiles.add(file)
        # add enough data to the journal to do the truncate
        self._file.write(b"%s\0%d\n" % (file, offset))
        self._file.flush()

    @active
    def addbackup(self, file, hardlink=True, location=b''):
        """Adds a backup of the file to the transaction

        Calling addbackup() creates a hardlink backup of the specified file
        that is used to recover the file in the event of the transaction
        aborting.

        * `file`: the file path, relative to .hg/store
        * `hardlink`: use a hardlink to quickly create the backup

        Raises ProgrammingError when called between startgroup/endgroup.
        """
        if self._queue:
            msg = b'cannot use transaction.addbackup inside "group"'
            raise error.ProgrammingError(msg)

        if (
            file in self._newfiles
            or file in self._offsetmap
            or file in self._backupmap
        ):
            return
        vfs = self._vfsmap[location]
        dirname, filename = vfs.split(file)
        backupfilename = b"%s.backup.%s" % (self._journal, filename)
        backupfile = vfs.reljoin(dirname, backupfilename)
        if vfs.exists(file):
            filepath = vfs.join(file)
            backuppath = vfs.join(backupfile)
            util.copyfile(filepath, backuppath, hardlink=hardlink)
        else:
            # empty backup path records "file did not exist" for playback
            backupfile = b''

        self._addbackupentry((location, file, backupfile, False))

    def _addbackupentry(self, entry):
        """register a new backup entry and write it to disk"""
        self._backupentries.append(entry)
        self._backupmap[entry[1]] = len(self._backupentries) - 1
        self._backupsfile.write(b"%s\0%s\0%s\0%d\n" % entry)
        self._backupsfile.flush()

    @active
    def registertmp(self, tmpfile, location=b''):
        """register a temporary transaction file

        Such files will be deleted when the transaction exits (on both
        failure and success).
        """
        # empty 'path' field marks the entry as temporary
        self._addbackupentry((location, b'', tmpfile, False))

    @active
    def addfilegenerator(
        self, genid, filenames, genfunc, order=0, location=b''
    ):
        """add a function to generates some files at transaction commit

        The `genfunc` argument is a function capable of generating proper
        content of each entry in the `filename` tuple.

        At transaction close time, `genfunc` will be called with one file
        object argument per entries in `filenames`.

        The transaction itself is responsible for the backup, creation and
        final write of such file.

        The `genid` argument is used to ensure the same set of file is only
        generated once. Call to `addfilegenerator` for a `genid` already
        present will overwrite the old entry.

        The `order` argument may be used to control the order in which multiple
        generator will be executed.

        The `location` arguments may be used to indicate the files are located
        outside of the standard directory for transaction. It should match
        one of the key of the `transaction.vfsmap` dictionary.
        """
        # For now, we are unable to do proper backup and restore of custom vfs
        # but for bookmarks that are handled outside this mechanism.
        self._filegenerators[genid] = (order, filenames, genfunc, location)

    @active
    def removefilegenerator(self, genid):
        """reverse of addfilegenerator, remove a file generator function"""
        if genid in self._filegenerators:
            del self._filegenerators[genid]

    def _generatefiles(self, suffix=b'', group=GEN_GROUP_ALL):
        """run registered file generators, returning True if any ran

        With a non-empty `suffix`, files are written as temporary copies
        (used by writepending); `group` restricts which generators run
        relative to finalization (see GEN_GROUP_* and
        postfinalizegenerators).
        """
        # write files registered for generation
        any = False

        if group == GEN_GROUP_ALL:
            skip_post = skip_pre = False
        else:
            skip_pre = group == GEN_GROUP_POST_FINALIZE
            skip_post = group == GEN_GROUP_PRE_FINALIZE

        for id, entry in sorted(pycompat.iteritems(self._filegenerators)):
            any = True
            order, filenames, genfunc, location = entry

            # for generation at closing, check if it's before or after finalize
            is_post = id in postfinalizegenerators
            if skip_post and is_post:
                continue
            elif skip_pre and not is_post:
                continue

            vfs = self._vfsmap[location]
            files = []
            try:
                for name in filenames:
                    name += suffix
                    if suffix:
                        self.registertmp(name, location=location)
                        checkambig = False
                    else:
                        self.addbackup(name, location=location)
                        checkambig = (name, location) in self._checkambigfiles
                    files.append(
                        vfs(name, b'w', atomictemp=True, checkambig=checkambig)
                    )
                genfunc(*files)
                for f in files:
                    f.close()
                # skip discard() loop since we're sure no open file remains
                del files[:]
            finally:
                for f in files:
                    f.discard()
        return any

    @active
    def findoffset(self, file):
        """return the recorded pre-transaction offset for `file`

        Returns 0 for files created by this transaction and None for files
        unknown to it.
        """
        if file in self._newfiles:
            return 0
        return self._offsetmap.get(file)

    @active
    def readjournal(self):
        """parse the journal file into a list of (file, offset) entries"""
        self._file.seek(0)
        entries = []
        for l in self._file.readlines():
            file, troffset = l.split(b'\0')
            entries.append((file, int(troffset)))
        return entries

    @active
    def replace(self, file, offset):
        """
        replace can only replace already committed entries
        that are not pending in the queue

        Raises KeyError when `file` is unknown to the transaction.
        """
        if file in self._newfiles:
            if not offset:
                return
            self._newfiles.remove(file)
            self._offsetmap[file] = offset
        elif file in self._offsetmap:
            if not offset:
                del self._offsetmap[file]
                self._newfiles.add(file)
            else:
                self._offsetmap[file] = offset
        else:
            raise KeyError(file)
        # record the updated entry in the on-disk journal as well
        self._file.write(b"%s\0%d\n" % (file, offset))
        self._file.flush()

    @active
    def nest(self, name='<unnamed>'):
        """enter a nested transaction scope sharing this journal"""
        self._count += 1
        self._usages += 1
        self._names.append(name)
        return self

    def release(self):
        """release one usage of the transaction (reverse of `nest`)"""
        if self._count > 0:
            self._usages -= 1
        if self._names:
            self._names.pop()
        # if the transaction scopes are left without being closed, fail
        if self._count > 0 and self._usages == 0:
            self._abort()

    def running(self):
        """True while the transaction has not been committed or aborted"""
        return self._count > 0

    def addpending(self, category, callback):
        """add a callback to be called when the transaction is pending

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._pendingcallback[category] = callback

    @active
    def writepending(self):
        """write pending file to temporary version

        This is used to allow hooks to view a transaction before commit"""
        categories = sorted(self._pendingcallback)
        for cat in categories:
            # remove callback since the data will have been flushed
            any = self._pendingcallback.pop(cat)(self)
            self._anypending = self._anypending or any
        self._anypending |= self._generatefiles(suffix=b'.pending')
        return self._anypending

    @active
    def hasfinalize(self, category):
        """check whether a finalize callback already exists for a category"""
        return category in self._finalizecallback

    @active
    def addfinalize(self, category, callback):
        """add a callback to be called when the transaction is closed

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting old callbacks with
        newer callbacks.
        """
        self._finalizecallback[category] = callback

    @active
    def addpostclose(self, category, callback):
        """add or replace a callback to be called after the transaction closed

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._postclosecallback[category] = callback

    @active
    def getpostclose(self, category):
        """return a postclose callback added before, or None"""
        return self._postclosecallback.get(category, None)

    @active
    def addabort(self, category, callback):
        """add a callback to be called when the transaction is aborted.

        The transaction will be given as the first argument to the callback.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._abortcallback[category] = callback

    @active
    def addvalidator(self, category, callback):
        """adds a callback to be called when validating the transaction.

        The transaction will be given as the first argument to the callback.

        callback should raise exception if to abort transaction"""
        self._validatecallback[category] = callback

    @active
    def close(self):
        '''commit the transaction'''
        if self._count == 1:
            # run validators first; any exception aborts the close
            for category in sorted(self._validatecallback):
                self._validatecallback[category](self)
            self._validatecallback = None  # Help prevent cycles.
            self._generatefiles(group=GEN_GROUP_PRE_FINALIZE)
            # loop because finalizers may register further finalizers
            while self._finalizecallback:
                callbacks = self._finalizecallback
                self._finalizecallback = {}
                categories = sorted(callbacks)
                for cat in categories:
                    callbacks[cat](self)
            # Prevent double usage and help clear cycles.
            self._finalizecallback = None
            self._generatefiles(group=GEN_GROUP_POST_FINALIZE)

        self._count -= 1
        if self._count != 0:
            # nested scope: the outermost close does the real work
            return
        self._file.close()
        self._backupsfile.close()
        # cleanup temporary files
        for l, f, b, c in self._backupentries:
            if l not in self._vfsmap and c:
                self._report(
                    b"couldn't remove %s: unknown cache location %s\n" % (b, l)
                )
                continue
            vfs = self._vfsmap[l]
            if not f and b and vfs.exists(b):
                try:
                    vfs.unlink(b)
                except (IOError, OSError, error.Abort) as inst:
                    if not c:
                        raise
                    # Abort may be raised by a read-only opener
                    self._report(
                        b"couldn't remove %s: %s\n" % (vfs.join(b), inst)
                    )
        self._offsetmap = {}
        self._newfiles = set()
        self._writeundo()
        if self._after:
            self._after()
            self._after = None  # Help prevent cycles.
        if self._opener.isfile(self._backupjournal):
            self._opener.unlink(self._backupjournal)
        if self._opener.isfile(self._journal):
            self._opener.unlink(self._journal)
        # remove the remaining backup copies now that the commit succeeded
        for l, _f, b, c in self._backupentries:
            if l not in self._vfsmap and c:
                self._report(
                    b"couldn't remove %s: unknown cache location"
                    b"%s\n" % (b, l)
                )
                continue
            vfs = self._vfsmap[l]
            if b and vfs.exists(b):
                try:
                    vfs.unlink(b)
                except (IOError, OSError, error.Abort) as inst:
                    if not c:
                        raise
                    # Abort may be raised by a read-only opener
                    self._report(
                        b"couldn't remove %s: %s\n" % (vfs.join(b), inst)
                    )
        self._backupentries = []
        self._journal = None

        self._releasefn(self, True)  # notify success of closing transaction
        self._releasefn = None  # Help prevent cycles.

        # run post close action
        categories = sorted(self._postclosecallback)
        for cat in categories:
            self._postclosecallback[cat](self)
        # Prevent double usage and help clear cycles.
        self._postclosecallback = None

    @active
    def abort(self):
        """abort the transaction (generally called on error, or when the
        transaction is not explicitly committed before going out of
        scope)"""
        self._abort()

    def _writeundo(self):
        """write transaction data for possible future undo call"""
        if self._undoname is None:
            return

        undo_backup_path = b"%s.backupfiles" % self._undoname
        undobackupfile = self._opener.open(undo_backup_path, b'w')
        undobackupfile.write(b'%d\n' % version)
        for l, f, b, c in self._backupentries:
            if not f:  # temporary file
                continue
            if not b:
                # no backup copy existed for this file
                u = b''
            else:
                if l not in self._vfsmap and c:
                    self._report(
                        b"couldn't remove %s: unknown cache location"
                        b"%s\n" % (b, l)
                    )
                    continue
                vfs = self._vfsmap[l]
                base, name = vfs.split(b)
                assert name.startswith(self._journal), name
                uname = name.replace(self._journal, self._undoname, 1)
                u = vfs.reljoin(base, uname)
                util.copyfile(vfs.join(b), vfs.join(u), hardlink=True)
            undobackupfile.write(b"%s\0%s\0%s\0%d\n" % (l, f, u, c))
        undobackupfile.close()

    def _abort(self):
        """roll back everything recorded in the journal and clean up"""
        entries = self.readjournal()
        self._count = 0
        self._usages = 0
        self._file.close()
        self._backupsfile.close()

        try:
            # nothing was written: just remove the (empty) journal files
            if not entries and not self._backupentries:
                if self._backupjournal:
                    self._opener.unlink(self._backupjournal)
                if self._journal:
                    self._opener.unlink(self._journal)
                return

            self._report(_(b"transaction abort!\n"))

            try:
                for cat in sorted(self._abortcallback):
                    self._abortcallback[cat](self)
                # Prevent double usage and help clear cycles.
                self._abortcallback = None
                _playback(
                    self._journal,
                    self._report,
                    self._opener,
                    self._vfsmap,
                    entries,
                    self._backupentries,
                    False,
                    checkambigfiles=self._checkambigfiles,
                )
                self._report(_(b"rollback completed\n"))
            except BaseException as exc:
                self._report(_(b"rollback failed - please run hg recover\n"))
                self._report(
                    _(b"(failure reason: %s)\n") % stringutil.forcebytestr(exc)
                )
        finally:
            self._journal = None
            self._releasefn(self, False)  # notify failure of transaction
            self._releasefn = None  # Help prevent cycles.
703 707
704 708
# Reported when an on-disk journal's version line does not match the
# `version` constant this module writes.
BAD_VERSION_MSG = _(
    b"journal was created by a different version of Mercurial\n"
)
708 712
709 713
def rollback(opener, vfsmap, file, report, checkambigfiles=None):
    """Rolls back the transaction contained in the given file

    Reads the entries in the specified file, and the corresponding
    '*.backupfiles' file, to recover from an incomplete transaction.

    * `file`: a file containing a list of entries, specifying where
    to truncate each file.  The file should contain a list of
    file\0offset pairs, delimited by newlines. The corresponding
    '*.backupfiles' file should contain a list of file\0backupfile
    pairs, delimited by \0.

    `checkambigfiles` is a set of (path, vfs-location) tuples,
    which determine whether file stat ambiguity should be avoided at
    restoring corresponded files.
    """
    entries = []
    backupentries = []

    with opener.open(file) as fp:
        lines = fp.readlines()
    for l in lines:
        try:
            f, o = l.split(b'\0')
            entries.append((f, int(o)))
        except ValueError:
            # malformed entry: report it but keep processing the rest
            report(
                _(b"couldn't read journal entry %r!\n") % pycompat.bytestr(l)
            )

    backupjournal = b"%s.backupfiles" % file
    if opener.exists(backupjournal):
        # context manager so the descriptor is closed even if parsing fails
        # (it was previously left open: a file descriptor leak)
        with opener.open(backupjournal) as fp:
            lines = fp.readlines()
        if lines:
            ver = lines[0][:-1]
            if ver != (b'%d' % version):
                report(BAD_VERSION_MSG)
            else:
                for line in lines[1:]:
                    if line:
                        # Shave off the trailing newline
                        line = line[:-1]
                        l, f, b, c = line.split(b'\0')
                        backupentries.append((l, f, b, bool(c)))

    _playback(
        file,
        report,
        opener,
        vfsmap,
        entries,
        backupentries,
        checkambigfiles=checkambigfiles,
    )
General Comments 0
You need to be logged in to leave comments. Login now