##// END OF EJS Templates
transaction: clarify the logic around pre-finalize/post-finalize...
marmoute -
r44887:f6798c1a default
parent child Browse files
Show More
@@ -1,721 +1,727 b''
1 1 # transaction.py - simple journaling scheme for mercurial
2 2 #
3 3 # This transaction scheme is intended to gracefully handle program
4 4 # errors and interruptions. More serious failures like system crashes
5 5 # can be recovered with an fsck-like tool. As the whole repository is
6 6 # effectively log-structured, this should amount to simply truncating
7 7 # anything that isn't referenced in the changelog.
8 8 #
9 9 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
10 10 #
11 11 # This software may be used and distributed according to the terms of the
12 12 # GNU General Public License version 2 or any later version.
13 13
14 14 from __future__ import absolute_import
15 15
16 16 import errno
17 17
18 18 from .i18n import _
19 19 from . import (
20 20 error,
21 21 pycompat,
22 22 util,
23 23 )
24 24 from .utils import stringutil
25 25
# Journal format version written at the top of every backup journal.
version = 2

# These are the file generators that should only be executed after the
# finalizers are done, since they rely on the output of the finalizers (like
# the changelog having been written).
postfinalizegenerators = {b'bookmarks', b'dirstate'}

# Generator-group selectors accepted by transaction._generatefiles().
GEN_GROUP_ALL = b'all'
GEN_GROUP_PRE_FINALIZE = b'prefinalize'
GEN_GROUP_POST_FINALIZE = b'postfinalize'
36 36
37 37
def active(func):
    """Decorator restricting a method to a live transaction.

    Raises ``error.Abort`` when the transaction's reference count has
    dropped to zero (i.e. it was already committed or aborted); otherwise
    forwards the call unchanged.
    """

    def _active(self, *args, **kwds):
        if self._count == 0:
            raise error.Abort(
                _(
                    b'cannot use transaction when it is already committed/aborted'
                )
            )
        return func(self, *args, **kwds)

    return _active
49 49
50 50
51 51 def _playback(
52 52 journal,
53 53 report,
54 54 opener,
55 55 vfsmap,
56 56 entries,
57 57 backupentries,
58 58 unlink=True,
59 59 checkambigfiles=None,
60 60 ):
61 61 for f, o, _ignore in entries:
62 62 if o or not unlink:
63 63 checkambig = checkambigfiles and (f, b'') in checkambigfiles
64 64 try:
65 65 fp = opener(f, b'a', checkambig=checkambig)
66 66 if fp.tell() < o:
67 67 raise error.Abort(
68 68 _(
69 69 b"attempted to truncate %s to %d bytes, but it was "
70 70 b"already %d bytes\n"
71 71 )
72 72 % (f, o, fp.tell())
73 73 )
74 74 fp.truncate(o)
75 75 fp.close()
76 76 except IOError:
77 77 report(_(b"failed to truncate %s\n") % f)
78 78 raise
79 79 else:
80 80 try:
81 81 opener.unlink(f)
82 82 except (IOError, OSError) as inst:
83 83 if inst.errno != errno.ENOENT:
84 84 raise
85 85
86 86 backupfiles = []
87 87 for l, f, b, c in backupentries:
88 88 if l not in vfsmap and c:
89 89 report(b"couldn't handle %s: unknown cache location %s\n" % (b, l))
90 90 vfs = vfsmap[l]
91 91 try:
92 92 if f and b:
93 93 filepath = vfs.join(f)
94 94 backuppath = vfs.join(b)
95 95 checkambig = checkambigfiles and (f, l) in checkambigfiles
96 96 try:
97 97 util.copyfile(backuppath, filepath, checkambig=checkambig)
98 98 backupfiles.append(b)
99 99 except IOError:
100 100 report(_(b"failed to recover %s\n") % f)
101 101 else:
102 102 target = f or b
103 103 try:
104 104 vfs.unlink(target)
105 105 except (IOError, OSError) as inst:
106 106 if inst.errno != errno.ENOENT:
107 107 raise
108 108 except (IOError, OSError, error.Abort):
109 109 if not c:
110 110 raise
111 111
112 112 backuppath = b"%s.backupfiles" % journal
113 113 if opener.exists(backuppath):
114 114 opener.unlink(backuppath)
115 115 opener.unlink(journal)
116 116 try:
117 117 for f in backupfiles:
118 118 if opener.exists(f):
119 119 opener.unlink(f)
120 120 except (IOError, OSError, error.Abort):
121 121 # only pure backup file remains, it is sage to ignore any error
122 122 pass
123 123
124 124
class transaction(util.transactional):
    """Journal-backed transaction over repository files.

    Records enough information (append offsets and file backups) to roll
    back all writes on abort, and runs registered callbacks/file
    generators in a well-defined order on close.
    """

    def __init__(
        self,
        report,
        opener,
        vfsmap,
        journalname,
        undoname=None,
        after=None,
        createmode=None,
        validator=None,
        releasefn=None,
        checkambigfiles=None,
        name='<unnamed>',
    ):
        """Begin a new transaction

        Begins a new transaction that allows rolling back writes in the event of
        an exception.

        * `after`: called after the transaction has been committed
        * `createmode`: the mode of the journal file that will be created
        * `releasefn`: called after releasing (with transaction and result)

        `checkambigfiles` is a set of (path, vfs-location) tuples,
        which determine whether file stat ambiguity should be avoided
        for corresponded files.
        """
        self._count = 1
        self._usages = 1
        self._report = report
        # a vfs to the store content
        self._opener = opener
        # a map to access file in various {location -> vfs}
        vfsmap = vfsmap.copy()
        vfsmap[b''] = opener  # set default value
        self._vfsmap = vfsmap
        self._after = after
        self._entries = []
        self._map = {}
        self._journal = journalname
        self._undoname = undoname
        self._queue = []
        # A callback to validate transaction content before closing it.
        # should raise exception if anything is wrong.
        # target user is repository hooks.
        if validator is None:
            validator = lambda tr: None
        self._validator = validator
        # A callback to do something just after releasing transaction.
        if releasefn is None:
            releasefn = lambda tr, success: None
        self._releasefn = releasefn

        self._checkambigfiles = set()
        if checkambigfiles:
            self._checkambigfiles.update(checkambigfiles)

        self._names = [name]

        # A dict dedicated to precisely tracking the changes introduced in the
        # transaction.
        self.changes = {}

        # a dict of arguments to be passed to hooks
        self.hookargs = {}
        self._file = opener.open(self._journal, b"w")

        # a list of ('location', 'path', 'backuppath', cache) entries.
        # - if 'backuppath' is empty, no file existed at backup time
        # - if 'path' is empty, this is a temporary transaction file
        # - if 'location' is not empty, the path is outside main opener reach.
        #   use 'location' value as a key in a vfsmap to find the right 'vfs'
        # (cache is currently unused)
        self._backupentries = []
        self._backupmap = {}
        self._backupjournal = b"%s.backupfiles" % self._journal
        self._backupsfile = opener.open(self._backupjournal, b'w')
        self._backupsfile.write(b'%d\n' % version)

        if createmode is not None:
            opener.chmod(self._journal, createmode & 0o666)
            opener.chmod(self._backupjournal, createmode & 0o666)

        # hold file generations to be performed on commit
        self._filegenerators = {}
        # hold callback to write pending data for hooks
        self._pendingcallback = {}
        # True if any pending data have been written ever
        self._anypending = False
        # holds callback to call when writing the transaction
        self._finalizecallback = {}
        # hold callback for post transaction close
        self._postclosecallback = {}
        # holds callbacks to call during abort
        self._abortcallback = {}

    def __repr__(self):
        name = '/'.join(self._names)
        return '<transaction name=%s, count=%d, usages=%d>' % (
            name,
            self._count,
            self._usages,
        )

    def __del__(self):
        if self._journal:
            self._abort()

    @active
    def startgroup(self):
        """delay registration of file entry

        This is used by strip to delay vision of strip offset. The transaction
        sees either none or all of the strip actions to be done."""
        self._queue.append([])

    @active
    def endgroup(self):
        """apply delayed registration of file entry.

        This is used by strip to delay vision of strip offset. The transaction
        sees either none or all of the strip actions to be done."""
        q = self._queue.pop()
        for f, o, data in q:
            self._addentry(f, o, data)

    @active
    def add(self, file, offset, data=None):
        """record the state of an append-only file before update"""
        if file in self._map or file in self._backupmap:
            return
        if self._queue:
            self._queue[-1].append((file, offset, data))
            return

        self._addentry(file, offset, data)

    def _addentry(self, file, offset, data):
        """add an append-only entry to memory and on-disk state"""
        if file in self._map or file in self._backupmap:
            return
        self._entries.append((file, offset, data))
        self._map[file] = len(self._entries) - 1
        # add enough data to the journal to do the truncate
        self._file.write(b"%s\0%d\n" % (file, offset))
        self._file.flush()

    @active
    def addbackup(self, file, hardlink=True, location=b''):
        """Adds a backup of the file to the transaction

        Calling addbackup() creates a hardlink backup of the specified file
        that is used to recover the file in the event of the transaction
        aborting.

        * `file`: the file path, relative to .hg/store
        * `hardlink`: use a hardlink to quickly create the backup
        """
        if self._queue:
            msg = b'cannot use transaction.addbackup inside "group"'
            raise error.ProgrammingError(msg)

        if file in self._map or file in self._backupmap:
            return
        vfs = self._vfsmap[location]
        dirname, filename = vfs.split(file)
        backupfilename = b"%s.backup.%s" % (self._journal, filename)
        backupfile = vfs.reljoin(dirname, backupfilename)
        if vfs.exists(file):
            filepath = vfs.join(file)
            backuppath = vfs.join(backupfile)
            util.copyfile(filepath, backuppath, hardlink=hardlink)
        else:
            # empty backup path records "no file existed at backup time"
            backupfile = b''

        self._addbackupentry((location, file, backupfile, False))

    def _addbackupentry(self, entry):
        """register a new backup entry and write it to disk"""
        self._backupentries.append(entry)
        self._backupmap[entry[1]] = len(self._backupentries) - 1
        self._backupsfile.write(b"%s\0%s\0%s\0%d\n" % entry)
        self._backupsfile.flush()

    @active
    def registertmp(self, tmpfile, location=b''):
        """register a temporary transaction file

        Such files will be deleted when the transaction exits (on both
        failure and success).
        """
        self._addbackupentry((location, b'', tmpfile, False))

    @active
    def addfilegenerator(
        self, genid, filenames, genfunc, order=0, location=b''
    ):
        """add a function to generates some files at transaction commit

        The `genfunc` argument is a function capable of generating proper
        content of each entry in the `filename` tuple.

        At transaction close time, `genfunc` will be called with one file
        object argument per entries in `filenames`.

        The transaction itself is responsible for the backup, creation and
        final write of such file.

        The `genid` argument is used to ensure the same set of file is only
        generated once. Call to `addfilegenerator` for a `genid` already
        present will overwrite the old entry.

        The `order` argument may be used to control the order in which multiple
        generator will be executed.

        The `location` arguments may be used to indicate the files are located
        outside of the the standard directory for transaction. It should match
        one of the key of the `transaction.vfsmap` dictionary.
        """
        # For now, we are unable to do proper backup and restore of custom vfs
        # but for bookmarks that are handled outside this mechanism.
        self._filegenerators[genid] = (order, filenames, genfunc, location)

    @active
    def removefilegenerator(self, genid):
        """reverse of addfilegenerator, remove a file generator function"""
        if genid in self._filegenerators:
            del self._filegenerators[genid]

    def _generatefiles(self, suffix=b'', group=GEN_GROUP_ALL):
        # write files registered for generation
        any = False

        # `group` selects which generators run in this pass: everything,
        # only pre-finalize ones, or only post-finalize ones.
        if group == GEN_GROUP_ALL:
            skip_post = skip_pre = False
        else:
            skip_pre = group == GEN_GROUP_POST_FINALIZE
            skip_post = group == GEN_GROUP_PRE_FINALIZE

        for id, entry in sorted(pycompat.iteritems(self._filegenerators)):
            any = True
            order, filenames, genfunc, location = entry

            # for generation at closing, check if it's before or after finalize
            is_post = id in postfinalizegenerators
            if skip_post and is_post:
                continue
            elif skip_pre and not is_post:
                continue

            vfs = self._vfsmap[location]
            files = []
            try:
                for name in filenames:
                    name += suffix
                    if suffix:
                        self.registertmp(name, location=location)
                        checkambig = False
                    else:
                        self.addbackup(name, location=location)
                        checkambig = (name, location) in self._checkambigfiles
                    files.append(
                        vfs(name, b'w', atomictemp=True, checkambig=checkambig)
                    )
                genfunc(*files)
                for f in files:
                    f.close()
                # skip discard() loop since we're sure no open file remains
                del files[:]
            finally:
                for f in files:
                    f.discard()
        return any

    @active
    def find(self, file):
        if file in self._map:
            return self._entries[self._map[file]]
        if file in self._backupmap:
            return self._backupentries[self._backupmap[file]]
        return None

    @active
    def replace(self, file, offset, data=None):
        '''
        replace can only replace already committed entries
        that are not pending in the queue
        '''

        if file not in self._map:
            raise KeyError(file)
        index = self._map[file]
        self._entries[index] = (file, offset, data)
        self._file.write(b"%s\0%d\n" % (file, offset))
        self._file.flush()

    @active
    def nest(self, name='<unnamed>'):
        self._count += 1
        self._usages += 1
        self._names.append(name)
        return self

    def release(self):
        if self._count > 0:
            self._usages -= 1
        if self._names:
            self._names.pop()
        # if the transaction scopes are left without being closed, fail
        if self._count > 0 and self._usages == 0:
            self._abort()

    def running(self):
        return self._count > 0

    def addpending(self, category, callback):
        """add a callback to be called when the transaction is pending

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._pendingcallback[category] = callback

    @active
    def writepending(self):
        '''write pending file to temporary version

        This is used to allow hooks to view a transaction before commit'''
        categories = sorted(self._pendingcallback)
        for cat in categories:
            # remove callback since the data will have been flushed
            any = self._pendingcallback.pop(cat)(self)
            self._anypending = self._anypending or any
        self._anypending |= self._generatefiles(suffix=b'.pending')
        return self._anypending

    @active
    def hasfinalize(self, category):
        """check is a callback already exist for a category
        """
        return category in self._finalizecallback

    @active
    def addfinalize(self, category, callback):
        """add a callback to be called when the transaction is closed

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting old callbacks with
        newer callbacks.
        """
        self._finalizecallback[category] = callback

    @active
    def addpostclose(self, category, callback):
        """add or replace a callback to be called after the transaction closed

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._postclosecallback[category] = callback

    @active
    def getpostclose(self, category):
        """return a postclose callback added before, or None"""
        return self._postclosecallback.get(category, None)

    @active
    def addabort(self, category, callback):
        """add a callback to be called when the transaction is aborted.

        The transaction will be given as the first argument to the callback.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._abortcallback[category] = callback

    @active
    def close(self):
        '''commit the transaction'''
        if self._count == 1:
            self._validator(self)  # will raise exception if needed
            self._validator = None  # Help prevent cycles.
            self._generatefiles(group=GEN_GROUP_PRE_FINALIZE)
            # finalizers may register more finalizers; loop until drained
            while self._finalizecallback:
                callbacks = self._finalizecallback
                self._finalizecallback = {}
                categories = sorted(callbacks)
                for cat in categories:
                    callbacks[cat](self)
            # Prevent double usage and help clear cycles.
            self._finalizecallback = None
            self._generatefiles(group=GEN_GROUP_POST_FINALIZE)

        self._count -= 1
        if self._count != 0:
            return
        self._file.close()
        self._backupsfile.close()
        # cleanup temporary files
        for l, f, b, c in self._backupentries:
            if l not in self._vfsmap and c:
                self._report(
                    b"couldn't remove %s: unknown cache location %s\n" % (b, l)
                )
                continue
            vfs = self._vfsmap[l]
            if not f and b and vfs.exists(b):
                try:
                    vfs.unlink(b)
                except (IOError, OSError, error.Abort) as inst:
                    if not c:
                        raise
                    # Abort may be raised by read only opener
                    self._report(
                        b"couldn't remove %s: %s\n" % (vfs.join(b), inst)
                    )
        self._entries = []
        self._writeundo()
        if self._after:
            self._after()
            self._after = None  # Help prevent cycles.
        if self._opener.isfile(self._backupjournal):
            self._opener.unlink(self._backupjournal)
        if self._opener.isfile(self._journal):
            self._opener.unlink(self._journal)
        for l, _f, b, c in self._backupentries:
            if l not in self._vfsmap and c:
                self._report(
                    b"couldn't remove %s: unknown cache location"
                    b"%s\n" % (b, l)
                )
                continue
            vfs = self._vfsmap[l]
            if b and vfs.exists(b):
                try:
                    vfs.unlink(b)
                except (IOError, OSError, error.Abort) as inst:
                    if not c:
                        raise
                    # Abort may be raised by read only opener
                    self._report(
                        b"couldn't remove %s: %s\n" % (vfs.join(b), inst)
                    )
        self._backupentries = []
        self._journal = None

        self._releasefn(self, True)  # notify success of closing transaction
        self._releasefn = None  # Help prevent cycles.

        # run post close action
        categories = sorted(self._postclosecallback)
        for cat in categories:
            self._postclosecallback[cat](self)
        # Prevent double usage and help clear cycles.
        self._postclosecallback = None

    @active
    def abort(self):
        '''abort the transaction (generally called on error, or when the
        transaction is not explicitly committed before going out of
        scope)'''
        self._abort()

    def _writeundo(self):
        """write transaction data for possible future undo call"""
        if self._undoname is None:
            return
        undobackupfile = self._opener.open(
            b"%s.backupfiles" % self._undoname, b'w'
        )
        undobackupfile.write(b'%d\n' % version)
        for l, f, b, c in self._backupentries:
            if not f:  # temporary file
                continue
            if not b:
                u = b''
            else:
                if l not in self._vfsmap and c:
                    self._report(
                        b"couldn't remove %s: unknown cache location"
                        b"%s\n" % (b, l)
                    )
                    continue
                vfs = self._vfsmap[l]
                base, name = vfs.split(b)
                assert name.startswith(self._journal), name
                uname = name.replace(self._journal, self._undoname, 1)
                u = vfs.reljoin(base, uname)
                util.copyfile(vfs.join(b), vfs.join(u), hardlink=True)
            undobackupfile.write(b"%s\0%s\0%s\0%d\n" % (l, f, u, c))
        undobackupfile.close()

    def _abort(self):
        self._count = 0
        self._usages = 0
        self._file.close()
        self._backupsfile.close()

        try:
            # nothing was written: just remove the (empty) journal files
            if not self._entries and not self._backupentries:
                if self._backupjournal:
                    self._opener.unlink(self._backupjournal)
                if self._journal:
                    self._opener.unlink(self._journal)
                return

            self._report(_(b"transaction abort!\n"))

            try:
                for cat in sorted(self._abortcallback):
                    self._abortcallback[cat](self)
                # Prevent double usage and help clear cycles.
                self._abortcallback = None
                _playback(
                    self._journal,
                    self._report,
                    self._opener,
                    self._vfsmap,
                    self._entries,
                    self._backupentries,
                    False,
                    checkambigfiles=self._checkambigfiles,
                )
                self._report(_(b"rollback completed\n"))
            except BaseException as exc:
                self._report(_(b"rollback failed - please run hg recover\n"))
                self._report(
                    _(b"(failure reason: %s)\n") % stringutil.forcebytestr(exc)
                )
        finally:
            self._journal = None
            self._releasefn(self, False)  # notify failure of transaction
            self._releasefn = None  # Help prevent cycles.
660 666
def rollback(opener, vfsmap, file, report, checkambigfiles=None):
    """Rolls back the transaction contained in the given file

    Reads the entries in the specified file, and the corresponding
    '*.backupfiles' file, to recover from an incomplete transaction.

    * `file`: a file containing a list of entries, specifying where
    to truncate each file. The file should contain a list of
    file\0offset pairs, delimited by newlines. The corresponding
    '*.backupfiles' file should contain a list of file\0backupfile
    pairs, delimited by \0.

    `checkambigfiles` is a set of (path, vfs-location) tuples,
    which determine whether file stat ambiguity should be avoided at
    restoring corresponded files.
    """
    entries = []
    backupentries = []

    fp = opener.open(file)
    lines = fp.readlines()
    fp.close()
    for l in lines:
        try:
            f, o = l.split(b'\0')
            entries.append((f, int(o), None))
        except ValueError:
            report(
                _(b"couldn't read journal entry %r!\n") % pycompat.bytestr(l)
            )

    backupjournal = b"%s.backupfiles" % file
    if opener.exists(backupjournal):
        fp = opener.open(backupjournal)
        lines = fp.readlines()
        if lines:
            # first line carries the backup-journal format version
            ver = lines[0][:-1]
            if ver == (b'%d' % version):
                for line in lines[1:]:
                    if line:
                        # Shave off the trailing newline
                        line = line[:-1]
                        l, f, b, c = line.split(b'\0')
                        backupentries.append((l, f, b, bool(c)))
            else:
                report(
                    _(
                        b"journal was created by a different version of "
                        b"Mercurial\n"
                    )
                )

    _playback(
        file,
        report,
        opener,
        vfsmap,
        entries,
        backupentries,
        checkambigfiles=checkambigfiles,
    )
General Comments 0
You need to be logged in to leave comments. Login now