##// END OF EJS Templates
transaction: allow finalizer to add finalizer...
marmoute -
r44556:2f1d6180 default
parent child Browse files
Show More
@@ -1,718 +1,721 b''
1 1 # transaction.py - simple journaling scheme for mercurial
2 2 #
3 3 # This transaction scheme is intended to gracefully handle program
4 4 # errors and interruptions. More serious failures like system crashes
5 5 # can be recovered with an fsck-like tool. As the whole repository is
6 6 # effectively log-structured, this should amount to simply truncating
7 7 # anything that isn't referenced in the changelog.
8 8 #
9 9 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
10 10 #
11 11 # This software may be used and distributed according to the terms of the
12 12 # GNU General Public License version 2 or any later version.
13 13
14 14 from __future__ import absolute_import
15 15
16 16 import errno
17 17
18 18 from .i18n import _
19 19 from . import (
20 20 error,
21 21 pycompat,
22 22 util,
23 23 )
24 24 from .utils import stringutil
25 25
26 26 version = 2
27 27
28 28 # These are the file generators that should only be executed after the
29 29 # finalizers are done, since they rely on the output of the finalizers (like
30 30 # the changelog having been written).
31 31 postfinalizegenerators = {b'bookmarks', b'dirstate'}
32 32
33 33 gengroupall = b'all'
34 34 gengroupprefinalize = b'prefinalize'
35 35 gengrouppostfinalize = b'postfinalize'
36 36
37 37
def active(func):
    """Decorator: only run *func* while the transaction is still live.

    Raises error.Abort once the transaction has been committed or
    aborted (i.e. its usage count dropped to zero).
    """

    def _active(self, *args, **kwds):
        if self._count != 0:
            return func(self, *args, **kwds)
        msg = _(b'cannot use transaction when it is already committed/aborted')
        raise error.Abort(msg)

    return _active
49 49
50 50
def _playback(
    journal,
    report,
    opener,
    vfsmap,
    entries,
    backupentries,
    unlink=True,
    checkambigfiles=None,
):
    """Replay a journal to roll files back to their pre-transaction state.

    * `journal`: path of the journal file (relative to `opener`)
    * `report`: callable used to emit progress/error messages
    * `opener`: vfs rooted at the store content
    * `vfsmap`: {location -> vfs} used to resolve backup locations
    * `entries`: (file, offset, data) triples; files are truncated back to
      `offset` (or removed when offset is 0 and `unlink` is true)
    * `backupentries`: (location, file, backuppath, cache) tuples; files are
      restored from their backup copies
    * `unlink`: when true, files recorded at offset 0 are unlinked instead
      of truncated
    * `checkambigfiles`: set of (path, vfs-location) tuples for which file
      stat ambiguity must be avoided
    """
    # first pass: truncate (or remove) the journalled append-only files
    for f, o, _ignore in entries:
        if o or not unlink:
            checkambig = checkambigfiles and (f, b'') in checkambigfiles
            try:
                fp = opener(f, b'a', checkambig=checkambig)
                if fp.tell() < o:
                    raise error.Abort(
                        _(
                            b"attempted to truncate %s to %d bytes, but it was "
                            b"already %d bytes\n"
                        )
                        % (f, o, fp.tell())
                    )
                fp.truncate(o)
                fp.close()
            except IOError:
                report(_(b"failed to truncate %s\n") % f)
                raise
        else:
            try:
                opener.unlink(f)
            except (IOError, OSError) as inst:
                # the file may legitimately be gone already
                if inst.errno != errno.ENOENT:
                    raise

    # second pass: restore files from their backup copies (or remove
    # temporary transaction files, which have no 'file' component)
    backupfiles = []
    for l, f, b, c in backupentries:
        if l not in vfsmap and c:
            report(b"couldn't handle %s: unknown cache location %s\n" % (b, l))
        vfs = vfsmap[l]
        try:
            if f and b:
                filepath = vfs.join(f)
                backuppath = vfs.join(b)
                checkambig = checkambigfiles and (f, l) in checkambigfiles
                try:
                    util.copyfile(backuppath, filepath, checkambig=checkambig)
                    backupfiles.append(b)
                except IOError:
                    report(_(b"failed to recover %s\n") % f)
            else:
                target = f or b
                try:
                    vfs.unlink(target)
                except (IOError, OSError) as inst:
                    if inst.errno != errno.ENOENT:
                        raise
        except (IOError, OSError, error.Abort):
            # cache files (c set) are best-effort; anything else re-raises
            if not c:
                raise

    # finally: remove the journal files themselves, then the backup copies
    backuppath = b"%s.backupfiles" % journal
    if opener.exists(backuppath):
        opener.unlink(backuppath)
    opener.unlink(journal)
    try:
        for f in backupfiles:
            if opener.exists(f):
                opener.unlink(f)
    except (IOError, OSError, error.Abort):
        # only pure backup file remains, it is safe to ignore any error
        pass
123 123
124 124
125 125 class transaction(util.transactional):
    def __init__(
        self,
        report,
        opener,
        vfsmap,
        journalname,
        undoname=None,
        after=None,
        createmode=None,
        validator=None,
        releasefn=None,
        checkambigfiles=None,
        name='<unnamed>',
    ):
        """Begin a new transaction

        Begins a new transaction that allows rolling back writes in the event of
        an exception.

        * `report`: callable used to emit progress and error messages
        * `opener`: vfs rooted at the store content
        * `vfsmap`: {location -> vfs} map; b'' is mapped to `opener`
        * `journalname`: path of the journal file to create
        * `undoname`: base name used by _writeundo for future undo support
        * `after`: called after the transaction has been committed
        * `createmode`: the mode of the journal file that will be created
        * `validator`: called with the transaction before it is closed;
          should raise to veto the close (target user is repository hooks)
        * `releasefn`: called after releasing (with transaction and result)
        * `name`: label shown by __repr__ for debugging

        `checkambigfiles` is a set of (path, vfs-location) tuples,
        which determine whether file stat ambiguity should be avoided
        for corresponded files.
        """
        self._count = 1
        self._usages = 1
        self._report = report
        # a vfs to the store content
        self._opener = opener
        # a map to access file in various {location -> vfs}
        vfsmap = vfsmap.copy()
        vfsmap[b''] = opener  # set default value
        self._vfsmap = vfsmap
        self._after = after
        self._entries = []
        self._map = {}
        self._journal = journalname
        self._undoname = undoname
        self._queue = []
        # A callback to validate transaction content before closing it.
        # should raise exception if anything is wrong.
        # target user is repository hooks.
        if validator is None:
            validator = lambda tr: None
        self._validator = validator
        # A callback to do something just after releasing transaction.
        if releasefn is None:
            releasefn = lambda tr, success: None
        self._releasefn = releasefn

        self._checkambigfiles = set()
        if checkambigfiles:
            self._checkambigfiles.update(checkambigfiles)

        self._names = [name]

        # A dict dedicated to precisely tracking the changes introduced in the
        # transaction.
        self.changes = {}

        # a dict of arguments to be passed to hooks
        self.hookargs = {}
        self._file = opener.open(self._journal, b"w")

        # a list of ('location', 'path', 'backuppath', cache) entries.
        # - if 'backuppath' is empty, no file existed at backup time
        # - if 'path' is empty, this is a temporary transaction file
        # - if 'location' is not empty, the path is outside main opener reach.
        #   use 'location' value as a key in a vfsmap to find the right 'vfs'
        # (cache is currently unused)
        self._backupentries = []
        self._backupmap = {}
        self._backupjournal = b"%s.backupfiles" % self._journal
        self._backupsfile = opener.open(self._backupjournal, b'w')
        self._backupsfile.write(b'%d\n' % version)

        if createmode is not None:
            opener.chmod(self._journal, createmode & 0o666)
            opener.chmod(self._backupjournal, createmode & 0o666)

        # hold file generations to be performed on commit
        self._filegenerators = {}
        # hold callback to write pending data for hooks
        self._pendingcallback = {}
        # True if any pending data have been written ever
        self._anypending = False
        # holds callback to call when writing the transaction
        self._finalizecallback = {}
        # hold callback for post transaction close
        self._postclosecallback = {}
        # holds callbacks to call during abort
        self._abortcallback = {}
221 221
222 222 def __repr__(self):
223 223 name = '/'.join(self._names)
224 224 return '<transaction name=%s, count=%d, usages=%d>' % (
225 225 name,
226 226 self._count,
227 227 self._usages,
228 228 )
229 229
230 230 def __del__(self):
231 231 if self._journal:
232 232 self._abort()
233 233
234 234 @active
235 235 def startgroup(self):
236 236 """delay registration of file entry
237 237
238 238 This is used by strip to delay vision of strip offset. The transaction
239 239 sees either none or all of the strip actions to be done."""
240 240 self._queue.append([])
241 241
242 242 @active
243 243 def endgroup(self):
244 244 """apply delayed registration of file entry.
245 245
246 246 This is used by strip to delay vision of strip offset. The transaction
247 247 sees either none or all of the strip actions to be done."""
248 248 q = self._queue.pop()
249 249 for f, o, data in q:
250 250 self._addentry(f, o, data)
251 251
252 252 @active
253 253 def add(self, file, offset, data=None):
254 254 """record the state of an append-only file before update"""
255 255 if file in self._map or file in self._backupmap:
256 256 return
257 257 if self._queue:
258 258 self._queue[-1].append((file, offset, data))
259 259 return
260 260
261 261 self._addentry(file, offset, data)
262 262
263 263 def _addentry(self, file, offset, data):
264 264 """add a append-only entry to memory and on-disk state"""
265 265 if file in self._map or file in self._backupmap:
266 266 return
267 267 self._entries.append((file, offset, data))
268 268 self._map[file] = len(self._entries) - 1
269 269 # add enough data to the journal to do the truncate
270 270 self._file.write(b"%s\0%d\n" % (file, offset))
271 271 self._file.flush()
272 272
273 273 @active
274 274 def addbackup(self, file, hardlink=True, location=b''):
275 275 """Adds a backup of the file to the transaction
276 276
277 277 Calling addbackup() creates a hardlink backup of the specified file
278 278 that is used to recover the file in the event of the transaction
279 279 aborting.
280 280
281 281 * `file`: the file path, relative to .hg/store
282 282 * `hardlink`: use a hardlink to quickly create the backup
283 283 """
284 284 if self._queue:
285 285 msg = b'cannot use transaction.addbackup inside "group"'
286 286 raise error.ProgrammingError(msg)
287 287
288 288 if file in self._map or file in self._backupmap:
289 289 return
290 290 vfs = self._vfsmap[location]
291 291 dirname, filename = vfs.split(file)
292 292 backupfilename = b"%s.backup.%s" % (self._journal, filename)
293 293 backupfile = vfs.reljoin(dirname, backupfilename)
294 294 if vfs.exists(file):
295 295 filepath = vfs.join(file)
296 296 backuppath = vfs.join(backupfile)
297 297 util.copyfile(filepath, backuppath, hardlink=hardlink)
298 298 else:
299 299 backupfile = b''
300 300
301 301 self._addbackupentry((location, file, backupfile, False))
302 302
303 303 def _addbackupentry(self, entry):
304 304 """register a new backup entry and write it to disk"""
305 305 self._backupentries.append(entry)
306 306 self._backupmap[entry[1]] = len(self._backupentries) - 1
307 307 self._backupsfile.write(b"%s\0%s\0%s\0%d\n" % entry)
308 308 self._backupsfile.flush()
309 309
310 310 @active
311 311 def registertmp(self, tmpfile, location=b''):
312 312 """register a temporary transaction file
313 313
314 314 Such files will be deleted when the transaction exits (on both
315 315 failure and success).
316 316 """
317 317 self._addbackupentry((location, b'', tmpfile, False))
318 318
319 319 @active
320 320 def addfilegenerator(
321 321 self, genid, filenames, genfunc, order=0, location=b''
322 322 ):
323 323 """add a function to generates some files at transaction commit
324 324
325 325 The `genfunc` argument is a function capable of generating proper
326 326 content of each entry in the `filename` tuple.
327 327
328 328 At transaction close time, `genfunc` will be called with one file
329 329 object argument per entries in `filenames`.
330 330
331 331 The transaction itself is responsible for the backup, creation and
332 332 final write of such file.
333 333
334 334 The `genid` argument is used to ensure the same set of file is only
335 335 generated once. Call to `addfilegenerator` for a `genid` already
336 336 present will overwrite the old entry.
337 337
338 338 The `order` argument may be used to control the order in which multiple
339 339 generator will be executed.
340 340
341 341 The `location` arguments may be used to indicate the files are located
342 342 outside of the the standard directory for transaction. It should match
343 343 one of the key of the `transaction.vfsmap` dictionary.
344 344 """
345 345 # For now, we are unable to do proper backup and restore of custom vfs
346 346 # but for bookmarks that are handled outside this mechanism.
347 347 self._filegenerators[genid] = (order, filenames, genfunc, location)
348 348
349 349 @active
350 350 def removefilegenerator(self, genid):
351 351 """reverse of addfilegenerator, remove a file generator function"""
352 352 if genid in self._filegenerators:
353 353 del self._filegenerators[genid]
354 354
355 355 def _generatefiles(self, suffix=b'', group=gengroupall):
356 356 # write files registered for generation
357 357 any = False
358 358 for id, entry in sorted(pycompat.iteritems(self._filegenerators)):
359 359 any = True
360 360 order, filenames, genfunc, location = entry
361 361
362 362 # for generation at closing, check if it's before or after finalize
363 363 postfinalize = group == gengrouppostfinalize
364 364 if (
365 365 group != gengroupall
366 366 and (id in postfinalizegenerators) != postfinalize
367 367 ):
368 368 continue
369 369
370 370 vfs = self._vfsmap[location]
371 371 files = []
372 372 try:
373 373 for name in filenames:
374 374 name += suffix
375 375 if suffix:
376 376 self.registertmp(name, location=location)
377 377 checkambig = False
378 378 else:
379 379 self.addbackup(name, location=location)
380 380 checkambig = (name, location) in self._checkambigfiles
381 381 files.append(
382 382 vfs(name, b'w', atomictemp=True, checkambig=checkambig)
383 383 )
384 384 genfunc(*files)
385 385 for f in files:
386 386 f.close()
387 387 # skip discard() loop since we're sure no open file remains
388 388 del files[:]
389 389 finally:
390 390 for f in files:
391 391 f.discard()
392 392 return any
393 393
394 394 @active
395 395 def find(self, file):
396 396 if file in self._map:
397 397 return self._entries[self._map[file]]
398 398 if file in self._backupmap:
399 399 return self._backupentries[self._backupmap[file]]
400 400 return None
401 401
402 402 @active
403 403 def replace(self, file, offset, data=None):
404 404 '''
405 405 replace can only replace already committed entries
406 406 that are not pending in the queue
407 407 '''
408 408
409 409 if file not in self._map:
410 410 raise KeyError(file)
411 411 index = self._map[file]
412 412 self._entries[index] = (file, offset, data)
413 413 self._file.write(b"%s\0%d\n" % (file, offset))
414 414 self._file.flush()
415 415
416 416 @active
417 417 def nest(self, name='<unnamed>'):
418 418 self._count += 1
419 419 self._usages += 1
420 420 self._names.append(name)
421 421 return self
422 422
423 423 def release(self):
424 424 if self._count > 0:
425 425 self._usages -= 1
426 426 if self._names:
427 427 self._names.pop()
428 428 # if the transaction scopes are left without being closed, fail
429 429 if self._count > 0 and self._usages == 0:
430 430 self._abort()
431 431
432 432 def running(self):
433 433 return self._count > 0
434 434
435 435 def addpending(self, category, callback):
436 436 """add a callback to be called when the transaction is pending
437 437
438 438 The transaction will be given as callback's first argument.
439 439
440 440 Category is a unique identifier to allow overwriting an old callback
441 441 with a newer callback.
442 442 """
443 443 self._pendingcallback[category] = callback
444 444
445 445 @active
446 446 def writepending(self):
447 447 '''write pending file to temporary version
448 448
449 449 This is used to allow hooks to view a transaction before commit'''
450 450 categories = sorted(self._pendingcallback)
451 451 for cat in categories:
452 452 # remove callback since the data will have been flushed
453 453 any = self._pendingcallback.pop(cat)(self)
454 454 self._anypending = self._anypending or any
455 455 self._anypending |= self._generatefiles(suffix=b'.pending')
456 456 return self._anypending
457 457
458 458 @active
459 459 def hasfinalize(self, category):
460 460 """check is a callback already exist for a category
461 461 """
462 462 return category in self._finalizecallback
463 463
464 464 @active
465 465 def addfinalize(self, category, callback):
466 466 """add a callback to be called when the transaction is closed
467 467
468 468 The transaction will be given as callback's first argument.
469 469
470 470 Category is a unique identifier to allow overwriting old callbacks with
471 471 newer callbacks.
472 472 """
473 473 self._finalizecallback[category] = callback
474 474
475 475 @active
476 476 def addpostclose(self, category, callback):
477 477 """add or replace a callback to be called after the transaction closed
478 478
479 479 The transaction will be given as callback's first argument.
480 480
481 481 Category is a unique identifier to allow overwriting an old callback
482 482 with a newer callback.
483 483 """
484 484 self._postclosecallback[category] = callback
485 485
486 486 @active
487 487 def getpostclose(self, category):
488 488 """return a postclose callback added before, or None"""
489 489 return self._postclosecallback.get(category, None)
490 490
491 491 @active
492 492 def addabort(self, category, callback):
493 493 """add a callback to be called when the transaction is aborted.
494 494
495 495 The transaction will be given as the first argument to the callback.
496 496
497 497 Category is a unique identifier to allow overwriting an old callback
498 498 with a newer callback.
499 499 """
500 500 self._abortcallback[category] = callback
501 501
502 502 @active
503 503 def close(self):
504 504 '''commit the transaction'''
505 505 if self._count == 1:
506 506 self._validator(self) # will raise exception if needed
507 507 self._validator = None # Help prevent cycles.
508 508 self._generatefiles(group=gengroupprefinalize)
509 categories = sorted(self._finalizecallback)
510 for cat in categories:
511 self._finalizecallback[cat](self)
509 while self._finalizecallback:
510 callbacks = self._finalizecallback
511 self._finalizecallback = {}
512 categories = sorted(callbacks)
513 for cat in categories:
514 callbacks[cat](self)
512 515 # Prevent double usage and help clear cycles.
513 516 self._finalizecallback = None
514 517 self._generatefiles(group=gengrouppostfinalize)
515 518
516 519 self._count -= 1
517 520 if self._count != 0:
518 521 return
519 522 self._file.close()
520 523 self._backupsfile.close()
521 524 # cleanup temporary files
522 525 for l, f, b, c in self._backupentries:
523 526 if l not in self._vfsmap and c:
524 527 self._report(
525 528 b"couldn't remove %s: unknown cache location %s\n" % (b, l)
526 529 )
527 530 continue
528 531 vfs = self._vfsmap[l]
529 532 if not f and b and vfs.exists(b):
530 533 try:
531 534 vfs.unlink(b)
532 535 except (IOError, OSError, error.Abort) as inst:
533 536 if not c:
534 537 raise
535 538 # Abort may be raise by read only opener
536 539 self._report(
537 540 b"couldn't remove %s: %s\n" % (vfs.join(b), inst)
538 541 )
539 542 self._entries = []
540 543 self._writeundo()
541 544 if self._after:
542 545 self._after()
543 546 self._after = None # Help prevent cycles.
544 547 if self._opener.isfile(self._backupjournal):
545 548 self._opener.unlink(self._backupjournal)
546 549 if self._opener.isfile(self._journal):
547 550 self._opener.unlink(self._journal)
548 551 for l, _f, b, c in self._backupentries:
549 552 if l not in self._vfsmap and c:
550 553 self._report(
551 554 b"couldn't remove %s: unknown cache location"
552 555 b"%s\n" % (b, l)
553 556 )
554 557 continue
555 558 vfs = self._vfsmap[l]
556 559 if b and vfs.exists(b):
557 560 try:
558 561 vfs.unlink(b)
559 562 except (IOError, OSError, error.Abort) as inst:
560 563 if not c:
561 564 raise
562 565 # Abort may be raise by read only opener
563 566 self._report(
564 567 b"couldn't remove %s: %s\n" % (vfs.join(b), inst)
565 568 )
566 569 self._backupentries = []
567 570 self._journal = None
568 571
569 572 self._releasefn(self, True) # notify success of closing transaction
570 573 self._releasefn = None # Help prevent cycles.
571 574
572 575 # run post close action
573 576 categories = sorted(self._postclosecallback)
574 577 for cat in categories:
575 578 self._postclosecallback[cat](self)
576 579 # Prevent double usage and help clear cycles.
577 580 self._postclosecallback = None
578 581
579 582 @active
580 583 def abort(self):
581 584 '''abort the transaction (generally called on error, or when the
582 585 transaction is not explicitly committed before going out of
583 586 scope)'''
584 587 self._abort()
585 588
    def _writeundo(self):
        """write transaction data for possible future undo call"""
        if self._undoname is None:
            return
        # the undo journal mirrors the backup journal format written by
        # __init__ / _addbackupentry
        undobackupfile = self._opener.open(
            b"%s.backupfiles" % self._undoname, b'w'
        )
        undobackupfile.write(b'%d\n' % version)
        for l, f, b, c in self._backupentries:
            if not f:  # temporary file
                continue
            if not b:
                # no backup existed at backup time; record an empty path
                u = b''
            else:
                if l not in self._vfsmap and c:
                    self._report(
                        b"couldn't remove %s: unknown cache location"
                        b"%s\n" % (b, l)
                    )
                    continue
                vfs = self._vfsmap[l]
                base, name = vfs.split(b)
                assert name.startswith(self._journal), name
                # rename the backup from journal-name.* to undo-name.*
                uname = name.replace(self._journal, self._undoname, 1)
                u = vfs.reljoin(base, uname)
                util.copyfile(vfs.join(b), vfs.join(u), hardlink=True)
            undobackupfile.write(b"%s\0%s\0%s\0%d\n" % (l, f, u, c))
        undobackupfile.close()
614 617
    def _abort(self):
        """Roll the transaction back (internal implementation).

        Runs the registered abort callbacks, replays the journal to restore
        every touched file, and releases the transaction.
        """
        self._count = 0
        self._usages = 0
        self._file.close()
        self._backupsfile.close()

        try:
            if not self._entries and not self._backupentries:
                # nothing was written: just remove the journal files
                if self._backupjournal:
                    self._opener.unlink(self._backupjournal)
                if self._journal:
                    self._opener.unlink(self._journal)
                return

            self._report(_(b"transaction abort!\n"))

            try:
                for cat in sorted(self._abortcallback):
                    self._abortcallback[cat](self)
                # Prevent double usage and help clear cycles.
                self._abortcallback = None
                _playback(
                    self._journal,
                    self._report,
                    self._opener,
                    self._vfsmap,
                    self._entries,
                    self._backupentries,
                    False,
                    checkambigfiles=self._checkambigfiles,
                )
                self._report(_(b"rollback completed\n"))
            except BaseException as exc:
                # deliberately broad: even on KeyboardInterrupt/SystemExit
                # the failure is reported, recovery is left to 'hg recover'
                self._report(_(b"rollback failed - please run hg recover\n"))
                self._report(
                    _(b"(failure reason: %s)\n") % stringutil.forcebytestr(exc)
                )
        finally:
            self._journal = None
            self._releasefn(self, False)  # notify failure of transaction
            self._releasefn = None  # Help prevent cycles.
656 659
657 660
def rollback(opener, vfsmap, file, report, checkambigfiles=None):
    """Rolls back the transaction contained in the given file

    Reads the entries in the specified file, and the corresponding
    '*.backupfiles' file, to recover from an incomplete transaction.

    * `file`: a file containing a list of entries, specifying where
      to truncate each file.  The file should contain a list of
      file\0offset pairs, delimited by newlines.  The corresponding
      '*.backupfiles' file should contain a list of file\0backupfile
      pairs, delimited by \0.

    `checkambigfiles` is a set of (path, vfs-location) tuples,
    which determine whether file stat ambiguity should be avoided at
    restoring corresponded files.
    """
    entries = []
    backupentries = []

    # parse the journal proper: one 'file\0offset' record per line
    fp = opener.open(file)
    lines = fp.readlines()
    fp.close()
    for line in lines:
        try:
            fname, offset = line.split(b'\0')
            entries.append((fname, int(offset), None))
        except ValueError:
            report(
                _(b"couldn't read journal entry %r!\n") % pycompat.bytestr(line)
            )

    # parse the backup journal, if present and version-compatible
    backupjournal = b"%s.backupfiles" % file
    if opener.exists(backupjournal):
        fp = opener.open(backupjournal)
        lines = fp.readlines()
        # fix: this handle was previously never closed (resource leak)
        fp.close()
        if lines:
            ver = lines[0][:-1]
            if ver != (b'%d' % version):
                report(
                    _(
                        b"journal was created by a different version of "
                        b"Mercurial\n"
                    )
                )
            else:
                for line in lines[1:]:
                    if line:
                        # Shave off the trailing newline
                        line = line[:-1]
                        loc, fname, backup, cache = line.split(b'\0')
                        backupentries.append((loc, fname, backup, bool(cache)))

    _playback(
        file,
        report,
        opener,
        vfsmap,
        entries,
        backupentries,
        checkambigfiles=checkambigfiles,
    )
General Comments 0
You need to be logged in to leave comments. Login now