##// END OF EJS Templates
transaction: move constant to upper case...
marmoute -
r44886:baf8c3f9 default
parent child Browse files
Show More
@@ -1,721 +1,721 b''
1 1 # transaction.py - simple journaling scheme for mercurial
2 2 #
3 3 # This transaction scheme is intended to gracefully handle program
4 4 # errors and interruptions. More serious failures like system crashes
5 5 # can be recovered with an fsck-like tool. As the whole repository is
6 6 # effectively log-structured, this should amount to simply truncating
7 7 # anything that isn't referenced in the changelog.
8 8 #
9 9 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
10 10 #
11 11 # This software may be used and distributed according to the terms of the
12 12 # GNU General Public License version 2 or any later version.
13 13
14 14 from __future__ import absolute_import
15 15
16 16 import errno
17 17
18 18 from .i18n import _
19 19 from . import (
20 20 error,
21 21 pycompat,
22 22 util,
23 23 )
24 24 from .utils import stringutil
25 25
26 26 version = 2
27 27
# These are the file generators that should only be executed after the
# finalizers are done, since they rely on the output of the finalizers (like
# the changelog having been written).
postfinalizegenerators = {b'bookmarks', b'dirstate'}

# Generator-group keys used to select which file generators run at a given
# point of the transaction life cycle (see _generatefiles()).
GEN_GROUP_ALL = b'all'
GEN_GROUP_PRE_FINALIZE = b'prefinalize'
GEN_GROUP_POST_FINALIZE = b'postfinalize'
36 36
37 37
def active(func):
    """Decorator ensuring the wrapped method runs on a live transaction.

    When the transaction count has dropped to zero (already committed or
    aborted), calling the wrapped method raises error.Abort instead of
    touching dead state.
    """

    def wrapper(self, *args, **kwds):
        if not self._count:
            msg = _(
                b'cannot use transaction when it is already committed/aborted'
            )
            raise error.Abort(msg)
        return func(self, *args, **kwds)

    return wrapper
49 49
50 50
def _playback(
    journal,
    report,
    opener,
    vfsmap,
    entries,
    backupentries,
    unlink=True,
    checkambigfiles=None,
):
    """Undo a (possibly partial) transaction by replaying its journal.

    * `journal`: path of the journal file, relative to `opener`
    * `report`: callable used to emit user-facing (bytes) messages
    * `opener`: vfs used for journal files and as the default vfs for
      backup entries whose location key is empty
    * `vfsmap`: {location-key: vfs} map used to resolve backup entries
    * `entries`: (file, offset, data) triples; each file is truncated back
      to `offset`, or unlinked when offset is 0 and `unlink` is True
    * `backupentries`: (location, file, backupfile, cache) tuples; backup
      files are copied back over their originals
    * `unlink`: when True, files journalled at offset 0 are removed
    * `checkambigfiles`: set of (path, vfs-location) tuples for which file
      stat ambiguity must be avoided when restoring
    """
    for f, o, _ignore in entries:
        if o or not unlink:
            checkambig = checkambigfiles and (f, b'') in checkambigfiles
            try:
                fp = opener(f, b'a', checkambig=checkambig)
                # a journal offset beyond the current size means the journal
                # does not match the file on disk: refuse to "truncate" up
                if fp.tell() < o:
                    raise error.Abort(
                        _(
                            b"attempted to truncate %s to %d bytes, but it was "
                            b"already %d bytes\n"
                        )
                        % (f, o, fp.tell())
                    )
                fp.truncate(o)
                fp.close()
            except IOError:
                report(_(b"failed to truncate %s\n") % f)
                raise
        else:
            # file did not exist before the transaction: remove it
            try:
                opener.unlink(f)
            except (IOError, OSError) as inst:
                if inst.errno != errno.ENOENT:
                    raise

    backupfiles = []
    for l, f, b, c in backupentries:
        if l not in vfsmap and c:
            report(b"couldn't handle %s: unknown cache location %s\n" % (b, l))
        vfs = vfsmap[l]
        try:
            if f and b:
                # a real backup exists: copy it back over the original
                filepath = vfs.join(f)
                backuppath = vfs.join(b)
                checkambig = checkambigfiles and (f, l) in checkambigfiles
                try:
                    util.copyfile(backuppath, filepath, checkambig=checkambig)
                    backupfiles.append(b)
                except IOError:
                    report(_(b"failed to recover %s\n") % f)
            else:
                # temporary file (no original path) or file with no backup:
                # remove whichever path is set
                target = f or b
                try:
                    vfs.unlink(target)
                except (IOError, OSError) as inst:
                    if inst.errno != errno.ENOENT:
                        raise
        except (IOError, OSError, error.Abort):
            # cache-flagged entries are restored on a best-effort basis only
            if not c:
                raise

    backuppath = b"%s.backupfiles" % journal
    if opener.exists(backuppath):
        opener.unlink(backuppath)
    opener.unlink(journal)
    try:
        for f in backupfiles:
            if opener.exists(f):
                opener.unlink(f)
    except (IOError, OSError, error.Abort):
        # only pure backup file remains, it is safe to ignore any error
        pass
123 123
124 124
125 125 class transaction(util.transactional):
    def __init__(
        self,
        report,
        opener,
        vfsmap,
        journalname,
        undoname=None,
        after=None,
        createmode=None,
        validator=None,
        releasefn=None,
        checkambigfiles=None,
        name='<unnamed>',
    ):
        """Begin a new transaction

        Begins a new transaction that allows rolling back writes in the event of
        an exception.

        * `after`: called after the transaction has been committed
        * `createmode`: the mode of the journal file that will be created
        * `releasefn`: called after releasing (with transaction and result)

        `checkambigfiles` is a set of (path, vfs-location) tuples,
        which determine whether file stat ambiguity should be avoided
        for corresponded files.
        """
        # number of open (nested) scopes; 0 means committed or aborted
        self._count = 1
        self._usages = 1
        self._report = report
        # a vfs to the store content
        self._opener = opener
        # a map to access file in various {location -> vfs}
        vfsmap = vfsmap.copy()
        vfsmap[b''] = opener  # set default value
        self._vfsmap = vfsmap
        self._after = after
        # journalled (file, offset, data) triples, mirrored on disk
        self._entries = []
        # file -> index into self._entries
        self._map = {}
        self._journal = journalname
        self._undoname = undoname
        # entries delayed by startgroup()/endgroup()
        self._queue = []
        # A callback to validate transaction content before closing it.
        # should raise exception is anything is wrong.
        # target user is repository hooks.
        if validator is None:
            validator = lambda tr: None
        self._validator = validator
        # A callback to do something just after releasing transaction.
        if releasefn is None:
            releasefn = lambda tr, success: None
        self._releasefn = releasefn

        self._checkambigfiles = set()
        if checkambigfiles:
            self._checkambigfiles.update(checkambigfiles)

        # names of this transaction and any nested scopes (see nest())
        self._names = [name]

        # A dict dedicated to precisely tracking the changes introduced in the
        # transaction.
        self.changes = {}

        # a dict of arguments to be passed to hooks
        self.hookargs = {}
        self._file = opener.open(self._journal, b"w")

        # a list of ('location', 'path', 'backuppath', cache) entries.
        # - if 'backuppath' is empty, no file existed at backup time
        # - if 'path' is empty, this is a temporary transaction file
        # - if 'location' is not empty, the path is outside main opener reach.
        #   use 'location' value as a key in a vfsmap to find the right 'vfs'
        # (cache is currently unused)
        self._backupentries = []
        self._backupmap = {}
        self._backupjournal = b"%s.backupfiles" % self._journal
        self._backupsfile = opener.open(self._backupjournal, b'w')
        self._backupsfile.write(b'%d\n' % version)

        if createmode is not None:
            opener.chmod(self._journal, createmode & 0o666)
            opener.chmod(self._backupjournal, createmode & 0o666)

        # hold file generations to be performed on commit
        self._filegenerators = {}
        # hold callback to write pending data for hooks
        self._pendingcallback = {}
        # True is any pending data have been written ever
        self._anypending = False
        # holds callback to call when writing the transaction
        self._finalizecallback = {}
        # hold callback for post transaction close
        self._postclosecallback = {}
        # holds callbacks to call during abort
        self._abortcallback = {}
221 221
222 222 def __repr__(self):
223 223 name = '/'.join(self._names)
224 224 return '<transaction name=%s, count=%d, usages=%d>' % (
225 225 name,
226 226 self._count,
227 227 self._usages,
228 228 )
229 229
    def __del__(self):
        # A still-set journal means the transaction was neither closed nor
        # aborted explicitly: roll it back when garbage collected.
        if self._journal:
            self._abort()
233 233
    @active
    def startgroup(self):
        """delay registration of file entry

        This is used by strip to delay vision of strip offset. The transaction
        sees either none or all of the strip actions to be done."""
        self._queue.append([])

    @active
    def endgroup(self):
        """apply delayed registration of file entry.

        This is used by strip to delay vision of strip offset. The transaction
        sees either none or all of the strip actions to be done."""
        q = self._queue.pop()
        for f, o, data in q:
            self._addentry(f, o, data)

    @active
    def add(self, file, offset, data=None):
        """record the state of an append-only file before update"""
        # already journalled or backed up: nothing to record
        if file in self._map or file in self._backupmap:
            return
        if self._queue:
            # inside a group: delay registration until endgroup()
            self._queue[-1].append((file, offset, data))
            return

        self._addentry(file, offset, data)

    def _addentry(self, file, offset, data):
        """add a append-only entry to memory and on-disk state"""
        if file in self._map or file in self._backupmap:
            return
        self._entries.append((file, offset, data))
        self._map[file] = len(self._entries) - 1
        # add enough data to the journal to do the truncate
        self._file.write(b"%s\0%d\n" % (file, offset))
        self._file.flush()
272 272
    @active
    def addbackup(self, file, hardlink=True, location=b''):
        """Adds a backup of the file to the transaction

        Calling addbackup() creates a hardlink backup of the specified file
        that is used to recover the file in the event of the transaction
        aborting.

        * `file`: the file path, relative to .hg/store
        * `hardlink`: use a hardlink to quickly create the backup
        """
        if self._queue:
            msg = b'cannot use transaction.addbackup inside "group"'
            raise error.ProgrammingError(msg)

        # already journalled or backed up: nothing to do
        if file in self._map or file in self._backupmap:
            return
        vfs = self._vfsmap[location]
        dirname, filename = vfs.split(file)
        # backup lives next to the file, named after the journal
        backupfilename = b"%s.backup.%s" % (self._journal, filename)
        backupfile = vfs.reljoin(dirname, backupfilename)
        if vfs.exists(file):
            filepath = vfs.join(file)
            backuppath = vfs.join(backupfile)
            util.copyfile(filepath, backuppath, hardlink=hardlink)
        else:
            # empty backup path records "no file existed at backup time"
            backupfile = b''

        self._addbackupentry((location, file, backupfile, False))

    def _addbackupentry(self, entry):
        """register a new backup entry and write it to disk"""
        self._backupentries.append(entry)
        self._backupmap[entry[1]] = len(self._backupentries) - 1
        self._backupsfile.write(b"%s\0%s\0%s\0%d\n" % entry)
        self._backupsfile.flush()

    @active
    def registertmp(self, tmpfile, location=b''):
        """register a temporary transaction file

        Such files will be deleted when the transaction exits (on both
        failure and success).
        """
        # empty 'path' marks the entry as a temporary file
        self._addbackupentry((location, b'', tmpfile, False))
318 318
    @active
    def addfilegenerator(
        self, genid, filenames, genfunc, order=0, location=b''
    ):
        """add a function to generate some files at transaction commit

        The `genfunc` argument is a function capable of generating proper
        content of each entry in the `filename` tuple.

        At transaction close time, `genfunc` will be called with one file
        object argument per entries in `filenames`.

        The transaction itself is responsible for the backup, creation and
        final write of such file.

        The `genid` argument is used to ensure the same set of file is only
        generated once. Call to `addfilegenerator` for a `genid` already
        present will overwrite the old entry.

        The `order` argument may be used to control the order in which multiple
        generator will be executed.

        The `location` arguments may be used to indicate the files are located
        outside of the standard directory for transaction. It should match
        one of the key of the `transaction.vfsmap` dictionary.
        """
        # For now, we are unable to do proper backup and restore of custom vfs
        # but for bookmarks that are handled outside this mechanism.
        self._filegenerators[genid] = (order, filenames, genfunc, location)

    @active
    def removefilegenerator(self, genid):
        """reverse of addfilegenerator, remove a file generator function"""
        if genid in self._filegenerators:
            del self._filegenerators[genid]
354 354
355 def _generatefiles(self, suffix=b'', group=gengroupall):
355 def _generatefiles(self, suffix=b'', group=GEN_GROUP_ALL):
356 356 # write files registered for generation
357 357 any = False
358 358 for id, entry in sorted(pycompat.iteritems(self._filegenerators)):
359 359 any = True
360 360 order, filenames, genfunc, location = entry
361 361
362 362 # for generation at closing, check if it's before or after finalize
363 postfinalize = group == gengrouppostfinalize
363 postfinalize = group == GEN_GROUP_POST_FINALIZE
364 364 if (
365 group != gengroupall
365 group != GEN_GROUP_ALL
366 366 and (id in postfinalizegenerators) != postfinalize
367 367 ):
368 368 continue
369 369
370 370 vfs = self._vfsmap[location]
371 371 files = []
372 372 try:
373 373 for name in filenames:
374 374 name += suffix
375 375 if suffix:
376 376 self.registertmp(name, location=location)
377 377 checkambig = False
378 378 else:
379 379 self.addbackup(name, location=location)
380 380 checkambig = (name, location) in self._checkambigfiles
381 381 files.append(
382 382 vfs(name, b'w', atomictemp=True, checkambig=checkambig)
383 383 )
384 384 genfunc(*files)
385 385 for f in files:
386 386 f.close()
387 387 # skip discard() loop since we're sure no open file remains
388 388 del files[:]
389 389 finally:
390 390 for f in files:
391 391 f.discard()
392 392 return any
393 393
    @active
    def find(self, file):
        # return the journal entry or backup entry recorded for `file`,
        # or None when the file is unknown to this transaction
        if file in self._map:
            return self._entries[self._map[file]]
        if file in self._backupmap:
            return self._backupentries[self._backupmap[file]]
        return None

    @active
    def replace(self, file, offset, data=None):
        '''
        replace can only replace already committed entries
        that are not pending in the queue
        '''

        if file not in self._map:
            raise KeyError(file)
        index = self._map[file]
        self._entries[index] = (file, offset, data)
        # mirror the updated offset in the on-disk journal
        self._file.write(b"%s\0%d\n" % (file, offset))
        self._file.flush()

    @active
    def nest(self, name='<unnamed>'):
        # open a nested scope; the transaction only really closes/aborts
        # once every scope has been released
        self._count += 1
        self._usages += 1
        self._names.append(name)
        return self

    def release(self):
        if self._count > 0:
            self._usages -= 1
        if self._names:
            self._names.pop()
        # if the transaction scopes are left without being closed, fail
        if self._count > 0 and self._usages == 0:
            self._abort()

    def running(self):
        # True while at least one scope is still open
        return self._count > 0
434 434
    def addpending(self, category, callback):
        """add a callback to be called when the transaction is pending

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._pendingcallback[category] = callback

    @active
    def writepending(self):
        '''write pending file to temporary version

        This is used to allow hooks to view a transaction before commit'''
        categories = sorted(self._pendingcallback)
        for cat in categories:
            # remove callback since the data will have been flushed
            any = self._pendingcallback.pop(cat)(self)
            self._anypending = self._anypending or any
        self._anypending |= self._generatefiles(suffix=b'.pending')
        return self._anypending

    @active
    def hasfinalize(self, category):
        """check if a callback already exists for a category
        """
        return category in self._finalizecallback

    @active
    def addfinalize(self, category, callback):
        """add a callback to be called when the transaction is closed

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting old callbacks with
        newer callbacks.
        """
        self._finalizecallback[category] = callback

    @active
    def addpostclose(self, category, callback):
        """add or replace a callback to be called after the transaction closed

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._postclosecallback[category] = callback

    @active
    def getpostclose(self, category):
        """return a postclose callback added before, or None"""
        return self._postclosecallback.get(category, None)

    @active
    def addabort(self, category, callback):
        """add a callback to be called when the transaction is aborted.

        The transaction will be given as the first argument to the callback.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._abortcallback[category] = callback
501 501
502 502 @active
503 503 def close(self):
504 504 '''commit the transaction'''
505 505 if self._count == 1:
506 506 self._validator(self) # will raise exception if needed
507 507 self._validator = None # Help prevent cycles.
508 self._generatefiles(group=gengroupprefinalize)
508 self._generatefiles(group=GEN_GROUP_PRE_FINALIZE)
509 509 while self._finalizecallback:
510 510 callbacks = self._finalizecallback
511 511 self._finalizecallback = {}
512 512 categories = sorted(callbacks)
513 513 for cat in categories:
514 514 callbacks[cat](self)
515 515 # Prevent double usage and help clear cycles.
516 516 self._finalizecallback = None
517 self._generatefiles(group=gengrouppostfinalize)
517 self._generatefiles(group=GEN_GROUP_POST_FINALIZE)
518 518
519 519 self._count -= 1
520 520 if self._count != 0:
521 521 return
522 522 self._file.close()
523 523 self._backupsfile.close()
524 524 # cleanup temporary files
525 525 for l, f, b, c in self._backupentries:
526 526 if l not in self._vfsmap and c:
527 527 self._report(
528 528 b"couldn't remove %s: unknown cache location %s\n" % (b, l)
529 529 )
530 530 continue
531 531 vfs = self._vfsmap[l]
532 532 if not f and b and vfs.exists(b):
533 533 try:
534 534 vfs.unlink(b)
535 535 except (IOError, OSError, error.Abort) as inst:
536 536 if not c:
537 537 raise
538 538 # Abort may be raise by read only opener
539 539 self._report(
540 540 b"couldn't remove %s: %s\n" % (vfs.join(b), inst)
541 541 )
542 542 self._entries = []
543 543 self._writeundo()
544 544 if self._after:
545 545 self._after()
546 546 self._after = None # Help prevent cycles.
547 547 if self._opener.isfile(self._backupjournal):
548 548 self._opener.unlink(self._backupjournal)
549 549 if self._opener.isfile(self._journal):
550 550 self._opener.unlink(self._journal)
551 551 for l, _f, b, c in self._backupentries:
552 552 if l not in self._vfsmap and c:
553 553 self._report(
554 554 b"couldn't remove %s: unknown cache location"
555 555 b"%s\n" % (b, l)
556 556 )
557 557 continue
558 558 vfs = self._vfsmap[l]
559 559 if b and vfs.exists(b):
560 560 try:
561 561 vfs.unlink(b)
562 562 except (IOError, OSError, error.Abort) as inst:
563 563 if not c:
564 564 raise
565 565 # Abort may be raise by read only opener
566 566 self._report(
567 567 b"couldn't remove %s: %s\n" % (vfs.join(b), inst)
568 568 )
569 569 self._backupentries = []
570 570 self._journal = None
571 571
572 572 self._releasefn(self, True) # notify success of closing transaction
573 573 self._releasefn = None # Help prevent cycles.
574 574
575 575 # run post close action
576 576 categories = sorted(self._postclosecallback)
577 577 for cat in categories:
578 578 self._postclosecallback[cat](self)
579 579 # Prevent double usage and help clear cycles.
580 580 self._postclosecallback = None
581 581
    @active
    def abort(self):
        '''abort the transaction (generally called on error, or when the
        transaction is not explicitly committed before going out of
        scope)'''
        self._abort()
588 588
    def _writeundo(self):
        """write transaction data for possible future undo call"""
        if self._undoname is None:
            return
        undobackupfile = self._opener.open(
            b"%s.backupfiles" % self._undoname, b'w'
        )
        undobackupfile.write(b'%d\n' % version)
        for l, f, b, c in self._backupentries:
            if not f:  # temporary file
                continue
            if not b:
                # no backup existed for this file
                u = b''
            else:
                if l not in self._vfsmap and c:
                    self._report(
                        b"couldn't remove %s: unknown cache location"
                        b"%s\n" % (b, l)
                    )
                    continue
                vfs = self._vfsmap[l]
                base, name = vfs.split(b)
                assert name.startswith(self._journal), name
                # rename the journal-era backup into an undo-era backup
                uname = name.replace(self._journal, self._undoname, 1)
                u = vfs.reljoin(base, uname)
                util.copyfile(vfs.join(b), vfs.join(u), hardlink=True)
            undobackupfile.write(b"%s\0%s\0%s\0%d\n" % (l, f, u, c))
        undobackupfile.close()
617 617
    def _abort(self):
        # mark the transaction dead before doing anything else so that
        # @active-guarded methods refuse to run during rollback
        self._count = 0
        self._usages = 0
        self._file.close()
        self._backupsfile.close()

        try:
            if not self._entries and not self._backupentries:
                # nothing was written: just remove the (empty) journals
                if self._backupjournal:
                    self._opener.unlink(self._backupjournal)
                if self._journal:
                    self._opener.unlink(self._journal)
                return

            self._report(_(b"transaction abort!\n"))

            try:
                for cat in sorted(self._abortcallback):
                    self._abortcallback[cat](self)
                # Prevent double usage and help clear cycles.
                self._abortcallback = None
                _playback(
                    self._journal,
                    self._report,
                    self._opener,
                    self._vfsmap,
                    self._entries,
                    self._backupentries,
                    False,
                    checkambigfiles=self._checkambigfiles,
                )
                self._report(_(b"rollback completed\n"))
            except BaseException as exc:
                self._report(_(b"rollback failed - please run hg recover\n"))
                self._report(
                    _(b"(failure reason: %s)\n") % stringutil.forcebytestr(exc)
                )
        finally:
            self._journal = None
            self._releasefn(self, False)  # notify failure of transaction
            self._releasefn = None  # Help prevent cycles.
659 659
660 660
def rollback(opener, vfsmap, file, report, checkambigfiles=None):
    """Rolls back the transaction contained in the given file

    Reads the entries in the specified file, and the corresponding
    '*.backupfiles' file, to recover from an incomplete transaction.

    * `file`: a file containing a list of entries, specifying where
    to truncate each file.  The file should contain a list of
    file\0offset pairs, delimited by newlines. The corresponding
    '*.backupfiles' file should contain a list of file\0backupfile
    pairs, delimited by \0.

    `checkambigfiles` is a set of (path, vfs-location) tuples,
    which determine whether file stat ambiguity should be avoided at
    restoring corresponded files.
    """
    entries = []
    backupentries = []

    # parse the journal: one "file\0offset" record per line
    fp = opener.open(file)
    lines = fp.readlines()
    fp.close()
    for l in lines:
        try:
            f, o = l.split(b'\0')
            entries.append((f, int(o), None))
        except ValueError:
            report(
                _(b"couldn't read journal entry %r!\n") % pycompat.bytestr(l)
            )

    # parse the companion backup journal, if present; its first line is a
    # format version that must match ours
    backupjournal = b"%s.backupfiles" % file
    if opener.exists(backupjournal):
        fp = opener.open(backupjournal)
        lines = fp.readlines()
        if lines:
            ver = lines[0][:-1]
            if ver == (b'%d' % version):
                for line in lines[1:]:
                    if line:
                        # Shave off the trailing newline
                        line = line[:-1]
                        l, f, b, c = line.split(b'\0')
                        backupentries.append((l, f, b, bool(c)))
            else:
                report(
                    _(
                        b"journal was created by a different version of "
                        b"Mercurial\n"
                    )
                )

    _playback(
        file,
        report,
        opener,
        vfsmap,
        entries,
        backupentries,
        checkambigfiles=checkambigfiles,
    )
General Comments 0
You need to be logged in to leave comments. Login now