##// END OF EJS Templates
transaction: use ProgrammingError for when a committed transaction is used...
Martin von Zweigbergk -
r46330:5df1655e default
parent child Browse files
Show More
@@ -1,736 +1,734 b''
1 1 # transaction.py - simple journaling scheme for mercurial
2 2 #
3 3 # This transaction scheme is intended to gracefully handle program
4 4 # errors and interruptions. More serious failures like system crashes
5 5 # can be recovered with an fsck-like tool. As the whole repository is
6 6 # effectively log-structured, this should amount to simply truncating
7 7 # anything that isn't referenced in the changelog.
8 8 #
9 9 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
10 10 #
11 11 # This software may be used and distributed according to the terms of the
12 12 # GNU General Public License version 2 or any later version.
13 13
14 14 from __future__ import absolute_import
15 15
16 16 import errno
17 17
18 18 from .i18n import _
19 19 from . import (
20 20 error,
21 21 pycompat,
22 22 util,
23 23 )
24 24 from .utils import stringutil
25 25
# On-disk format version written at the top of the journal/backup files;
# rollback() refuses backup journals written by a different version.
version = 2

# These are the file generators that should only be executed after the
# finalizers are done, since they rely on the output of the finalizers (like
# the changelog having been written).
postfinalizegenerators = {b'bookmarks', b'dirstate'}

# Group selectors for _generatefiles(): run all generators, only the ones
# that must run before finalization, or only the post-finalize ones.
GEN_GROUP_ALL = b'all'
GEN_GROUP_PRE_FINALIZE = b'prefinalize'
GEN_GROUP_POST_FINALIZE = b'postfinalize'
36 36
37 37
def active(func):
    """Decorator: only allow calling *func* while the transaction is open.

    Raises a ProgrammingError when the wrapped method is invoked after the
    transaction has already been committed or aborted (count dropped to 0).
    """

    def wrapper(self, *args, **kwds):
        if self._count != 0:
            return func(self, *args, **kwds)
        raise error.ProgrammingError(
            b'cannot use transaction when it is already committed/aborted'
        )

    return wrapper
49 47
50 48
51 49 def _playback(
52 50 journal,
53 51 report,
54 52 opener,
55 53 vfsmap,
56 54 entries,
57 55 backupentries,
58 56 unlink=True,
59 57 checkambigfiles=None,
60 58 ):
61 59 for f, o, _ignore in entries:
62 60 if o or not unlink:
63 61 checkambig = checkambigfiles and (f, b'') in checkambigfiles
64 62 try:
65 63 fp = opener(f, b'a', checkambig=checkambig)
66 64 if fp.tell() < o:
67 65 raise error.Abort(
68 66 _(
69 67 b"attempted to truncate %s to %d bytes, but it was "
70 68 b"already %d bytes\n"
71 69 )
72 70 % (f, o, fp.tell())
73 71 )
74 72 fp.truncate(o)
75 73 fp.close()
76 74 except IOError:
77 75 report(_(b"failed to truncate %s\n") % f)
78 76 raise
79 77 else:
80 78 try:
81 79 opener.unlink(f)
82 80 except (IOError, OSError) as inst:
83 81 if inst.errno != errno.ENOENT:
84 82 raise
85 83
86 84 backupfiles = []
87 85 for l, f, b, c in backupentries:
88 86 if l not in vfsmap and c:
89 87 report(b"couldn't handle %s: unknown cache location %s\n" % (b, l))
90 88 vfs = vfsmap[l]
91 89 try:
92 90 if f and b:
93 91 filepath = vfs.join(f)
94 92 backuppath = vfs.join(b)
95 93 checkambig = checkambigfiles and (f, l) in checkambigfiles
96 94 try:
97 95 util.copyfile(backuppath, filepath, checkambig=checkambig)
98 96 backupfiles.append(b)
99 97 except IOError:
100 98 report(_(b"failed to recover %s\n") % f)
101 99 else:
102 100 target = f or b
103 101 try:
104 102 vfs.unlink(target)
105 103 except (IOError, OSError) as inst:
106 104 if inst.errno != errno.ENOENT:
107 105 raise
108 106 except (IOError, OSError, error.Abort):
109 107 if not c:
110 108 raise
111 109
112 110 backuppath = b"%s.backupfiles" % journal
113 111 if opener.exists(backuppath):
114 112 opener.unlink(backuppath)
115 113 opener.unlink(journal)
116 114 try:
117 115 for f in backupfiles:
118 116 if opener.exists(f):
119 117 opener.unlink(f)
120 118 except (IOError, OSError, error.Abort):
121 119 # only pure backup file remains, it is sage to ignore any error
122 120 pass
123 121
124 122
class transaction(util.transactional):
    """A journaled, rollback-capable transaction over one or more vfs.

    Append-only files are registered with ``add`` (recorded for truncation
    on abort); other files are protected with ``addbackup``.  ``close``
    commits: it runs validators, file generators and finalize callbacks,
    then removes the journal.  On error, ``_abort`` replays the journal via
    ``_playback`` to restore the previous state.  Transactions nest via
    ``nest``/``release`` using a usage count.
    """

    def __init__(
        self,
        report,
        opener,
        vfsmap,
        journalname,
        undoname=None,
        after=None,
        createmode=None,
        validator=None,
        releasefn=None,
        checkambigfiles=None,
        name='<unnamed>',
    ):
        """Begin a new transaction

        Begins a new transaction that allows rolling back writes in the event
        of an exception.

        * `report`: callable used to emit user-facing (bytes) messages
        * `opener`: vfs used for the journal and store content
        * `vfsmap`: {location -> vfs} map for files outside the main opener
        * `after`: called after the transaction has been committed
        * `createmode`: the mode of the journal file that will be created
        * `releasefn`: called after releasing (with transaction and result)

        `checkambigfiles` is a set of (path, vfs-location) tuples,
        which determine whether file stat ambiguity should be avoided
        for the corresponding files.
        """
        # nesting depth and total usage count (see nest()/release())
        self._count = 1
        self._usages = 1
        self._report = report
        # a vfs to the store content
        self._opener = opener
        # a map to access file in various {location -> vfs}
        vfsmap = vfsmap.copy()
        vfsmap[b''] = opener  # set default value
        self._vfsmap = vfsmap
        self._after = after
        # (file, offset, data) tuples for append-only files; _map indexes
        # them by file name for find()/replace()
        self._entries = []
        self._map = {}
        self._journal = journalname
        self._undoname = undoname
        # pending add() entries delayed by startgroup()/endgroup()
        self._queue = []
        # A callback to do something just after releasing transaction.
        if releasefn is None:
            releasefn = lambda tr, success: None
        self._releasefn = releasefn

        self._checkambigfiles = set()
        if checkambigfiles:
            self._checkambigfiles.update(checkambigfiles)

        # names of this transaction and every nested one, for __repr__
        self._names = [name]

        # A dict dedicated to precisely tracking the changes introduced in the
        # transaction.
        self.changes = {}

        # a dict of arguments to be passed to hooks
        self.hookargs = {}
        self._file = opener.open(self._journal, b"w")

        # a list of ('location', 'path', 'backuppath', cache) entries.
        # - if 'backuppath' is empty, no file existed at backup time
        # - if 'path' is empty, this is a temporary transaction file
        # - if 'location' is not empty, the path is outside main opener reach.
        #   use 'location' value as a key in a vfsmap to find the right 'vfs'
        # (cache is currently unused)
        self._backupentries = []
        self._backupmap = {}
        self._backupjournal = b"%s.backupfiles" % self._journal
        self._backupsfile = opener.open(self._backupjournal, b'w')
        self._backupsfile.write(b'%d\n' % version)

        if createmode is not None:
            opener.chmod(self._journal, createmode & 0o666)
            opener.chmod(self._backupjournal, createmode & 0o666)

        # hold file generations to be performed on commit
        self._filegenerators = {}
        # hold callback to write pending data for hooks
        self._pendingcallback = {}
        # True if any pending data have ever been written
        self._anypending = False
        # holds callback to call when writing the transaction
        self._finalizecallback = {}
        # holds callback to call when validating the transaction
        # should raise exception if anything is wrong
        self._validatecallback = {}
        if validator is not None:
            self._validatecallback[b'001-userhooks'] = validator
        # hold callback for post transaction close
        self._postclosecallback = {}
        # holds callbacks to call during abort
        self._abortcallback = {}

    def __repr__(self):
        name = '/'.join(self._names)
        return '<transaction name=%s, count=%d, usages=%d>' % (
            name,
            self._count,
            self._usages,
        )

    def __del__(self):
        # a still-set journal means the transaction was never closed: abort
        if self._journal:
            self._abort()

    @active
    def startgroup(self):
        """delay registration of file entry

        This is used by strip to delay vision of strip offset. The transaction
        sees either none or all of the strip actions to be done."""
        self._queue.append([])

    @active
    def endgroup(self):
        """apply delayed registration of file entry.

        This is used by strip to delay vision of strip offset. The transaction
        sees either none or all of the strip actions to be done."""
        q = self._queue.pop()
        for f, o, data in q:
            self._addentry(f, o, data)

    @active
    def add(self, file, offset, data=None):
        """record the state of an append-only file before update"""
        if file in self._map or file in self._backupmap:
            return
        if self._queue:
            # inside a group: delay registration until endgroup()
            self._queue[-1].append((file, offset, data))
            return

        self._addentry(file, offset, data)

    def _addentry(self, file, offset, data):
        """add an append-only entry to memory and on-disk state"""
        if file in self._map or file in self._backupmap:
            return
        self._entries.append((file, offset, data))
        self._map[file] = len(self._entries) - 1
        # add enough data to the journal to do the truncate
        self._file.write(b"%s\0%d\n" % (file, offset))
        self._file.flush()

    @active
    def addbackup(self, file, hardlink=True, location=b''):
        """Adds a backup of the file to the transaction

        Calling addbackup() creates a hardlink backup of the specified file
        that is used to recover the file in the event of the transaction
        aborting.

        * `file`: the file path, relative to .hg/store
        * `hardlink`: use a hardlink to quickly create the backup
        """
        if self._queue:
            msg = b'cannot use transaction.addbackup inside "group"'
            raise error.ProgrammingError(msg)

        if file in self._map or file in self._backupmap:
            return
        vfs = self._vfsmap[location]
        dirname, filename = vfs.split(file)
        backupfilename = b"%s.backup.%s" % (self._journal, filename)
        backupfile = vfs.reljoin(dirname, backupfilename)
        if vfs.exists(file):
            filepath = vfs.join(file)
            backuppath = vfs.join(backupfile)
            util.copyfile(filepath, backuppath, hardlink=hardlink)
        else:
            # empty backup path records "file did not exist" (see _playback)
            backupfile = b''

        self._addbackupentry((location, file, backupfile, False))

    def _addbackupentry(self, entry):
        """register a new backup entry and write it to disk"""
        self._backupentries.append(entry)
        self._backupmap[entry[1]] = len(self._backupentries) - 1
        self._backupsfile.write(b"%s\0%s\0%s\0%d\n" % entry)
        self._backupsfile.flush()

    @active
    def registertmp(self, tmpfile, location=b''):
        """register a temporary transaction file

        Such files will be deleted when the transaction exits (on both
        failure and success).
        """
        self._addbackupentry((location, b'', tmpfile, False))

    @active
    def addfilegenerator(
        self, genid, filenames, genfunc, order=0, location=b''
    ):
        """add a function to generate some files at transaction commit

        The `genfunc` argument is a function capable of generating proper
        content of each entry in the `filename` tuple.

        At transaction close time, `genfunc` will be called with one file
        object argument per entries in `filenames`.

        The transaction itself is responsible for the backup, creation and
        final write of such file.

        The `genid` argument is used to ensure the same set of file is only
        generated once. Call to `addfilegenerator` for a `genid` already
        present will overwrite the old entry.

        The `order` argument may be used to control the order in which multiple
        generator will be executed.

        The `location` arguments may be used to indicate the files are located
        outside of the standard directory for transaction. It should match
        one of the key of the `transaction.vfsmap` dictionary.
        """
        # For now, we are unable to do proper backup and restore of custom vfs
        # but for bookmarks that are handled outside this mechanism.
        self._filegenerators[genid] = (order, filenames, genfunc, location)

    @active
    def removefilegenerator(self, genid):
        """reverse of addfilegenerator, remove a file generator function"""
        if genid in self._filegenerators:
            del self._filegenerators[genid]

    def _generatefiles(self, suffix=b'', group=GEN_GROUP_ALL):
        # write files registered for generation; returns True if any
        # generator ran.  `suffix` (e.g. b'.pending') writes side files
        # instead of the real ones; `group` restricts to pre/post-finalize.
        any = False

        if group == GEN_GROUP_ALL:
            skip_post = skip_pre = False
        else:
            skip_pre = group == GEN_GROUP_POST_FINALIZE
            skip_post = group == GEN_GROUP_PRE_FINALIZE

        for id, entry in sorted(pycompat.iteritems(self._filegenerators)):
            any = True
            order, filenames, genfunc, location = entry

            # for generation at closing, check if it's before or after finalize
            is_post = id in postfinalizegenerators
            if skip_post and is_post:
                continue
            elif skip_pre and not is_post:
                continue

            vfs = self._vfsmap[location]
            files = []
            try:
                for name in filenames:
                    name += suffix
                    if suffix:
                        # pending file: temporary, deleted on exit
                        self.registertmp(name, location=location)
                        checkambig = False
                    else:
                        self.addbackup(name, location=location)
                        checkambig = (name, location) in self._checkambigfiles
                    files.append(
                        vfs(name, b'w', atomictemp=True, checkambig=checkambig)
                    )
                genfunc(*files)
                for f in files:
                    f.close()
                # skip discard() loop since we're sure no open file remains
                del files[:]
            finally:
                for f in files:
                    f.discard()
        return any

    @active
    def find(self, file):
        # look up the recorded entry (append-only first, then backup)
        if file in self._map:
            return self._entries[self._map[file]]
        if file in self._backupmap:
            return self._backupentries[self._backupmap[file]]
        return None

    @active
    def replace(self, file, offset, data=None):
        '''
        replace can only replace already committed entries
        that are not pending in the queue
        '''

        if file not in self._map:
            raise KeyError(file)
        index = self._map[file]
        self._entries[index] = (file, offset, data)
        self._file.write(b"%s\0%d\n" % (file, offset))
        self._file.flush()

    @active
    def nest(self, name='<unnamed>'):
        # open a nested scope; the transaction only commits/aborts when
        # the outermost scope closes
        self._count += 1
        self._usages += 1
        self._names.append(name)
        return self

    def release(self):
        if self._count > 0:
            self._usages -= 1
        if self._names:
            self._names.pop()
        # if the transaction scopes are left without being closed, fail
        if self._count > 0 and self._usages == 0:
            self._abort()

    def running(self):
        return self._count > 0

    def addpending(self, category, callback):
        """add a callback to be called when the transaction is pending

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._pendingcallback[category] = callback

    @active
    def writepending(self):
        '''write pending file to temporary version

        This is used to allow hooks to view a transaction before commit'''
        categories = sorted(self._pendingcallback)
        for cat in categories:
            # remove callback since the data will have been flushed
            any = self._pendingcallback.pop(cat)(self)
            self._anypending = self._anypending or any
        self._anypending |= self._generatefiles(suffix=b'.pending')
        return self._anypending

    @active
    def hasfinalize(self, category):
        """check if a callback already exists for a category
        """
        return category in self._finalizecallback

    @active
    def addfinalize(self, category, callback):
        """add a callback to be called when the transaction is closed

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting old callbacks with
        newer callbacks.
        """
        self._finalizecallback[category] = callback

    @active
    def addpostclose(self, category, callback):
        """add or replace a callback to be called after the transaction closed

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._postclosecallback[category] = callback

    @active
    def getpostclose(self, category):
        """return a postclose callback added before, or None"""
        return self._postclosecallback.get(category, None)

    @active
    def addabort(self, category, callback):
        """add a callback to be called when the transaction is aborted.

        The transaction will be given as the first argument to the callback.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._abortcallback[category] = callback

    @active
    def addvalidator(self, category, callback):
        """ adds a callback to be called when validating the transaction.

        The transaction will be given as the first argument to the callback.

        callback should raise exception if to abort transaction """
        self._validatecallback[category] = callback

    @active
    def close(self):
        '''commit the transaction'''
        if self._count == 1:
            # outermost scope: run validators, generators and finalizers
            for category in sorted(self._validatecallback):
                self._validatecallback[category](self)
            self._validatecallback = None  # Help prevent cycles.
            self._generatefiles(group=GEN_GROUP_PRE_FINALIZE)
            # finalizers may register more finalizers, hence the loop
            while self._finalizecallback:
                callbacks = self._finalizecallback
                self._finalizecallback = {}
                categories = sorted(callbacks)
                for cat in categories:
                    callbacks[cat](self)
            # Prevent double usage and help clear cycles.
            self._finalizecallback = None
            self._generatefiles(group=GEN_GROUP_POST_FINALIZE)

        self._count -= 1
        if self._count != 0:
            # nested scope closed; wait for the outermost close
            return
        self._file.close()
        self._backupsfile.close()
        # cleanup temporary files
        for l, f, b, c in self._backupentries:
            if l not in self._vfsmap and c:
                self._report(
                    b"couldn't remove %s: unknown cache location %s\n" % (b, l)
                )
                continue
            vfs = self._vfsmap[l]
            if not f and b and vfs.exists(b):
                try:
                    vfs.unlink(b)
                except (IOError, OSError, error.Abort) as inst:
                    if not c:
                        raise
                    # Abort may be raised by a read-only opener
                    self._report(
                        b"couldn't remove %s: %s\n" % (vfs.join(b), inst)
                    )
        self._entries = []
        self._writeundo()
        if self._after:
            self._after()
            self._after = None  # Help prevent cycles.
        if self._opener.isfile(self._backupjournal):
            self._opener.unlink(self._backupjournal)
        if self._opener.isfile(self._journal):
            self._opener.unlink(self._journal)
        # remove the backup copies now that the commit succeeded
        for l, _f, b, c in self._backupentries:
            if l not in self._vfsmap and c:
                self._report(
                    b"couldn't remove %s: unknown cache location"
                    b"%s\n" % (b, l)
                )
                continue
            vfs = self._vfsmap[l]
            if b and vfs.exists(b):
                try:
                    vfs.unlink(b)
                except (IOError, OSError, error.Abort) as inst:
                    if not c:
                        raise
                    # Abort may be raised by a read-only opener
                    self._report(
                        b"couldn't remove %s: %s\n" % (vfs.join(b), inst)
                    )
        self._backupentries = []
        self._journal = None

        self._releasefn(self, True)  # notify success of closing transaction
        self._releasefn = None  # Help prevent cycles.

        # run post close action
        categories = sorted(self._postclosecallback)
        for cat in categories:
            self._postclosecallback[cat](self)
        # Prevent double usage and help clear cycles.
        self._postclosecallback = None

    @active
    def abort(self):
        '''abort the transaction (generally called on error, or when the
        transaction is not explicitly committed before going out of
        scope)'''
        self._abort()

    def _writeundo(self):
        """write transaction data for possible future undo call"""
        if self._undoname is None:
            return
        undobackupfile = self._opener.open(
            b"%s.backupfiles" % self._undoname, b'w'
        )
        undobackupfile.write(b'%d\n' % version)
        for l, f, b, c in self._backupentries:
            if not f:  # temporary file
                continue
            if not b:
                # no backup existed: record an empty backup path
                u = b''
            else:
                if l not in self._vfsmap and c:
                    self._report(
                        b"couldn't remove %s: unknown cache location"
                        b"%s\n" % (b, l)
                    )
                    continue
                vfs = self._vfsmap[l]
                base, name = vfs.split(b)
                assert name.startswith(self._journal), name
                # copy the backup under the undo name so 'hg rollback' works
                uname = name.replace(self._journal, self._undoname, 1)
                u = vfs.reljoin(base, uname)
                util.copyfile(vfs.join(b), vfs.join(u), hardlink=True)
            undobackupfile.write(b"%s\0%s\0%s\0%d\n" % (l, f, u, c))
        undobackupfile.close()

    def _abort(self):
        self._count = 0
        self._usages = 0
        self._file.close()
        self._backupsfile.close()

        try:
            if not self._entries and not self._backupentries:
                # nothing was written: just remove the (empty) journals
                if self._backupjournal:
                    self._opener.unlink(self._backupjournal)
                if self._journal:
                    self._opener.unlink(self._journal)
                return

            self._report(_(b"transaction abort!\n"))

            try:
                for cat in sorted(self._abortcallback):
                    self._abortcallback[cat](self)
                # Prevent double usage and help clear cycles.
                self._abortcallback = None
                _playback(
                    self._journal,
                    self._report,
                    self._opener,
                    self._vfsmap,
                    self._entries,
                    self._backupentries,
                    False,
                    checkambigfiles=self._checkambigfiles,
                )
                self._report(_(b"rollback completed\n"))
            except BaseException as exc:
                self._report(_(b"rollback failed - please run hg recover\n"))
                self._report(
                    _(b"(failure reason: %s)\n") % stringutil.forcebytestr(exc)
                )
        finally:
            self._journal = None
            self._releasefn(self, False)  # notify failure of transaction
            self._releasefn = None  # Help prevent cycles.
675 673
def rollback(opener, vfsmap, file, report, checkambigfiles=None):
    """Rolls back the transaction contained in the given file

    Reads the entries in the specified file, and the corresponding
    '*.backupfiles' file, to recover from an incomplete transaction.

    * `file`: a file containing a list of entries, specifying where
    to truncate each file. The file should contain a list of
    file\0offset pairs, delimited by newlines. The corresponding
    '*.backupfiles' file should contain a list of file\0backupfile
    pairs, delimited by \0.

    `checkambigfiles` is a set of (path, vfs-location) tuples,
    which determine whether file stat ambiguity should be avoided at
    restoring corresponded files.
    """
    entries = []
    backupentries = []

    fp = opener.open(file)
    lines = fp.readlines()
    fp.close()
    for l in lines:
        try:
            f, o = l.split(b'\0')
            entries.append((f, int(o), None))
        except ValueError:
            report(
                _(b"couldn't read journal entry %r!\n") % pycompat.bytestr(l)
            )

    backupjournal = b"%s.backupfiles" % file
    if opener.exists(backupjournal):
        fp = opener.open(backupjournal)
        lines = fp.readlines()
        # Fix: this file object was previously never closed (resource leak);
        # the journal fp above is closed, so close this one too.
        fp.close()
        if lines:
            ver = lines[0][:-1]
            if ver == (b'%d' % version):
                for line in lines[1:]:
                    if line:
                        # Shave off the trailing newline
                        line = line[:-1]
                        l, f, b, c = line.split(b'\0')
                        backupentries.append((l, f, b, bool(c)))
            else:
                report(
                    _(
                        b"journal was created by a different version of "
                        b"Mercurial\n"
                    )
                )

    _playback(
        file,
        report,
        opener,
        vfsmap,
        entries,
        backupentries,
        checkambigfiles=checkambigfiles,
    )
General Comments 0
You need to be logged in to leave comments. Login now