##// END OF EJS Templates
transaction: add functionality to have multiple validators...
Pulkit Goyal -
r45031:36f08ae8 default
parent child Browse files
Show More
@@ -1,727 +1,736 b''
1 1 # transaction.py - simple journaling scheme for mercurial
2 2 #
3 3 # This transaction scheme is intended to gracefully handle program
4 4 # errors and interruptions. More serious failures like system crashes
5 5 # can be recovered with an fsck-like tool. As the whole repository is
6 6 # effectively log-structured, this should amount to simply truncating
7 7 # anything that isn't referenced in the changelog.
8 8 #
9 9 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
10 10 #
11 11 # This software may be used and distributed according to the terms of the
12 12 # GNU General Public License version 2 or any later version.
13 13
14 14 from __future__ import absolute_import
15 15
16 16 import errno
17 17
18 18 from .i18n import _
19 19 from . import (
20 20 error,
21 21 pycompat,
22 22 util,
23 23 )
24 24 from .utils import stringutil
25 25
# On-disk journal format version, written at the top of '*.backupfiles'.
version = 2

# These are the file generators that should only be executed after the
# finalizers are done, since they rely on the output of the finalizers (like
# the changelog having been written).
postfinalizegenerators = {b'bookmarks', b'dirstate'}

# Generator-group selectors accepted by transaction._generatefiles().
GEN_GROUP_ALL = b'all'
GEN_GROUP_PRE_FINALIZE = b'prefinalize'
GEN_GROUP_POST_FINALIZE = b'postfinalize'
36 36
37 37
def active(func):
    """Decorator restricting a transaction method to a live transaction.

    The wrapped method raises error.Abort when invoked after the
    transaction has already been committed or aborted (self._count == 0).
    """

    def _active(self, *args, **kwds):
        if self._count == 0:
            raise error.Abort(
                _(
                    b'cannot use transaction when it is already committed/aborted'
                )
            )
        return func(self, *args, **kwds)

    return _active
49 49
50 50
51 51 def _playback(
52 52 journal,
53 53 report,
54 54 opener,
55 55 vfsmap,
56 56 entries,
57 57 backupentries,
58 58 unlink=True,
59 59 checkambigfiles=None,
60 60 ):
61 61 for f, o, _ignore in entries:
62 62 if o or not unlink:
63 63 checkambig = checkambigfiles and (f, b'') in checkambigfiles
64 64 try:
65 65 fp = opener(f, b'a', checkambig=checkambig)
66 66 if fp.tell() < o:
67 67 raise error.Abort(
68 68 _(
69 69 b"attempted to truncate %s to %d bytes, but it was "
70 70 b"already %d bytes\n"
71 71 )
72 72 % (f, o, fp.tell())
73 73 )
74 74 fp.truncate(o)
75 75 fp.close()
76 76 except IOError:
77 77 report(_(b"failed to truncate %s\n") % f)
78 78 raise
79 79 else:
80 80 try:
81 81 opener.unlink(f)
82 82 except (IOError, OSError) as inst:
83 83 if inst.errno != errno.ENOENT:
84 84 raise
85 85
86 86 backupfiles = []
87 87 for l, f, b, c in backupentries:
88 88 if l not in vfsmap and c:
89 89 report(b"couldn't handle %s: unknown cache location %s\n" % (b, l))
90 90 vfs = vfsmap[l]
91 91 try:
92 92 if f and b:
93 93 filepath = vfs.join(f)
94 94 backuppath = vfs.join(b)
95 95 checkambig = checkambigfiles and (f, l) in checkambigfiles
96 96 try:
97 97 util.copyfile(backuppath, filepath, checkambig=checkambig)
98 98 backupfiles.append(b)
99 99 except IOError:
100 100 report(_(b"failed to recover %s\n") % f)
101 101 else:
102 102 target = f or b
103 103 try:
104 104 vfs.unlink(target)
105 105 except (IOError, OSError) as inst:
106 106 if inst.errno != errno.ENOENT:
107 107 raise
108 108 except (IOError, OSError, error.Abort):
109 109 if not c:
110 110 raise
111 111
112 112 backuppath = b"%s.backupfiles" % journal
113 113 if opener.exists(backuppath):
114 114 opener.unlink(backuppath)
115 115 opener.unlink(journal)
116 116 try:
117 117 for f in backupfiles:
118 118 if opener.exists(f):
119 119 opener.unlink(f)
120 120 except (IOError, OSError, error.Abort):
121 121 # only pure backup file remains, it is sage to ignore any error
122 122 pass
123 123
124 124
class transaction(util.transactional):
    def __init__(
        self,
        report,
        opener,
        vfsmap,
        journalname,
        undoname=None,
        after=None,
        createmode=None,
        validator=None,
        releasefn=None,
        checkambigfiles=None,
        name='<unnamed>',
    ):
        """Begin a new transaction

        Begins a new transaction that allows rolling back writes in the event of
        an exception.

        * `after`: called after the transaction has been committed
        * `createmode`: the mode of the journal file that will be created
        * `releasefn`: called after releasing (with transaction and result)
        * `validator`: registered as the b'001-userhooks' validate callback

        `checkambigfiles` is a set of (path, vfs-location) tuples,
        which determine whether file stat ambiguity should be avoided
        for corresponded files.
        """
        self._count = 1
        self._usages = 1
        self._report = report
        # a vfs to the store content
        self._opener = opener
        # a map to access file in various {location -> vfs}
        vfsmap = vfsmap.copy()
        vfsmap[b''] = opener  # set default value
        self._vfsmap = vfsmap
        self._after = after
        self._entries = []
        self._map = {}
        self._journal = journalname
        self._undoname = undoname
        self._queue = []
        # A callback to do something just after releasing transaction.
        if releasefn is None:
            releasefn = lambda tr, success: None
        self._releasefn = releasefn

        self._checkambigfiles = set()
        if checkambigfiles:
            self._checkambigfiles.update(checkambigfiles)

        self._names = [name]

        # A dict dedicated to precisely tracking the changes introduced in the
        # transaction.
        self.changes = {}

        # a dict of arguments to be passed to hooks
        self.hookargs = {}
        self._file = opener.open(self._journal, b"w")

        # a list of ('location', 'path', 'backuppath', cache) entries.
        # - if 'backuppath' is empty, no file existed at backup time
        # - if 'path' is empty, this is a temporary transaction file
        # - if 'location' is not empty, the path is outside main opener reach.
        #   use 'location' value as a key in a vfsmap to find the right 'vfs'
        # (cache is currently unused)
        self._backupentries = []
        self._backupmap = {}
        self._backupjournal = b"%s.backupfiles" % self._journal
        self._backupsfile = opener.open(self._backupjournal, b'w')
        self._backupsfile.write(b'%d\n' % version)

        if createmode is not None:
            opener.chmod(self._journal, createmode & 0o666)
            opener.chmod(self._backupjournal, createmode & 0o666)

        # hold file generations to be performed on commit
        self._filegenerators = {}
        # hold callback to write pending data for hooks
        self._pendingcallback = {}
        # True if any pending data have been written ever
        self._anypending = False
        # holds callback to call when writing the transaction
        self._finalizecallback = {}
        # holds callback to call when validating the transaction
        # should raise exception if anything is wrong
        self._validatecallback = {}
        if validator is not None:
            self._validatecallback[b'001-userhooks'] = validator
        # hold callback for post transaction close
        self._postclosecallback = {}
        # holds callbacks to call during abort
        self._abortcallback = {}

    def __repr__(self):
        name = '/'.join(self._names)
        return '<transaction name=%s, count=%d, usages=%d>' % (
            name,
            self._count,
            self._usages,
        )

    def __del__(self):
        if self._journal:
            self._abort()

    @active
    def startgroup(self):
        """delay registration of file entry

        This is used by strip to delay vision of strip offset. The transaction
        sees either none or all of the strip actions to be done."""
        self._queue.append([])

    @active
    def endgroup(self):
        """apply delayed registration of file entry.

        This is used by strip to delay vision of strip offset. The transaction
        sees either none or all of the strip actions to be done."""
        q = self._queue.pop()
        for f, o, data in q:
            self._addentry(f, o, data)

    @active
    def add(self, file, offset, data=None):
        """record the state of an append-only file before update"""
        if file in self._map or file in self._backupmap:
            return
        if self._queue:
            self._queue[-1].append((file, offset, data))
            return

        self._addentry(file, offset, data)

    def _addentry(self, file, offset, data):
        """add a append-only entry to memory and on-disk state"""
        if file in self._map or file in self._backupmap:
            return
        self._entries.append((file, offset, data))
        self._map[file] = len(self._entries) - 1
        # add enough data to the journal to do the truncate
        self._file.write(b"%s\0%d\n" % (file, offset))
        self._file.flush()

    @active
    def addbackup(self, file, hardlink=True, location=b''):
        """Adds a backup of the file to the transaction

        Calling addbackup() creates a hardlink backup of the specified file
        that is used to recover the file in the event of the transaction
        aborting.

        * `file`: the file path, relative to .hg/store
        * `hardlink`: use a hardlink to quickly create the backup
        """
        if self._queue:
            msg = b'cannot use transaction.addbackup inside "group"'
            raise error.ProgrammingError(msg)

        if file in self._map or file in self._backupmap:
            return
        vfs = self._vfsmap[location]
        dirname, filename = vfs.split(file)
        backupfilename = b"%s.backup.%s" % (self._journal, filename)
        backupfile = vfs.reljoin(dirname, backupfilename)
        if vfs.exists(file):
            filepath = vfs.join(file)
            backuppath = vfs.join(backupfile)
            util.copyfile(filepath, backuppath, hardlink=hardlink)
        else:
            # empty backup path records that no file existed at backup time
            backupfile = b''

        self._addbackupentry((location, file, backupfile, False))

    def _addbackupentry(self, entry):
        """register a new backup entry and write it to disk"""
        self._backupentries.append(entry)
        self._backupmap[entry[1]] = len(self._backupentries) - 1
        self._backupsfile.write(b"%s\0%s\0%s\0%d\n" % entry)
        self._backupsfile.flush()

    @active
    def registertmp(self, tmpfile, location=b''):
        """register a temporary transaction file

        Such files will be deleted when the transaction exits (on both
        failure and success).
        """
        self._addbackupentry((location, b'', tmpfile, False))

    @active
    def addfilegenerator(
        self, genid, filenames, genfunc, order=0, location=b''
    ):
        """add a function to generates some files at transaction commit

        The `genfunc` argument is a function capable of generating proper
        content of each entry in the `filename` tuple.

        At transaction close time, `genfunc` will be called with one file
        object argument per entries in `filenames`.

        The transaction itself is responsible for the backup, creation and
        final write of such file.

        The `genid` argument is used to ensure the same set of file is only
        generated once. Call to `addfilegenerator` for a `genid` already
        present will overwrite the old entry.

        The `order` argument may be used to control the order in which multiple
        generator will be executed.

        The `location` arguments may be used to indicate the files are located
        outside of the standard directory for transaction. It should match
        one of the key of the `transaction.vfsmap` dictionary.
        """
        # For now, we are unable to do proper backup and restore of custom vfs
        # but for bookmarks that are handled outside this mechanism.
        self._filegenerators[genid] = (order, filenames, genfunc, location)

    @active
    def removefilegenerator(self, genid):
        """reverse of addfilegenerator, remove a file generator function"""
        if genid in self._filegenerators:
            del self._filegenerators[genid]

    def _generatefiles(self, suffix=b'', group=GEN_GROUP_ALL):
        # write files registered for generation; returns True if any
        # generator was present (whether or not it ran for this group)
        any = False

        if group == GEN_GROUP_ALL:
            skip_post = skip_pre = False
        else:
            skip_pre = group == GEN_GROUP_POST_FINALIZE
            skip_post = group == GEN_GROUP_PRE_FINALIZE

        for id, entry in sorted(pycompat.iteritems(self._filegenerators)):
            any = True
            order, filenames, genfunc, location = entry

            # for generation at closing, check if it's before or after finalize
            is_post = id in postfinalizegenerators
            if skip_post and is_post:
                continue
            elif skip_pre and not is_post:
                continue

            vfs = self._vfsmap[location]
            files = []
            try:
                for name in filenames:
                    name += suffix
                    if suffix:
                        self.registertmp(name, location=location)
                        checkambig = False
                    else:
                        self.addbackup(name, location=location)
                        checkambig = (name, location) in self._checkambigfiles
                    files.append(
                        vfs(name, b'w', atomictemp=True, checkambig=checkambig)
                    )
                genfunc(*files)
                for f in files:
                    f.close()
                # skip discard() loop since we're sure no open file remains
                del files[:]
            finally:
                for f in files:
                    f.discard()
        return any

    @active
    def find(self, file):
        if file in self._map:
            return self._entries[self._map[file]]
        if file in self._backupmap:
            return self._backupentries[self._backupmap[file]]
        return None

    @active
    def replace(self, file, offset, data=None):
        '''
        replace can only replace already committed entries
        that are not pending in the queue
        '''

        if file not in self._map:
            raise KeyError(file)
        index = self._map[file]
        self._entries[index] = (file, offset, data)
        self._file.write(b"%s\0%d\n" % (file, offset))
        self._file.flush()

    @active
    def nest(self, name='<unnamed>'):
        self._count += 1
        self._usages += 1
        self._names.append(name)
        return self

    def release(self):
        if self._count > 0:
            self._usages -= 1
        if self._names:
            self._names.pop()
        # if the transaction scopes are left without being closed, fail
        if self._count > 0 and self._usages == 0:
            self._abort()

    def running(self):
        return self._count > 0

    def addpending(self, category, callback):
        """add a callback to be called when the transaction is pending

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._pendingcallback[category] = callback

    @active
    def writepending(self):
        '''write pending file to temporary version

        This is used to allow hooks to view a transaction before commit'''
        categories = sorted(self._pendingcallback)
        for cat in categories:
            # remove callback since the data will have been flushed
            any = self._pendingcallback.pop(cat)(self)
            self._anypending = self._anypending or any
        self._anypending |= self._generatefiles(suffix=b'.pending')
        return self._anypending

    @active
    def hasfinalize(self, category):
        """check whether a callback already exists for a category
        """
        return category in self._finalizecallback

    @active
    def addfinalize(self, category, callback):
        """add a callback to be called when the transaction is closed

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting old callbacks with
        newer callbacks.
        """
        self._finalizecallback[category] = callback

    @active
    def addpostclose(self, category, callback):
        """add or replace a callback to be called after the transaction closed

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._postclosecallback[category] = callback

    @active
    def getpostclose(self, category):
        """return a postclose callback added before, or None"""
        return self._postclosecallback.get(category, None)

    @active
    def addabort(self, category, callback):
        """add a callback to be called when the transaction is aborted.

        The transaction will be given as the first argument to the callback.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._abortcallback[category] = callback

    @active
    def addvalidator(self, category, callback):
        """adds a callback to be called when validating the transaction.

        The transaction will be given as the first argument to the callback.

        The callback should raise an exception to abort the transaction."""
        self._validatecallback[category] = callback

    @active
    def close(self):
        '''commit the transaction'''
        if self._count == 1:
            # run validators first; any of them may raise to veto the close
            for category in sorted(self._validatecallback):
                self._validatecallback[category](self)
            self._validatecallback = None  # Help prevent cycles.
            self._generatefiles(group=GEN_GROUP_PRE_FINALIZE)
            # finalizers may register more finalizers, hence the loop
            while self._finalizecallback:
                callbacks = self._finalizecallback
                self._finalizecallback = {}
                categories = sorted(callbacks)
                for cat in categories:
                    callbacks[cat](self)
            # Prevent double usage and help clear cycles.
            self._finalizecallback = None
            self._generatefiles(group=GEN_GROUP_POST_FINALIZE)

        self._count -= 1
        if self._count != 0:
            return
        self._file.close()
        self._backupsfile.close()
        # cleanup temporary files
        for l, f, b, c in self._backupentries:
            if l not in self._vfsmap and c:
                self._report(
                    b"couldn't remove %s: unknown cache location %s\n" % (b, l)
                )
                continue
            vfs = self._vfsmap[l]
            if not f and b and vfs.exists(b):
                try:
                    vfs.unlink(b)
                except (IOError, OSError, error.Abort) as inst:
                    if not c:
                        raise
                    # Abort may be raised by a read-only opener
                    self._report(
                        b"couldn't remove %s: %s\n" % (vfs.join(b), inst)
                    )
        self._entries = []
        self._writeundo()
        if self._after:
            self._after()
            self._after = None  # Help prevent cycles.
        if self._opener.isfile(self._backupjournal):
            self._opener.unlink(self._backupjournal)
        if self._opener.isfile(self._journal):
            self._opener.unlink(self._journal)
        for l, _f, b, c in self._backupentries:
            if l not in self._vfsmap and c:
                self._report(
                    b"couldn't remove %s: unknown cache location"
                    b"%s\n" % (b, l)
                )
                continue
            vfs = self._vfsmap[l]
            if b and vfs.exists(b):
                try:
                    vfs.unlink(b)
                except (IOError, OSError, error.Abort) as inst:
                    if not c:
                        raise
                    # Abort may be raised by a read-only opener
                    self._report(
                        b"couldn't remove %s: %s\n" % (vfs.join(b), inst)
                    )
        self._backupentries = []
        self._journal = None

        self._releasefn(self, True)  # notify success of closing transaction
        self._releasefn = None  # Help prevent cycles.

        # run post close action
        categories = sorted(self._postclosecallback)
        for cat in categories:
            self._postclosecallback[cat](self)
        # Prevent double usage and help clear cycles.
        self._postclosecallback = None

    @active
    def abort(self):
        '''abort the transaction (generally called on error, or when the
        transaction is not explicitly committed before going out of
        scope)'''
        self._abort()

    def _writeundo(self):
        """write transaction data for possible future undo call"""
        if self._undoname is None:
            return
        undobackupfile = self._opener.open(
            b"%s.backupfiles" % self._undoname, b'w'
        )
        undobackupfile.write(b'%d\n' % version)
        for l, f, b, c in self._backupentries:
            if not f:  # temporary file
                continue
            if not b:
                u = b''
            else:
                if l not in self._vfsmap and c:
                    self._report(
                        b"couldn't remove %s: unknown cache location"
                        b"%s\n" % (b, l)
                    )
                    continue
                vfs = self._vfsmap[l]
                base, name = vfs.split(b)
                assert name.startswith(self._journal), name
                uname = name.replace(self._journal, self._undoname, 1)
                u = vfs.reljoin(base, uname)
                util.copyfile(vfs.join(b), vfs.join(u), hardlink=True)
            undobackupfile.write(b"%s\0%s\0%s\0%d\n" % (l, f, u, c))
        undobackupfile.close()

    def _abort(self):
        self._count = 0
        self._usages = 0
        self._file.close()
        self._backupsfile.close()

        try:
            if not self._entries and not self._backupentries:
                # nothing was written; just clean up the journal files
                if self._backupjournal:
                    self._opener.unlink(self._backupjournal)
                if self._journal:
                    self._opener.unlink(self._journal)
                return

            self._report(_(b"transaction abort!\n"))

            try:
                for cat in sorted(self._abortcallback):
                    self._abortcallback[cat](self)
                # Prevent double usage and help clear cycles.
                self._abortcallback = None
                _playback(
                    self._journal,
                    self._report,
                    self._opener,
                    self._vfsmap,
                    self._entries,
                    self._backupentries,
                    False,
                    checkambigfiles=self._checkambigfiles,
                )
                self._report(_(b"rollback completed\n"))
            except BaseException as exc:
                self._report(_(b"rollback failed - please run hg recover\n"))
                self._report(
                    _(b"(failure reason: %s)\n") % stringutil.forcebytestr(exc)
                )
        finally:
            self._journal = None
            self._releasefn(self, False)  # notify failure of transaction
            self._releasefn = None  # Help prevent cycles.
665 674
666 675
def rollback(opener, vfsmap, file, report, checkambigfiles=None):
    """Rolls back the transaction contained in the given file

    Reads the entries in the specified file, and the corresponding
    '*.backupfiles' file, to recover from an incomplete transaction.

    * `file`: a file containing a list of entries, specifying where
    to truncate each file. The file should contain a list of
    file\0offset pairs, delimited by newlines. The corresponding
    '*.backupfiles' file should contain a list of file\0backupfile
    pairs, delimited by \0.

    `checkambigfiles` is a set of (path, vfs-location) tuples,
    which determine whether file stat ambiguity should be avoided at
    restoring corresponded files.
    """
    entries = []
    backupentries = []

    # parse the journal: one "file\0offset" record per line
    fp = opener.open(file)
    lines = fp.readlines()
    fp.close()
    for l in lines:
        try:
            f, o = l.split(b'\0')
            entries.append((f, int(o), None))
        except ValueError:
            report(
                _(b"couldn't read journal entry %r!\n") % pycompat.bytestr(l)
            )

    # parse the companion backup journal, if any; its first line carries
    # the on-disk format version
    backupjournal = b"%s.backupfiles" % file
    if opener.exists(backupjournal):
        fp = opener.open(backupjournal)
        lines = fp.readlines()
        if lines:
            ver = lines[0][:-1]
            if ver == (b'%d' % version):
                for line in lines[1:]:
                    if line:
                        # Shave off the trailing newline
                        line = line[:-1]
                        l, f, b, c = line.split(b'\0')
                        backupentries.append((l, f, b, bool(c)))
            else:
                report(
                    _(
                        b"journal was created by a different version of "
                        b"Mercurial\n"
                    )
                )

    _playback(
        file,
        report,
        opener,
        vfsmap,
        entries,
        backupentries,
        checkambigfiles=checkambigfiles,
    )
General Comments 0
You need to be logged in to leave comments. Login now