recover: only apply last journal record per file (issue6423)...
Joerg Sonnenberger
r48061:672f48cf default draft
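
The crux of the change is in _playback() below: a journal may contain several records for the same file, for example when transaction.replace() appends a superseding record during an inline-to-non-inline revlog split, and recovery must apply only the last offset recorded for each file. Here is a minimal sketch of the dedup the new code relies on, using the journal values from the test below (the sketch is illustrative, not part of the commit):

    # Journal records are (file, offset) pairs in write order; building a
    # dict keeps only the last record per file, and sorting the items makes
    # the playback order deterministic.
    entries = [
        (b'data/file.i', 1174),  # index size before the split
        (b'data/file.d', 0),     # temporary record written during the split
        (b'data/file.d', 1046),  # superseding record after the split
        (b'data/file.i', 128),   # superseding record for the rewritten index
    ]
    assert sorted(dict(entries).items()) == [
        (b'data/file.d', 1046),
        (b'data/file.i', 128),
    ]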
@@ -1,764 +1,764 @@
1 1 # transaction.py - simple journaling scheme for mercurial
2 2 #
3 3 # This transaction scheme is intended to gracefully handle program
4 4 # errors and interruptions. More serious failures like system crashes
5 5 # can be recovered with an fsck-like tool. As the whole repository is
6 6 # effectively log-structured, this should amount to simply truncating
7 7 # anything that isn't referenced in the changelog.
8 8 #
9 9 # Copyright 2005, 2006 Olivia Mackall <olivia@selenic.com>
10 10 #
11 11 # This software may be used and distributed according to the terms of the
12 12 # GNU General Public License version 2 or any later version.
13 13
14 14 from __future__ import absolute_import
15 15
16 16 import errno
17 17
18 18 from .i18n import _
19 19 from . import (
20 20 error,
21 21 pycompat,
22 22 util,
23 23 )
24 24 from .utils import stringutil
25 25
26 26 version = 2
27 27
28 28 # These are the file generators that should only be executed after the
29 29 # finalizers are done, since they rely on the output of the finalizers (like
30 30 # the changelog having been written).
31 31 postfinalizegenerators = {b'bookmarks', b'dirstate'}
32 32
33 33 GEN_GROUP_ALL = b'all'
34 34 GEN_GROUP_PRE_FINALIZE = b'prefinalize'
35 35 GEN_GROUP_POST_FINALIZE = b'postfinalize'
36 36
37 37
38 38 def active(func):
39 39 def _active(self, *args, **kwds):
40 40 if self._count == 0:
41 41 raise error.ProgrammingError(
42 42 b'cannot use transaction when it is already committed/aborted'
43 43 )
44 44 return func(self, *args, **kwds)
45 45
46 46 return _active
47 47
48 48
49 49 def _playback(
50 50 journal,
51 51 report,
52 52 opener,
53 53 vfsmap,
54 54 entries,
55 55 backupentries,
56 56 unlink=True,
57 57 checkambigfiles=None,
58 58 ):
59 for f, o in entries:
59 for f, o in sorted(dict(entries).items()):
60 60 if o or not unlink:
61 61 checkambig = checkambigfiles and (f, b'') in checkambigfiles
62 62 try:
63 63 fp = opener(f, b'a', checkambig=checkambig)
64 64 if fp.tell() < o:
65 65 raise error.Abort(
66 66 _(
67 67 b"attempted to truncate %s to %d bytes, but it was "
68 68 b"already %d bytes\n"
69 69 )
70 70 % (f, o, fp.tell())
71 71 )
72 72 fp.truncate(o)
73 73 fp.close()
74 74 except IOError:
75 75 report(_(b"failed to truncate %s\n") % f)
76 76 raise
77 77 else:
78 78 try:
79 79 opener.unlink(f)
80 80 except (IOError, OSError) as inst:
81 81 if inst.errno != errno.ENOENT:
82 82 raise
83 83
84 84 backupfiles = []
85 85 for l, f, b, c in backupentries:
86 86 if l not in vfsmap and c:
87 87 report(b"couldn't handle %s: unknown cache location %s\n" % (b, l))
88 88 vfs = vfsmap[l]
89 89 try:
90 90 if f and b:
91 91 filepath = vfs.join(f)
92 92 backuppath = vfs.join(b)
93 93 checkambig = checkambigfiles and (f, l) in checkambigfiles
94 94 try:
95 95 util.copyfile(backuppath, filepath, checkambig=checkambig)
96 96 backupfiles.append(b)
97 97 except IOError:
98 98 report(_(b"failed to recover %s\n") % f)
99 99 else:
100 100 target = f or b
101 101 try:
102 102 vfs.unlink(target)
103 103 except (IOError, OSError) as inst:
104 104 if inst.errno != errno.ENOENT:
105 105 raise
106 106 except (IOError, OSError, error.Abort):
107 107 if not c:
108 108 raise
109 109
110 110 backuppath = b"%s.backupfiles" % journal
111 111 if opener.exists(backuppath):
112 112 opener.unlink(backuppath)
113 113 opener.unlink(journal)
114 114 try:
115 115 for f in backupfiles:
116 116 if opener.exists(f):
117 117 opener.unlink(f)
118 118 except (IOError, OSError, error.Abort):
119 119 # only pure backup files remain, it is safe to ignore any error
120 120 pass
121 121
122 122
123 123 class transaction(util.transactional):
124 124 def __init__(
125 125 self,
126 126 report,
127 127 opener,
128 128 vfsmap,
129 129 journalname,
130 130 undoname=None,
131 131 after=None,
132 132 createmode=None,
133 133 validator=None,
134 134 releasefn=None,
135 135 checkambigfiles=None,
136 136 name='<unnamed>',
137 137 ):
138 138 """Begin a new transaction
139 139
140 140 Begins a new transaction that allows rolling back writes in the event of
141 141 an exception.
142 142
143 143 * `after`: called after the transaction has been committed
144 144 * `createmode`: the mode of the journal file that will be created
145 145 * `releasefn`: called after releasing (with transaction and result)
146 146
147 147 `checkambigfiles` is a set of (path, vfs-location) tuples,
148 148 which determine whether file stat ambiguity should be avoided
149 149 for the corresponding files.
150 150 """
151 151 self._count = 1
152 152 self._usages = 1
153 153 self._report = report
154 154 # a vfs to the store content
155 155 self._opener = opener
156 156 # a map to access files in various locations: {location -> vfs}
157 157 vfsmap = vfsmap.copy()
158 158 vfsmap[b''] = opener # set default value
159 159 self._vfsmap = vfsmap
160 160 self._after = after
161 161 self._offsetmap = {}
162 162 self._newfiles = set()
163 163 self._journal = journalname
164 164 self._undoname = undoname
165 165 self._queue = []
166 166 # A callback to do something just after releasing transaction.
167 167 if releasefn is None:
168 168 releasefn = lambda tr, success: None
169 169 self._releasefn = releasefn
170 170
171 171 self._checkambigfiles = set()
172 172 if checkambigfiles:
173 173 self._checkambigfiles.update(checkambigfiles)
174 174
175 175 self._names = [name]
176 176
177 177 # A dict dedicated to precisely tracking the changes introduced in the
178 178 # transaction.
179 179 self.changes = {}
180 180
181 181 # a dict of arguments to be passed to hooks
182 182 self.hookargs = {}
183 183 self._file = opener.open(self._journal, b"w+")
184 184
185 185 # a list of ('location', 'path', 'backuppath', cache) entries.
186 186 # - if 'backuppath' is empty, no file existed at backup time
187 187 # - if 'path' is empty, this is a temporary transaction file
188 188 # - if 'location' is not empty, the path is outside main opener reach.
189 189 # use 'location' value as a key in a vfsmap to find the right 'vfs'
190 190 # (cache is currently unused)
191 191 self._backupentries = []
192 192 self._backupmap = {}
193 193 self._backupjournal = b"%s.backupfiles" % self._journal
194 194 self._backupsfile = opener.open(self._backupjournal, b'w')
195 195 self._backupsfile.write(b'%d\n' % version)
196 196
197 197 if createmode is not None:
198 198 opener.chmod(self._journal, createmode & 0o666)
199 199 opener.chmod(self._backupjournal, createmode & 0o666)
200 200
201 201 # hold file generations to be performed on commit
202 202 self._filegenerators = {}
203 203 # hold callback to write pending data for hooks
204 204 self._pendingcallback = {}
205 205 # True if any pending data has ever been written
206 206 self._anypending = False
207 207 # holds callback to call when writing the transaction
208 208 self._finalizecallback = {}
209 209 # holds callback to call when validating the transaction
210 210 # should raise exception if anything is wrong
211 211 self._validatecallback = {}
212 212 if validator is not None:
213 213 self._validatecallback[b'001-userhooks'] = validator
214 214 # hold callback for post transaction close
215 215 self._postclosecallback = {}
216 216 # holds callbacks to call during abort
217 217 self._abortcallback = {}
218 218
219 219 def __repr__(self):
220 220 name = '/'.join(self._names)
221 221 return '<transaction name=%s, count=%d, usages=%d>' % (
222 222 name,
223 223 self._count,
224 224 self._usages,
225 225 )
226 226
227 227 def __del__(self):
228 228 if self._journal:
229 229 self._abort()
230 230
231 231 @active
232 232 def startgroup(self):
233 233 """delay registration of file entry
234 234
235 235 This is used by strip to delay vision of strip offset. The transaction
236 236 sees either none or all of the strip actions to be done."""
237 237 self._queue.append([])
238 238
239 239 @active
240 240 def endgroup(self):
241 241 """apply delayed registration of file entry.
242 242
243 243 This is used by strip to delay vision of strip offset. The transaction
244 244 sees either none or all of the strip actions to be done."""
245 245 q = self._queue.pop()
246 246 for f, o in q:
247 247 self._addentry(f, o)
248 248
249 249 @active
250 250 def add(self, file, offset):
251 251 """record the state of an append-only file before update"""
252 252 if (
253 253 file in self._newfiles
254 254 or file in self._offsetmap
255 255 or file in self._backupmap
256 256 ):
257 257 return
258 258 if self._queue:
259 259 self._queue[-1].append((file, offset))
260 260 return
261 261
262 262 self._addentry(file, offset)
263 263
264 264 def _addentry(self, file, offset):
265 265 """add a append-only entry to memory and on-disk state"""
266 266 if (
267 267 file in self._newfiles
268 268 or file in self._offsetmap
269 269 or file in self._backupmap
270 270 ):
271 271 return
272 272 if offset:
273 273 self._offsetmap[file] = offset
274 274 else:
275 275 self._newfiles.add(file)
276 276 # add enough data to the journal to do the truncate
277 277 self._file.write(b"%s\0%d\n" % (file, offset))
278 278 self._file.flush()
279 279
280 280 @active
281 281 def addbackup(self, file, hardlink=True, location=b''):
282 282 """Adds a backup of the file to the transaction
283 283
284 284 Calling addbackup() creates a hardlink backup of the specified file
285 285 that is used to recover the file in the event of the transaction
286 286 aborting.
287 287
288 288 * `file`: the file path, relative to .hg/store
289 289 * `hardlink`: use a hardlink to quickly create the backup
290 290 """
291 291 if self._queue:
292 292 msg = b'cannot use transaction.addbackup inside "group"'
293 293 raise error.ProgrammingError(msg)
294 294
295 295 if (
296 296 file in self._newfiles
297 297 or file in self._offsetmap
298 298 or file in self._backupmap
299 299 ):
300 300 return
301 301 vfs = self._vfsmap[location]
302 302 dirname, filename = vfs.split(file)
303 303 backupfilename = b"%s.backup.%s" % (self._journal, filename)
304 304 backupfile = vfs.reljoin(dirname, backupfilename)
305 305 if vfs.exists(file):
306 306 filepath = vfs.join(file)
307 307 backuppath = vfs.join(backupfile)
308 308 util.copyfile(filepath, backuppath, hardlink=hardlink)
309 309 else:
310 310 backupfile = b''
311 311
312 312 self._addbackupentry((location, file, backupfile, False))
313 313
314 314 def _addbackupentry(self, entry):
315 315 """register a new backup entry and write it to disk"""
316 316 self._backupentries.append(entry)
317 317 self._backupmap[entry[1]] = len(self._backupentries) - 1
318 318 self._backupsfile.write(b"%s\0%s\0%s\0%d\n" % entry)
319 319 self._backupsfile.flush()
320 320
321 321 @active
322 322 def registertmp(self, tmpfile, location=b''):
323 323 """register a temporary transaction file
324 324
325 325 Such files will be deleted when the transaction exits (on both
326 326 failure and success).
327 327 """
328 328 self._addbackupentry((location, b'', tmpfile, False))
329 329
330 330 @active
331 331 def addfilegenerator(
332 332 self, genid, filenames, genfunc, order=0, location=b''
333 333 ):
334 334 """add a function to generates some files at transaction commit
335 335
336 336 The `genfunc` argument is a function capable of generating proper
337 337 content of each entry in the `filename` tuple.
338 338
339 339 At transaction close time, `genfunc` will be called with one file
340 340 object argument per entries in `filenames`.
341 341
342 342 The transaction itself is responsible for the backup, creation and
343 343 final write of such file.
344 344
345 345 The `genid` argument is used to ensure the same set of file is only
346 346 generated once. Call to `addfilegenerator` for a `genid` already
347 347 present will overwrite the old entry.
348 348
349 349 The `order` argument may be used to control the order in which multiple
350 350 generator will be executed.
351 351
352 352 The `location` arguments may be used to indicate the files are located
353 353 outside of the the standard directory for transaction. It should match
354 354 one of the key of the `transaction.vfsmap` dictionary.
355 355 """
356 356 # For now, we are unable to do proper backup and restore of custom vfs
357 357 # except for bookmarks, which are handled outside this mechanism.
358 358 self._filegenerators[genid] = (order, filenames, genfunc, location)
359 359
360 360 @active
361 361 def removefilegenerator(self, genid):
362 362 """reverse of addfilegenerator, remove a file generator function"""
363 363 if genid in self._filegenerators:
364 364 del self._filegenerators[genid]
365 365
366 366 def _generatefiles(self, suffix=b'', group=GEN_GROUP_ALL):
367 367 # write files registered for generation
368 368 any = False
369 369
370 370 if group == GEN_GROUP_ALL:
371 371 skip_post = skip_pre = False
372 372 else:
373 373 skip_pre = group == GEN_GROUP_POST_FINALIZE
374 374 skip_post = group == GEN_GROUP_PRE_FINALIZE
375 375
376 376 for id, entry in sorted(pycompat.iteritems(self._filegenerators)):
377 377 any = True
378 378 order, filenames, genfunc, location = entry
379 379
380 380 # for generation at closing, check if it's before or after finalize
381 381 is_post = id in postfinalizegenerators
382 382 if skip_post and is_post:
383 383 continue
384 384 elif skip_pre and not is_post:
385 385 continue
386 386
387 387 vfs = self._vfsmap[location]
388 388 files = []
389 389 try:
390 390 for name in filenames:
391 391 name += suffix
392 392 if suffix:
393 393 self.registertmp(name, location=location)
394 394 checkambig = False
395 395 else:
396 396 self.addbackup(name, location=location)
397 397 checkambig = (name, location) in self._checkambigfiles
398 398 files.append(
399 399 vfs(name, b'w', atomictemp=True, checkambig=checkambig)
400 400 )
401 401 genfunc(*files)
402 402 for f in files:
403 403 f.close()
404 404 # skip discard() loop since we're sure no open file remains
405 405 del files[:]
406 406 finally:
407 407 for f in files:
408 408 f.discard()
409 409 return any
410 410
411 411 @active
412 412 def findoffset(self, file):
413 413 if file in self._newfiles:
414 414 return 0
415 415 return self._offsetmap.get(file)
416 416
417 417 @active
418 418 def readjournal(self):
419 419 self._file.seek(0)
420 420 entries = []
421 421 for l in self._file.readlines():
422 422 file, troffset = l.split(b'\0')
423 423 entries.append((file, int(troffset)))
424 424 return entries
425 425
426 426 @active
427 427 def replace(self, file, offset):
428 428 """
429 429 replace can only replace already committed entries
430 430 that are not pending in the queue
431 431 """
432 432 if file in self._newfiles:
433 433 if not offset:
434 434 return
435 435 self._newfiles.remove(file)
436 436 self._offsetmap[file] = offset
437 437 elif file in self._offsetmap:
438 438 if not offset:
439 439 del self._offsetmap[file]
440 440 self._newfiles.add(file)
441 441 else:
442 442 self._offsetmap[file] = offset
443 443 else:
444 444 raise KeyError(file)
445 445 self._file.write(b"%s\0%d\n" % (file, offset))
446 446 self._file.flush()
447 447
448 448 @active
449 449 def nest(self, name='<unnamed>'):
450 450 self._count += 1
451 451 self._usages += 1
452 452 self._names.append(name)
453 453 return self
454 454
455 455 def release(self):
456 456 if self._count > 0:
457 457 self._usages -= 1
458 458 if self._names:
459 459 self._names.pop()
460 460 # if the transaction scopes are left without being closed, fail
461 461 if self._count > 0 and self._usages == 0:
462 462 self._abort()
463 463
464 464 def running(self):
465 465 return self._count > 0
466 466
467 467 def addpending(self, category, callback):
468 468 """add a callback to be called when the transaction is pending
469 469
470 470 The transaction will be given as callback's first argument.
471 471
472 472 Category is a unique identifier to allow overwriting an old callback
473 473 with a newer callback.
474 474 """
475 475 self._pendingcallback[category] = callback
476 476
477 477 @active
478 478 def writepending(self):
479 479 """write pending file to temporary version
480 480
481 481 This is used to allow hooks to view a transaction before commit"""
482 482 categories = sorted(self._pendingcallback)
483 483 for cat in categories:
484 484 # remove callback since the data will have been flushed
485 485 any = self._pendingcallback.pop(cat)(self)
486 486 self._anypending = self._anypending or any
487 487 self._anypending |= self._generatefiles(suffix=b'.pending')
488 488 return self._anypending
489 489
490 490 @active
491 491 def hasfinalize(self, category):
492 492 """check is a callback already exist for a category"""
493 493 return category in self._finalizecallback
494 494
495 495 @active
496 496 def addfinalize(self, category, callback):
497 497 """add a callback to be called when the transaction is closed
498 498
499 499 The transaction will be given as callback's first argument.
500 500
501 501 Category is a unique identifier to allow overwriting old callbacks with
502 502 newer callbacks.
503 503 """
504 504 self._finalizecallback[category] = callback
505 505
506 506 @active
507 507 def addpostclose(self, category, callback):
508 508 """add or replace a callback to be called after the transaction closed
509 509
510 510 The transaction will be given as callback's first argument.
511 511
512 512 Category is a unique identifier to allow overwriting an old callback
513 513 with a newer callback.
514 514 """
515 515 self._postclosecallback[category] = callback
516 516
517 517 @active
518 518 def getpostclose(self, category):
519 519 """return a postclose callback added before, or None"""
520 520 return self._postclosecallback.get(category, None)
521 521
522 522 @active
523 523 def addabort(self, category, callback):
524 524 """add a callback to be called when the transaction is aborted.
525 525
526 526 The transaction will be given as the first argument to the callback.
527 527
528 528 Category is a unique identifier to allow overwriting an old callback
529 529 with a newer callback.
530 530 """
531 531 self._abortcallback[category] = callback
532 532
533 533 @active
534 534 def addvalidator(self, category, callback):
535 535 """adds a callback to be called when validating the transaction.
536 536
537 537 The transaction will be given as the first argument to the callback.
538 538
539 539 callback should raise an exception to abort the transaction"""
540 540 self._validatecallback[category] = callback
541 541
542 542 @active
543 543 def close(self):
544 544 '''commit the transaction'''
545 545 if self._count == 1:
546 546 for category in sorted(self._validatecallback):
547 547 self._validatecallback[category](self)
548 548 self._validatecallback = None # Help prevent cycles.
549 549 self._generatefiles(group=GEN_GROUP_PRE_FINALIZE)
550 550 while self._finalizecallback:
551 551 callbacks = self._finalizecallback
552 552 self._finalizecallback = {}
553 553 categories = sorted(callbacks)
554 554 for cat in categories:
555 555 callbacks[cat](self)
556 556 # Prevent double usage and help clear cycles.
557 557 self._finalizecallback = None
558 558 self._generatefiles(group=GEN_GROUP_POST_FINALIZE)
559 559
560 560 self._count -= 1
561 561 if self._count != 0:
562 562 return
563 563 self._file.close()
564 564 self._backupsfile.close()
565 565 # cleanup temporary files
566 566 for l, f, b, c in self._backupentries:
567 567 if l not in self._vfsmap and c:
568 568 self._report(
569 569 b"couldn't remove %s: unknown cache location %s\n" % (b, l)
570 570 )
571 571 continue
572 572 vfs = self._vfsmap[l]
573 573 if not f and b and vfs.exists(b):
574 574 try:
575 575 vfs.unlink(b)
576 576 except (IOError, OSError, error.Abort) as inst:
577 577 if not c:
578 578 raise
579 579 # Abort may be raised by a read-only opener
580 580 self._report(
581 581 b"couldn't remove %s: %s\n" % (vfs.join(b), inst)
582 582 )
583 583 self._offsetmap = {}
584 584 self._newfiles = set()
585 585 self._writeundo()
586 586 if self._after:
587 587 self._after()
588 588 self._after = None # Help prevent cycles.
589 589 if self._opener.isfile(self._backupjournal):
590 590 self._opener.unlink(self._backupjournal)
591 591 if self._opener.isfile(self._journal):
592 592 self._opener.unlink(self._journal)
593 593 for l, _f, b, c in self._backupentries:
594 594 if l not in self._vfsmap and c:
595 595 self._report(
596 596 b"couldn't remove %s: unknown cache location"
597 597 b"%s\n" % (b, l)
598 598 )
599 599 continue
600 600 vfs = self._vfsmap[l]
601 601 if b and vfs.exists(b):
602 602 try:
603 603 vfs.unlink(b)
604 604 except (IOError, OSError, error.Abort) as inst:
605 605 if not c:
606 606 raise
607 607 # Abort may be raised by a read-only opener
608 608 self._report(
609 609 b"couldn't remove %s: %s\n" % (vfs.join(b), inst)
610 610 )
611 611 self._backupentries = []
612 612 self._journal = None
613 613
614 614 self._releasefn(self, True) # notify success of closing transaction
615 615 self._releasefn = None # Help prevent cycles.
616 616
617 617 # run post close action
618 618 categories = sorted(self._postclosecallback)
619 619 for cat in categories:
620 620 self._postclosecallback[cat](self)
621 621 # Prevent double usage and help clear cycles.
622 622 self._postclosecallback = None
623 623
624 624 @active
625 625 def abort(self):
626 626 """abort the transaction (generally called on error, or when the
627 627 transaction is not explicitly committed before going out of
628 628 scope)"""
629 629 self._abort()
630 630
631 631 def _writeundo(self):
632 632 """write transaction data for possible future undo call"""
633 633 if self._undoname is None:
634 634 return
635 635 undobackupfile = self._opener.open(
636 636 b"%s.backupfiles" % self._undoname, b'w'
637 637 )
638 638 undobackupfile.write(b'%d\n' % version)
639 639 for l, f, b, c in self._backupentries:
640 640 if not f: # temporary file
641 641 continue
642 642 if not b:
643 643 u = b''
644 644 else:
645 645 if l not in self._vfsmap and c:
646 646 self._report(
647 647 b"couldn't remove %s: unknown cache location"
648 648 b"%s\n" % (b, l)
649 649 )
650 650 continue
651 651 vfs = self._vfsmap[l]
652 652 base, name = vfs.split(b)
653 653 assert name.startswith(self._journal), name
654 654 uname = name.replace(self._journal, self._undoname, 1)
655 655 u = vfs.reljoin(base, uname)
656 656 util.copyfile(vfs.join(b), vfs.join(u), hardlink=True)
657 657 undobackupfile.write(b"%s\0%s\0%s\0%d\n" % (l, f, u, c))
658 658 undobackupfile.close()
659 659
660 660 def _abort(self):
661 661 entries = self.readjournal()
662 662 self._count = 0
663 663 self._usages = 0
664 664 self._file.close()
665 665 self._backupsfile.close()
666 666
667 667 try:
668 668 if not entries and not self._backupentries:
669 669 if self._backupjournal:
670 670 self._opener.unlink(self._backupjournal)
671 671 if self._journal:
672 672 self._opener.unlink(self._journal)
673 673 return
674 674
675 675 self._report(_(b"transaction abort!\n"))
676 676
677 677 try:
678 678 for cat in sorted(self._abortcallback):
679 679 self._abortcallback[cat](self)
680 680 # Prevent double usage and help clear cycles.
681 681 self._abortcallback = None
682 682 _playback(
683 683 self._journal,
684 684 self._report,
685 685 self._opener,
686 686 self._vfsmap,
687 687 entries,
688 688 self._backupentries,
689 689 False,
690 690 checkambigfiles=self._checkambigfiles,
691 691 )
692 692 self._report(_(b"rollback completed\n"))
693 693 except BaseException as exc:
694 694 self._report(_(b"rollback failed - please run hg recover\n"))
695 695 self._report(
696 696 _(b"(failure reason: %s)\n") % stringutil.forcebytestr(exc)
697 697 )
698 698 finally:
699 699 self._journal = None
700 700 self._releasefn(self, False) # notify failure of transaction
701 701 self._releasefn = None # Help prevent cycles.
702 702
703 703
704 704 def rollback(opener, vfsmap, file, report, checkambigfiles=None):
705 705 """Rolls back the transaction contained in the given file
706 706
707 707 Reads the entries in the specified file, and the corresponding
708 708 '*.backupfiles' file, to recover from an incomplete transaction.
709 709
710 710 * `file`: a file containing a list of entries, specifying where
711 711 to truncate each file. The file should contain a list of
712 712 file\0offset pairs, delimited by newlines. The corresponding
713 713 '*.backupfiles' file should contain a list of
714 714 location\0file\0backupfile\0cache entries, delimited by newlines.
715 715
716 716 `checkambigfiles` is a set of (path, vfs-location) tuples,
717 717 which determine whether file stat ambiguity should be avoided at
718 718 restoring the corresponding files.
719 719 """
720 720 entries = []
721 721 backupentries = []
722 722
723 723 fp = opener.open(file)
724 724 lines = fp.readlines()
725 725 fp.close()
726 726 for l in lines:
727 727 try:
728 728 f, o = l.split(b'\0')
729 729 entries.append((f, int(o)))
730 730 except ValueError:
731 731 report(
732 732 _(b"couldn't read journal entry %r!\n") % pycompat.bytestr(l)
733 733 )
734 734
735 735 backupjournal = b"%s.backupfiles" % file
736 736 if opener.exists(backupjournal):
737 737 fp = opener.open(backupjournal)
738 738 lines = fp.readlines()
739 739 if lines:
740 740 ver = lines[0][:-1]
741 741 if ver == (b'%d' % version):
742 742 for line in lines[1:]:
743 743 if line:
744 744 # Shave off the trailing newline
745 745 line = line[:-1]
746 746 l, f, b, c = line.split(b'\0')
747 747 backupentries.append((l, f, b, bool(c)))
748 748 else:
749 749 report(
750 750 _(
751 751 b"journal was created by a different version of "
752 752 b"Mercurial\n"
753 753 )
754 754 )
755 755
756 756 _playback(
757 757 file,
758 758 report,
759 759 opener,
760 760 vfsmap,
761 761 entries,
762 762 backupentries,
763 763 checkambigfiles=checkambigfiles,
764 764 )
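
For reference, a minimal, self-contained sketch of the replay step that _playback() performs after the per-file dedup; the vfs indirection, stat-ambiguity checks and error reporting are omitted, and the helper name is invented for illustration:

    import os

    def replay(root, entries, unlink=True):
        # Apply only the last recorded offset per file: a nonzero offset
        # truncates the file back to its pre-transaction size, while a zero
        # offset removes a file that the transaction created.
        for f, offset in sorted(dict(entries).items()):
            path = os.path.join(root, f)
            if offset or not unlink:
                with open(path, 'r+b') as fp:
                    fp.truncate(offset)
            elif os.path.exists(path):
                os.unlink(path)

    # For example, replay('.hg/store', [('data/file.i', 1174),
    # ('data/file.i', 128)]) truncates data/file.i to 128 bytes, matching
    # the 'hg recover' result in the test below.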
@@ -1,86 +1,170 @@
1 1 Test correctness of revlog inline -> non-inline transition
2 2 ----------------------------------------------------------
3 3
4 4 Helper extension to intercept renames.
5 5
6 6 $ cat > $TESTTMP/intercept_rename.py << EOF
7 7 > import os
8 8 > import sys
9 9 > from mercurial import extensions, util
10 10 >
11 11 > def extsetup(ui):
12 12 > def close(orig, *args, **kwargs):
13 13 > path = args[0]._atomictempfile__name
14 14 > if path.endswith(b'/.hg/store/data/file.i'):
15 15 > os._exit(80)
16 16 > return orig(*args, **kwargs)
17 17 > extensions.wrapfunction(util.atomictempfile, 'close', close)
18 18 > EOF
19 19
20 20
21 21 Test offset computation to correctly factor in the index entries themselves.
22 22 Also test that the new data file has the correct size if the transaction is aborted
23 23 after the index has been replaced.
24 24
25 25 The test repo has one small, one moderate and one big change. The clone has
26 26 the small and moderate changes and will transition to non-inline storage when
27 27 adding the big change.
28 28
29 29 $ hg init troffset-computation --config format.revlog-compression=none
30 30 $ cd troffset-computation
31 31 $ printf '% 20d' '1' > file
32 32 $ hg commit -Aqm_
33 33 $ printf '% 1024d' '1' > file
34 34 $ hg commit -Aqm_
35 35 $ dd if=/dev/zero of=file bs=1k count=128 > /dev/null 2>&1
36 36 $ hg commit -Aqm_
37 37 $ cd ..
38 38
39 39 $ hg clone -r 1 troffset-computation troffset-computation-copy --config format.revlog-compression=none -q
40 40 $ cd troffset-computation-copy
41 41
42 42 Reference size:
43 43
44 44 $ f -s .hg/store/data/file*
45 45 .hg/store/data/file.i: size=1174
46 46
47 47 $ cat > .hg/hgrc <<EOF
48 48 > [hooks]
49 49 > pretxnchangegroup = python:$TESTDIR/helper-killhook.py:killme
50 50 > EOF
51 51 $ hg pull ../troffset-computation
52 52 pulling from ../troffset-computation
53 53 [80]
54 54
55 55 The first file.i entry should match the size above.
56 56 The first file.d entry is the temporary record written during the split, the
57 57 second the record written after the split. The sum of the second file.d entry
58 58 and the second file.i entry should match the first file.i entry (1046 + 128 = 1174).
59 59
60 60 $ cat .hg/store/journal | tr -s '\000' ' ' | grep data/file
61 61 data/file.i 1174
62 62 data/file.d 0
63 63 data/file.d 1046
64 64 data/file.i 128
65 $ hg recover
66 rolling back interrupted transaction
67 (verify step skipped, run `hg verify` to check your repository content)
68 $ f -s .hg/store/data/file*
69 .hg/store/data/file.d: size=1046
70 .hg/store/data/file.i: size=128
71 $ hg tip
72 changeset: 1:3ce491143aec
73 tag: tip
74 user: test
75 date: Thu Jan 01 00:00:00 1970 +0000
76 summary: _
77
78 $ hg verify
79 checking changesets
80 checking manifests
81 crosschecking files in changesets and manifests
82 checking files
83 warning: revlog 'data/file.d' not in fncache!
84 checked 2 changesets with 2 changes to 1 files
85 1 warnings encountered!
86 hint: run "hg debugrebuildfncache" to recover from corrupt fncache
65 87 $ cd ..
66 88
67 Now retry the same but intercept the rename of the index and check that
89
90 Now retry the procedure but intercept the rename of the index and check that
68 91 the journal does not contain the new index size. This demonstrates the edge case
69 92 where the data file is left as garbage.
70 93
71 94 $ hg clone -r 1 troffset-computation troffset-computation-copy2 --config format.revlog-compression=none -q
72 95 $ cd troffset-computation-copy2
73 96 $ cat > .hg/hgrc <<EOF
74 97 > [extensions]
75 98 > intercept_rename = $TESTTMP/intercept_rename.py
76 99 > [hooks]
77 100 > pretxnchangegroup = python:$TESTDIR/helper-killhook.py:killme
78 101 > EOF
79 102 $ hg pull ../troffset-computation
80 103 pulling from ../troffset-computation
81 104 [80]
82 105 $ cat .hg/store/journal | tr -s '\000' ' ' | grep data/file
83 106 data/file.i 1174
84 107 data/file.d 0
85 108 data/file.d 1046
109
110 $ hg recover
111 rolling back interrupted transaction
112 (verify step skipped, run `hg verify` to check your repository content)
113 $ f -s .hg/store/data/file*
114 .hg/store/data/file.d: size=1046
115 .hg/store/data/file.i: size=1174
116 $ hg tip
117 changeset: 1:3ce491143aec
118 tag: tip
119 user: test
120 date: Thu Jan 01 00:00:00 1970 +0000
121 summary: _
122
123 $ hg verify
124 checking changesets
125 checking manifests
126 crosschecking files in changesets and manifests
127 checking files
128 checked 2 changesets with 2 changes to 1 files
86 129 $ cd ..
130
131
132 Repeat the original test but let hg roll back the transaction.
133
134 $ hg clone -r 1 troffset-computation troffset-computation-copy-rb --config format.revlog-compression=none -q
135 $ cd troffset-computation-copy-rb
136 $ cat > .hg/hgrc <<EOF
137 > [hooks]
138 > pretxnchangegroup = false
139 > EOF
140 $ hg pull ../troffset-computation
141 pulling from ../troffset-computation
142 searching for changes
143 adding changesets
144 adding manifests
145 adding file changes
146 transaction abort!
147 rollback completed
148 abort: pretxnchangegroup hook exited with status 1
149 [40]
150 $ f -s .hg/store/data/file*
151 .hg/store/data/file.d: size=1046
152 .hg/store/data/file.i: size=128
153 $ hg tip
154 changeset: 1:3ce491143aec
155 tag: tip
156 user: test
157 date: Thu Jan 01 00:00:00 1970 +0000
158 summary: _
159
160 $ hg verify
161 checking changesets
162 checking manifests
163 crosschecking files in changesets and manifests
164 checking files
165 warning: revlog 'data/file.d' not in fncache!
166 checked 2 changesets with 2 changes to 1 files
167 1 warnings encountered!
168 hint: run "hg debugrebuildfncache" to recover from corrupt fncache
169 $ cd ..
170