##// END OF EJS Templates
transaction: display data about why the transaction failed to rollback...
Boris Feld -
r40614:aca09df3 default
parent child Browse files
Show More
@@ -1,640 +1,645
1 1 # transaction.py - simple journaling scheme for mercurial
2 2 #
3 3 # This transaction scheme is intended to gracefully handle program
4 4 # errors and interruptions. More serious failures like system crashes
5 5 # can be recovered with an fsck-like tool. As the whole repository is
6 6 # effectively log-structured, this should amount to simply truncating
7 7 # anything that isn't referenced in the changelog.
8 8 #
9 9 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
10 10 #
11 11 # This software may be used and distributed according to the terms of the
12 12 # GNU General Public License version 2 or any later version.
13 13
14 14 from __future__ import absolute_import
15 15
16 16 import errno
17 17
18 18 from .i18n import _
19 19 from . import (
20 20 error,
21 21 pycompat,
22 22 util,
23 23 )
24 from .utils import (
25 stringutil,
26 )
24 27
# version of the on-disk '<journal>.backupfiles' format; written as the first
# line of that file and checked by rollback() before replaying entries
version = 2

# These are the file generators that should only be executed after the
# finalizers are done, since they rely on the output of the finalizers (like
# the changelog having been written).
postfinalizegenerators = {
    'bookmarks',
    'dirstate'
}

# group names used to select which registered file generators
# _generatefiles() should run
gengroupall='all'
gengroupprefinalize='prefinalize'
gengrouppostfinalize='postfinalize'
38 41
def active(func):
    """Decorator refusing to run a transaction method once the transaction
    has been committed or aborted (i.e. its usage count dropped to zero)."""
    def _active(self, *args, **kwds):
        if self._count != 0:
            return func(self, *args, **kwds)
        raise error.Abort(_(
            'cannot use transaction when it is already committed/aborted'))
    return _active
46 49
def _playback(journal, report, opener, vfsmap, entries, backupentries,
              unlink=True, checkambigfiles=None):
    """Replay a journal to undo an interrupted transaction.

    * `journal`: path of the journal file; also used to derive the
      '<journal>.backupfiles' path removed once replay succeeds
    * `report`: callable used to emit progress/error messages
    * `opener`: vfs to the store content
    * `vfsmap`: {location -> vfs} map used to resolve backup entries
    * `entries`: (file, offset, ignored) triples; each file is truncated
      back to `offset`, or unlinked when the offset is 0 and `unlink` is set
    * `backupentries`: (location, file, backupfile, cache) tuples used to
      restore full-file backups and to remove temporary files
    * `unlink`: when True, files journalled at offset 0 are removed instead
      of truncated
    * `checkambigfiles`: set of (path, vfs-location) tuples for which file
      stat ambiguity must be avoided when rewriting
    """
    for f, o, _ignore in entries:
        if o or not unlink:
            checkambig = checkambigfiles and (f, '') in checkambigfiles
            try:
                fp = opener(f, 'a', checkambig=checkambig)
                fp.truncate(o)
                fp.close()
            except IOError:
                report(_("failed to truncate %s\n") % f)
                raise
        else:
            try:
                opener.unlink(f)
            except (IOError, OSError) as inst:
                # the file may legitimately be gone already
                if inst.errno != errno.ENOENT:
                    raise

    backupfiles = []
    for l, f, b, c in backupentries:
        if l not in vfsmap and c:
            report("couldn't handle %s: unknown cache location %s\n"
                   % (b, l))
            # an entry from an unknown cache location cannot be restored;
            # skip it rather than crash on the vfsmap lookup below (this
            # mirrors the handling in transaction.close())
            continue
        vfs = vfsmap[l]
        try:
            if f and b:
                # both names present: restore the backup over the file
                filepath = vfs.join(f)
                backuppath = vfs.join(b)
                checkambig = checkambigfiles and (f, l) in checkambigfiles
                try:
                    util.copyfile(backuppath, filepath, checkambig=checkambig)
                    backupfiles.append(b)
                except IOError:
                    report(_("failed to recover %s\n") % f)
            else:
                # only one name: a temporary file to remove
                target = f or b
                try:
                    vfs.unlink(target)
                except (IOError, OSError) as inst:
                    if inst.errno != errno.ENOENT:
                        raise
        except (IOError, OSError, error.Abort):
            # cache entries are best-effort; anything else must propagate
            if not c:
                raise

    backuppath = "%s.backupfiles" % journal
    if opener.exists(backuppath):
        opener.unlink(backuppath)
    opener.unlink(journal)
    try:
        for f in backupfiles:
            if opener.exists(f):
                opener.unlink(f)
    except (IOError, OSError, error.Abort):
        # only pure backup file remains, it is safe to ignore any error
        pass
104 107
class transaction(util.transactional):
    """A journal-backed transaction over files in a repository store.

    Writes are recorded in a journal file as they happen so that an
    interrupted or failed transaction can be rolled back by replaying
    the journal (see _playback).
    """
    def __init__(self, report, opener, vfsmap, journalname, undoname=None,
                 after=None, createmode=None, validator=None, releasefn=None,
                 checkambigfiles=None, name=r'<unnamed>'):
        """Begin a new transaction

        Begins a new transaction that allows rolling back writes in the event of
        an exception.

        * `after`: called after the transaction has been committed
        * `createmode`: the mode of the journal file that will be created
        * `releasefn`: called after releasing (with transaction and result)

        `checkambigfiles` is a set of (path, vfs-location) tuples,
        which determine whether file stat ambiguity should be avoided
        for corresponded files.
        """
        self._count = 1
        self._usages = 1
        self._report = report
        # a vfs to the store content
        self._opener = opener
        # a map to access file in various {location -> vfs}
        vfsmap = vfsmap.copy()
        vfsmap[''] = opener # set default value
        self._vfsmap = vfsmap
        self._after = after
        self._entries = []
        self._map = {}
        self._journal = journalname
        self._undoname = undoname
        self._queue = []
        # A callback to validate transaction content before closing it.
        # should raise exception if anything is wrong.
        # target user is repository hooks.
        if validator is None:
            validator = lambda tr: None
        self._validator = validator
        # A callback to do something just after releasing transaction.
        if releasefn is None:
            releasefn = lambda tr, success: None
        self._releasefn = releasefn

        self._checkambigfiles = set()
        if checkambigfiles:
            self._checkambigfiles.update(checkambigfiles)

        self._names = [name]

        # A dict dedicated to precisely tracking the changes introduced in the
        # transaction.
        self.changes = {}

        # a dict of arguments to be passed to hooks
        self.hookargs = {}
        self._file = opener.open(self._journal, "w")

        # a list of ('location', 'path', 'backuppath', cache) entries.
        # - if 'backuppath' is empty, no file existed at backup time
        # - if 'path' is empty, this is a temporary transaction file
        # - if 'location' is not empty, the path is outside main opener reach.
        #   use 'location' value as a key in a vfsmap to find the right 'vfs'
        # (cache is currently unused)
        self._backupentries = []
        self._backupmap = {}
        self._backupjournal = "%s.backupfiles" % self._journal
        self._backupsfile = opener.open(self._backupjournal, 'w')
        self._backupsfile.write('%d\n' % version)

        if createmode is not None:
            opener.chmod(self._journal, createmode & 0o666)
            opener.chmod(self._backupjournal, createmode & 0o666)

        # hold file generations to be performed on commit
        self._filegenerators = {}
        # hold callback to write pending data for hooks
        self._pendingcallback = {}
        # True if any pending data has ever been written
        self._anypending = False
        # holds callback to call when writing the transaction
        self._finalizecallback = {}
        # hold callback for post transaction close
        self._postclosecallback = {}
        # holds callbacks to call during abort
        self._abortcallback = {}

    def __repr__(self):
        name = r'/'.join(self._names)
        return (r'<transaction name=%s, count=%d, usages=%d>' %
                (name, self._count, self._usages))

    def __del__(self):
        # fail-safe: abort if the object is collected while still open
        if self._journal:
            self._abort()

    @active
    def startgroup(self):
        """delay registration of file entry

        This is used by strip to delay vision of strip offset. The transaction
        sees either none or all of the strip actions to be done."""
        self._queue.append([])

    @active
    def endgroup(self):
        """apply delayed registration of file entry.

        This is used by strip to delay vision of strip offset. The transaction
        sees either none or all of the strip actions to be done."""
        q = self._queue.pop()
        for f, o, data in q:
            self._addentry(f, o, data)

    @active
    def add(self, file, offset, data=None):
        """record the state of an append-only file before update"""
        if file in self._map or file in self._backupmap:
            return
        if self._queue:
            self._queue[-1].append((file, offset, data))
            return

        self._addentry(file, offset, data)

    def _addentry(self, file, offset, data):
        """add a append-only entry to memory and on-disk state"""
        if file in self._map or file in self._backupmap:
            return
        self._entries.append((file, offset, data))
        self._map[file] = len(self._entries) - 1
        # add enough data to the journal to do the truncate
        self._file.write("%s\0%d\n" % (file, offset))
        self._file.flush()

    @active
    def addbackup(self, file, hardlink=True, location=''):
        """Adds a backup of the file to the transaction

        Calling addbackup() creates a hardlink backup of the specified file
        that is used to recover the file in the event of the transaction
        aborting.

        * `file`: the file path, relative to .hg/store
        * `hardlink`: use a hardlink to quickly create the backup
        """
        if self._queue:
            msg = 'cannot use transaction.addbackup inside "group"'
            raise error.ProgrammingError(msg)

        if file in self._map or file in self._backupmap:
            return
        vfs = self._vfsmap[location]
        dirname, filename = vfs.split(file)
        backupfilename = "%s.backup.%s" % (self._journal, filename)
        backupfile = vfs.reljoin(dirname, backupfilename)
        if vfs.exists(file):
            filepath = vfs.join(file)
            backuppath = vfs.join(backupfile)
            util.copyfile(filepath, backuppath, hardlink=hardlink)
        else:
            # empty backup name means "no file existed at backup time"
            backupfile = ''

        self._addbackupentry((location, file, backupfile, False))

    def _addbackupentry(self, entry):
        """register a new backup entry and write it to disk"""
        self._backupentries.append(entry)
        self._backupmap[entry[1]] = len(self._backupentries) - 1
        self._backupsfile.write("%s\0%s\0%s\0%d\n" % entry)
        self._backupsfile.flush()

    @active
    def registertmp(self, tmpfile, location=''):
        """register a temporary transaction file

        Such files will be deleted when the transaction exits (on both
        failure and success).
        """
        self._addbackupentry((location, '', tmpfile, False))

    @active
    def addfilegenerator(self, genid, filenames, genfunc, order=0,
                         location=''):
        """add a function to generate some files at transaction commit

        The `genfunc` argument is a function capable of generating proper
        content of each entry in the `filenames` tuple.

        At transaction close time, `genfunc` will be called with one file
        object argument per entries in `filenames`.

        The transaction itself is responsible for the backup, creation and
        final write of such file.

        The `genid` argument is used to ensure the same set of files is only
        generated once. Call to `addfilegenerator` for a `genid` already
        present will overwrite the old entry.

        The `order` argument may be used to control the order in which multiple
        generator will be executed.

        The `location` arguments may be used to indicate the files are located
        outside of the standard directory for transaction. It should match
        one of the key of the `transaction.vfsmap` dictionary.
        """
        # For now, we are unable to do proper backup and restore of custom vfs
        # but for bookmarks that are handled outside this mechanism.
        self._filegenerators[genid] = (order, filenames, genfunc, location)

    @active
    def removefilegenerator(self, genid):
        """reverse of addfilegenerator, remove a file generator function"""
        if genid in self._filegenerators:
            del self._filegenerators[genid]

    def _generatefiles(self, suffix='', group=gengroupall):
        """run the registered file generators selected by `group`

        Returns True if at least one generator was registered. With a
        non-empty `suffix`, generated files are written as temporary
        '<name><suffix>' files (used for '.pending' hook views)."""
        # write files registered for generation
        any = False
        for id, entry in sorted(self._filegenerators.iteritems()):
            any = True
            order, filenames, genfunc, location = entry

            # for generation at closing, check if it's before or after finalize
            postfinalize = group == gengrouppostfinalize
            if (group != gengroupall and
                (id in postfinalizegenerators) != (postfinalize)):
                continue

            vfs = self._vfsmap[location]
            files = []
            try:
                for name in filenames:
                    name += suffix
                    if suffix:
                        self.registertmp(name, location=location)
                        checkambig = False
                    else:
                        self.addbackup(name, location=location)
                        checkambig = (name, location) in self._checkambigfiles
                    files.append(vfs(name, 'w', atomictemp=True,
                                     checkambig=checkambig))
                genfunc(*files)
            finally:
                for f in files:
                    f.close()
        return any

    @active
    def find(self, file):
        """return the journal or backup entry recorded for `file`, if any"""
        if file in self._map:
            return self._entries[self._map[file]]
        if file in self._backupmap:
            return self._backupentries[self._backupmap[file]]
        return None

    @active
    def replace(self, file, offset, data=None):
        '''
        replace can only replace already committed entries
        that are not pending in the queue
        '''

        if file not in self._map:
            raise KeyError(file)
        index = self._map[file]
        self._entries[index] = (file, offset, data)
        self._file.write("%s\0%d\n" % (file, offset))
        self._file.flush()

    @active
    def nest(self, name=r'<unnamed>'):
        """enter a nested transaction scope; must be balanced by release()"""
        self._count += 1
        self._usages += 1
        self._names.append(name)
        return self

    def release(self):
        """leave one transaction scope (the inverse of nest())"""
        if self._count > 0:
            self._usages -= 1
        if self._names:
            self._names.pop()
        # if the transaction scopes are left without being closed, fail
        if self._count > 0 and self._usages == 0:
            self._abort()

    def running(self):
        return self._count > 0

    def addpending(self, category, callback):
        """add a callback to be called when the transaction is pending

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._pendingcallback[category] = callback

    @active
    def writepending(self):
        '''write pending file to temporary version

        This is used to allow hooks to view a transaction before commit'''
        categories = sorted(self._pendingcallback)
        for cat in categories:
            # remove callback since the data will have been flushed
            any = self._pendingcallback.pop(cat)(self)
            self._anypending = self._anypending or any
        self._anypending |= self._generatefiles(suffix='.pending')
        return self._anypending

    @active
    def addfinalize(self, category, callback):
        """add a callback to be called when the transaction is closed

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting old callbacks with
        newer callbacks.
        """
        self._finalizecallback[category] = callback

    @active
    def addpostclose(self, category, callback):
        """add or replace a callback to be called after the transaction closed

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._postclosecallback[category] = callback

    @active
    def getpostclose(self, category):
        """return a postclose callback added before, or None"""
        return self._postclosecallback.get(category, None)

    @active
    def addabort(self, category, callback):
        """add a callback to be called when the transaction is aborted.

        The transaction will be given as the first argument to the callback.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._abortcallback[category] = callback

    @active
    def close(self):
        '''commit the transaction'''
        if self._count == 1:
            self._validator(self) # will raise exception if needed
            self._validator = None # Help prevent cycles.
            self._generatefiles(group=gengroupprefinalize)
            categories = sorted(self._finalizecallback)
            for cat in categories:
                self._finalizecallback[cat](self)
            # Prevent double usage and help clear cycles.
            self._finalizecallback = None
            self._generatefiles(group=gengrouppostfinalize)

        self._count -= 1
        if self._count != 0:
            # nested scopes remain open; outermost close() does the work
            return
        self._file.close()
        self._backupsfile.close()
        # cleanup temporary files
        for l, f, b, c in self._backupentries:
            if l not in self._vfsmap and c:
                self._report("couldn't remove %s: unknown cache location %s\n"
                             % (b, l))
                continue
            vfs = self._vfsmap[l]
            if not f and b and vfs.exists(b):
                try:
                    vfs.unlink(b)
                except (IOError, OSError, error.Abort) as inst:
                    if not c:
                        raise
                    # Abort may be raised by a read-only opener
                    self._report("couldn't remove %s: %s\n"
                                 % (vfs.join(b), inst))
        self._entries = []
        self._writeundo()
        if self._after:
            self._after()
            self._after = None # Help prevent cycles.
        if self._opener.isfile(self._backupjournal):
            self._opener.unlink(self._backupjournal)
        if self._opener.isfile(self._journal):
            self._opener.unlink(self._journal)
        # remove the backup copies now that the transaction succeeded
        for l, _f, b, c in self._backupentries:
            if l not in self._vfsmap and c:
                self._report("couldn't remove %s: unknown cache location"
                             "%s\n" % (b, l))
                continue
            vfs = self._vfsmap[l]
            if b and vfs.exists(b):
                try:
                    vfs.unlink(b)
                except (IOError, OSError, error.Abort) as inst:
                    if not c:
                        raise
                    # Abort may be raised by a read-only opener
                    self._report("couldn't remove %s: %s\n"
                                 % (vfs.join(b), inst))
        self._backupentries = []
        self._journal = None

        self._releasefn(self, True) # notify success of closing transaction
        self._releasefn = None # Help prevent cycles.

        # run post close action
        categories = sorted(self._postclosecallback)
        for cat in categories:
            self._postclosecallback[cat](self)
        # Prevent double usage and help clear cycles.
        self._postclosecallback = None

    @active
    def abort(self):
        '''abort the transaction (generally called on error, or when the
        transaction is not explicitly committed before going out of
        scope)'''
        self._abort()

    def _writeundo(self):
        """write transaction data for possible future undo call"""
        if self._undoname is None:
            return
        undobackupfile = self._opener.open("%s.backupfiles" % self._undoname,
                                           'w')
        undobackupfile.write('%d\n' % version)
        for l, f, b, c in self._backupentries:
            if not f: # temporary file
                continue
            if not b:
                u = ''
            else:
                if l not in self._vfsmap and c:
                    self._report("couldn't remove %s: unknown cache location"
                                 "%s\n" % (b, l))
                    continue
                vfs = self._vfsmap[l]
                base, name = vfs.split(b)
                assert name.startswith(self._journal), name
                # rename the journal-derived backup name to its undo name
                uname = name.replace(self._journal, self._undoname, 1)
                u = vfs.reljoin(base, uname)
                util.copyfile(vfs.join(b), vfs.join(u), hardlink=True)
            undobackupfile.write("%s\0%s\0%s\0%d\n" % (l, f, u, c))
        undobackupfile.close()


    def _abort(self):
        """roll the transaction back by replaying the journal"""
        self._count = 0
        self._usages = 0
        self._file.close()
        self._backupsfile.close()

        try:
            if not self._entries and not self._backupentries:
                # nothing was written; just drop the journal files
                if self._backupjournal:
                    self._opener.unlink(self._backupjournal)
                if self._journal:
                    self._opener.unlink(self._journal)
                return

            self._report(_("transaction abort!\n"))

            try:
                for cat in sorted(self._abortcallback):
                    self._abortcallback[cat](self)
                # Prevent double usage and help clear cycles.
                self._abortcallback = None
                _playback(self._journal, self._report, self._opener,
                          self._vfsmap, self._entries, self._backupentries,
                          False, checkambigfiles=self._checkambigfiles)
                self._report(_("rollback completed\n"))
            except BaseException as exc:
                self._report(_("rollback failed - please run hg recover\n"))
                self._report(_("(failure reason: %s)\n")
                             % stringutil.forcebytestr(exc))
        finally:
            self._journal = None
            self._releasefn(self, False) # notify failure of transaction
            self._releasefn = None # Help prevent cycles.
591 596
def rollback(opener, vfsmap, file, report, checkambigfiles=None):
    """Undo the interrupted transaction recorded in the given journal file.

    The journal `file` holds newline-delimited file\0offset pairs telling
    where each store file must be truncated; the companion '*.backupfiles'
    file holds file\0backupfile pairs used to restore complete backups.
    Both are parsed here and handed to _playback() for the actual replay.

    `checkambigfiles` is a set of (path, vfs-location) tuples,
    which determine whether file stat ambiguity should be avoided at
    restoring corresponded files.
    """
    entries = []
    backupentries = []

    journalfp = opener.open(file)
    journallines = journalfp.readlines()
    journalfp.close()
    for entryline in journallines:
        try:
            name, offset = entryline.split('\0')
            entries.append((name, int(offset), None))
        except ValueError:
            report(
                _("couldn't read journal entry %r!\n")
                % pycompat.bytestr(entryline))

    backupjournal = "%s.backupfiles" % file
    if opener.exists(backupjournal):
        backupfp = opener.open(backupjournal)
        backuplines = backupfp.readlines()
        if backuplines:
            # first line carries the backup-file format version
            if backuplines[0][:-1] != (b'%d' % version):
                report(_("journal was created by a different version of "
                         "Mercurial\n"))
            else:
                for backupline in backuplines[1:]:
                    if not backupline:
                        continue
                    # drop the trailing newline before splitting the fields
                    location, name, backup, cache = backupline[:-1].split('\0')
                    backupentries.append((location, name, backup, bool(cache)))

    _playback(file, report, opener, vfsmap, entries, backupentries,
              checkambigfiles=checkambigfiles)
@@ -1,139 +1,141
1 1 #require unix-permissions no-root reporevlogstore
2 2
3 3 $ cat > $TESTTMP/dumpjournal.py <<EOF
4 4 > import sys
5 5 > for entry in sys.stdin.read().split('\n'):
6 6 > if entry:
7 7 > print(entry.split('\x00')[0])
8 8 > EOF
9 9
10 10 $ echo "[extensions]" >> $HGRCPATH
11 11 $ echo "mq=">> $HGRCPATH
12 12
13 13 $ teststrip() {
14 14 > hg -q up -C $1
15 15 > echo % before update $1, strip $2
16 16 > hg parents
17 17 > chmod -$3 $4
18 18 > hg strip $2 2>&1 | sed 's/\(bundle\).*/\1/' | sed 's/Permission denied.*\.hg\/store\/\(.*\)/Permission denied \.hg\/store\/\1/'
19 19 > echo % after update $1, strip $2
20 20 > chmod +$3 $4
21 21 > hg verify
22 22 > echo % journal contents
23 23 > if [ -f .hg/store/journal ]; then
24 24 > cat .hg/store/journal | "$PYTHON" $TESTTMP/dumpjournal.py
25 25 > else
26 26 > echo "(no journal)"
27 27 > fi
28 28 > ls .hg/store/journal >/dev/null 2>&1 && hg recover
29 29 > ls .hg/strip-backup/* >/dev/null 2>&1 && hg unbundle -q .hg/strip-backup/*
30 30 > rm -rf .hg/strip-backup
31 31 > }
32 32
33 33 $ hg init test
34 34 $ cd test
35 35 $ echo a > a
36 36 $ hg -q ci -m "a" -A
37 37 $ echo b > b
38 38 $ hg -q ci -m "b" -A
39 39 $ echo b2 >> b
40 40 $ hg -q ci -m "b2" -A
41 41 $ echo c > c
42 42 $ hg -q ci -m "c" -A
43 43 $ teststrip 0 2 w .hg/store/data/b.i
44 44 % before update 0, strip 2
45 45 changeset: 0:cb9a9f314b8b
46 46 user: test
47 47 date: Thu Jan 01 00:00:00 1970 +0000
48 48 summary: a
49 49
50 50 saved backup bundle
51 51 transaction abort!
52 52 failed to truncate data/b.i
53 53 rollback failed - please run hg recover
54 (failure reason: [Errno 13] Permission denied .hg/store/data/b.i')
54 55 strip failed, backup bundle
55 56 abort: Permission denied .hg/store/data/b.i
56 57 % after update 0, strip 2
57 58 abandoned transaction found - run hg recover
58 59 checking changesets
59 60 checking manifests
60 61 crosschecking files in changesets and manifests
61 62 checking files
62 63 b@?: rev 1 points to nonexistent changeset 2
63 64 (expected 1)
64 65 b@?: 736c29771fba not in manifests
65 66 warning: orphan data file 'data/c.i'
66 67 checked 2 changesets with 3 changes to 2 files
67 68 2 warnings encountered!
68 69 2 integrity errors encountered!
69 70 % journal contents
70 71 00changelog.i
71 72 00manifest.i
72 73 data/b.i
73 74 data/c.i
74 75 rolling back interrupted transaction
75 76 checking changesets
76 77 checking manifests
77 78 crosschecking files in changesets and manifests
78 79 checking files
79 80 checked 2 changesets with 2 changes to 2 files
80 81 $ teststrip 0 2 r .hg/store/data/b.i
81 82 % before update 0, strip 2
82 83 changeset: 0:cb9a9f314b8b
83 84 user: test
84 85 date: Thu Jan 01 00:00:00 1970 +0000
85 86 summary: a
86 87
87 88 abort: Permission denied .hg/store/data/b.i
88 89 % after update 0, strip 2
89 90 checking changesets
90 91 checking manifests
91 92 crosschecking files in changesets and manifests
92 93 checking files
93 94 checked 4 changesets with 4 changes to 3 files
94 95 % journal contents
95 96 (no journal)
96 97 $ teststrip 0 2 w .hg/store/00manifest.i
97 98 % before update 0, strip 2
98 99 changeset: 0:cb9a9f314b8b
99 100 user: test
100 101 date: Thu Jan 01 00:00:00 1970 +0000
101 102 summary: a
102 103
103 104 saved backup bundle
104 105 transaction abort!
105 106 failed to truncate 00manifest.i
106 107 rollback failed - please run hg recover
108 (failure reason: [Errno 13] Permission denied .hg/store/00manifest.i')
107 109 strip failed, backup bundle
108 110 abort: Permission denied .hg/store/00manifest.i
109 111 % after update 0, strip 2
110 112 abandoned transaction found - run hg recover
111 113 checking changesets
112 114 checking manifests
113 115 manifest@?: rev 2 points to nonexistent changeset 2
114 116 manifest@?: 3362547cdf64 not in changesets
115 117 manifest@?: rev 3 points to nonexistent changeset 3
116 118 manifest@?: 265a85892ecb not in changesets
117 119 crosschecking files in changesets and manifests
118 120 c@3: in manifest but not in changeset
119 121 checking files
120 122 b@?: rev 1 points to nonexistent changeset 2
121 123 (expected 1)
122 124 c@?: rev 0 points to nonexistent changeset 3
123 125 checked 2 changesets with 4 changes to 3 files
124 126 1 warnings encountered!
125 127 7 integrity errors encountered!
126 128 (first damaged changeset appears to be 3)
127 129 % journal contents
128 130 00changelog.i
129 131 00manifest.i
130 132 data/b.i
131 133 data/c.i
132 134 rolling back interrupted transaction
133 135 checking changesets
134 136 checking manifests
135 137 crosschecking files in changesets and manifests
136 138 checking files
137 139 checked 2 changesets with 2 changes to 2 files
138 140
139 141 $ cd ..
General Comments 0
You need to be logged in to leave comments. Login now