revlog: fix a bug in revlog splitting...
Arseniy Alekseyev
r51535:05d429fe stable
@@ -1,960 +1,965 b''
1 1 # transaction.py - simple journaling scheme for mercurial
2 2 #
3 3 # This transaction scheme is intended to gracefully handle program
4 4 # errors and interruptions. More serious failures like system crashes
5 5 # can be recovered with an fsck-like tool. As the whole repository is
6 6 # effectively log-structured, this should amount to simply truncating
7 7 # anything that isn't referenced in the changelog.
8 8 #
9 9 # Copyright 2005, 2006 Olivia Mackall <olivia@selenic.com>
10 10 #
11 11 # This software may be used and distributed according to the terms of the
12 12 # GNU General Public License version 2 or any later version.
13 13
14 14 import errno
15 15 import os
16 16
17 17 from .i18n import _
18 18 from . import (
19 19 error,
20 20 pycompat,
21 21 util,
22 22 )
23 23 from .utils import stringutil
24 24
25 25 version = 2
26 26
27 27 GEN_GROUP_ALL = b'all'
28 28 GEN_GROUP_PRE_FINALIZE = b'prefinalize'
29 29 GEN_GROUP_POST_FINALIZE = b'postfinalize'
30 30
31 31
32 32 def active(func):
33 33 def _active(self, *args, **kwds):
34 34 if self._count == 0:
35 35 raise error.ProgrammingError(
36 36 b'cannot use transaction when it is already committed/aborted'
37 37 )
38 38 return func(self, *args, **kwds)
39 39
40 40 return _active
41 41
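The `active` decorator above guards every public method: once `_count` falls to zero the transaction is committed or aborted, and any further call is a programming error. A minimal standalone sketch of the same guard pattern (all names hypothetical, not Mercurial APIs):

    import functools

    class ClosedError(Exception):
        pass

    def guard_active(func):
        # reject method calls once the object is committed/aborted
        @functools.wraps(func)
        def _active(self, *args, **kwds):
            if self._count == 0:
                raise ClosedError('cannot use transaction when it is '
                                  'already committed/aborted')
            return func(self, *args, **kwds)
        return _active

    class Tx:
        def __init__(self):
            self._count = 1

        @guard_active
        def add(self, item):
            return item

    tx = Tx()
    assert tx.add('ok') == 'ok'  # allowed while active
    tx._count = 0                # simulate commit/abort
    try:
        tx.add('boom')
    except ClosedError:
        pass                     # the guarded call is correctly rejected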
42 42
43 43 UNDO_BACKUP = b'%s.backupfiles'
44 44
45 45 UNDO_FILES_MAY_NEED_CLEANUP = [
46 46 # legacy entries that might exist on disk from previous versions:
47 47 (b'store', b'%s.narrowspec'),
48 48 (b'plain', b'%s.narrowspec.dirstate'),
49 49 (b'plain', b'%s.branch'),
50 50 (b'plain', b'%s.bookmarks'),
51 51 (b'store', b'%s.phaseroots'),
52 52 (b'plain', b'%s.dirstate'),
53 53 # files actually in use today:
54 54 (b'plain', b'%s.desc'),
55 55 # Always delete undo last to make sure we detect that a cleanup is needed if
56 56 # the process is interrupted.
57 57 (b'store', b'%s'),
58 58 ]
59 59
60 60
61 61 def cleanup_undo_files(report, vfsmap, undo_prefix=b'undo'):
62 62 """remove "undo" files used by the rollback logic
63 63
64 64 This is useful to prevent rollback running in situations where it does not
65 65 make sense. For example after a strip.
66 66 """
67 67 backup_listing = UNDO_BACKUP % undo_prefix
68 68
69 69 backup_entries = []
70 70 undo_files = []
71 71 svfs = vfsmap[b'store']
72 72 try:
73 73 with svfs(backup_listing) as f:
74 74 backup_entries = read_backup_files(report, f)
75 75 except OSError as e:
76 76 if e.errno != errno.ENOENT:
77 77 msg = _(b'could not read %s: %s\n')
78 78 msg %= (svfs.join(backup_listing), stringutil.forcebytestr(e))
79 79 report(msg)
80 80
81 81 for location, f, backup_path, c in backup_entries:
82 82 if location in vfsmap and backup_path:
83 83 undo_files.append((vfsmap[location], backup_path))
84 84
85 85 undo_files.append((svfs, backup_listing))
86 86 for location, undo_path in UNDO_FILES_MAY_NEED_CLEANUP:
87 87 undo_files.append((vfsmap[location], undo_path % undo_prefix))
88 88 for undovfs, undofile in undo_files:
89 89 try:
90 90 undovfs.unlink(undofile)
91 91 except OSError as e:
92 92 if e.errno != errno.ENOENT:
93 93 msg = _(b'error removing %s: %s\n')
94 94 msg %= (undovfs.join(undofile), stringutil.forcebytestr(e))
95 95 report(msg)
96 96
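Both unlink loops above share a defensive pattern: a missing file is fine (someone already cleaned it up), while any other failure is reported rather than raised. A standalone sketch of the pattern (the helper name is hypothetical; on Python 3 the errno test is equivalent to catching FileNotFoundError):

    import errno
    import os

    def unlink_quietly(path, report=print):
        # ignore "file does not exist"; report any other failure
        try:
            os.unlink(path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                report('error removing %s: %s' % (path, e))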
97 97
98 98 def _playback(
99 99 journal,
100 100 report,
101 101 opener,
102 102 vfsmap,
103 103 entries,
104 104 backupentries,
105 105 unlink=True,
106 106 checkambigfiles=None,
107 107 ):
108 108 """rollback a transaction :
109 109 - truncate files that have been appended to
110 110 - restore file backups
111 111 - delete temporary files
112 112 """
113 113 backupfiles = []
114 114
115 115 def restore_one_backup(vfs, f, b, checkambig):
116 116 filepath = vfs.join(f)
117 117 backuppath = vfs.join(b)
118 118 try:
119 119 util.copyfile(backuppath, filepath, checkambig=checkambig)
120 120 backupfiles.append((vfs, b))
121 121 except IOError as exc:
122 122 e_msg = stringutil.forcebytestr(exc)
123 123 report(_(b"failed to recover %s (%s)\n") % (f, e_msg))
124 124 raise
125 125
126 126 # gather all backup files that impact the store
127 127 # (we need this to detect files that are both backed up and truncated)
128 128 store_backup = {}
129 129 for entry in backupentries:
130 130 location, file_path, backup_path, cache = entry
131 131 vfs = vfsmap[location]
132 132 is_store = vfs.join(b'') == opener.join(b'')
133 133 if is_store and file_path and backup_path:
134 134 store_backup[file_path] = entry
135 135 copy_done = set()
136 136
137 137 # truncate each file `f` to offset `o`
138 138 for f, o in sorted(dict(entries).items()):
139 139 # if we have a backup for `f`, we should restore it first and truncate
140 140 # the restored file
141 141 bck_entry = store_backup.get(f)
142 142 if bck_entry is not None:
143 143 location, file_path, backup_path, cache = bck_entry
144 144 checkambig = False
145 145 if checkambigfiles:
146 146 checkambig = (file_path, location) in checkambigfiles
147 147 restore_one_backup(opener, file_path, backup_path, checkambig)
148 148 copy_done.add(bck_entry)
149 149 # truncate the file to its pre-transaction size
150 150 if o or not unlink:
151 151 checkambig = checkambigfiles and (f, b'') in checkambigfiles
152 152 try:
153 153 fp = opener(f, b'a', checkambig=checkambig)
154 154 if fp.tell() < o:
155 155 raise error.Abort(
156 156 _(
157 157 b"attempted to truncate %s to %d bytes, but it was "
158 158 b"already %d bytes\n"
159 159 )
160 160 % (f, o, fp.tell())
161 161 )
162 162 fp.truncate(o)
163 163 fp.close()
164 164 except IOError:
165 165 report(_(b"failed to truncate %s\n") % f)
166 166 raise
167 167 else:
168 168 # delete empty file
169 169 try:
170 170 opener.unlink(f)
171 171 except FileNotFoundError:
172 172 pass
173 173 # restore backed up files and clean up temporary files
174 174 for entry in backupentries:
175 175 if entry in copy_done:
176 176 continue
177 177 l, f, b, c = entry
178 178 if l not in vfsmap and c:
179 179 report(b"couldn't handle %s: unknown cache location %s\n" % (b, l))
180 180 vfs = vfsmap[l]
181 181 try:
182 182 checkambig = checkambigfiles and (f, l) in checkambigfiles
183 183 if f and b:
184 184 restore_one_backup(vfs, f, b, checkambig)
185 185 else:
186 186 target = f or b
187 187 try:
188 188 vfs.unlink(target)
189 189 except FileNotFoundError:
190 190 # This is fine because
191 191 #
192 192 # either we are trying to delete the main file, and it is
193 193 # already deleted.
194 194 #
195 195 # or we are trying to delete a temporary file and it is
196 196 # already deleted.
197 197 #
198 198 # in both cases, our target result (delete the file) is
199 199 # already achieved.
200 200 pass
201 201 except (IOError, OSError, error.Abort):
202 202 if not c:
203 203 raise
204 204
205 205 # cleanup transaction state file and the backups file
206 206 backuppath = b"%s.backupfiles" % journal
207 207 if opener.exists(backuppath):
208 208 opener.unlink(backuppath)
209 209 opener.unlink(journal)
210 210 try:
211 211 for vfs, f in backupfiles:
212 212 if vfs.exists(f):
213 213 vfs.unlink(f)
214 214 except (IOError, OSError, error.Abort):
215 215 # only pure backup files remain; it is safe to ignore any error
216 216 pass
217 217
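The heart of `_playback` is truncating each appended file back to its journaled offset, and refusing to proceed if the file is somehow already shorter than the recorded size. A self-contained sketch of that step on a plain file (no vfs indirection; names hypothetical):

    import os

    def rollback_append(path, offset):
        # restore an append-only file to its pre-transaction size
        with open(path, 'r+b') as fp:
            fp.seek(0, os.SEEK_END)
            size = fp.tell()
            if size < offset:
                raise ValueError('%s is %d bytes, below recorded offset %d'
                                 % (path, size, offset))
            fp.truncate(offset)

    # usage: record the size before appending, and call
    # rollback_append(path, recorded_size) when aborting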
218 218
219 219 class transaction(util.transactional):
220 220 def __init__(
221 221 self,
222 222 report,
223 223 opener,
224 224 vfsmap,
225 225 journalname,
226 226 undoname=None,
227 227 after=None,
228 228 createmode=None,
229 229 validator=None,
230 230 releasefn=None,
231 231 checkambigfiles=None,
232 232 name='<unnamed>',
233 233 ):
234 234 """Begin a new transaction
235 235
236 236 Begins a new transaction that allows rolling back writes in the event of
237 237 an exception.
238 238
239 239 * `after`: called after the transaction has been committed
240 240 * `createmode`: the mode of the journal file that will be created
241 241 * `releasefn`: called after releasing (with transaction and result)
242 242
243 243 `checkambigfiles` is a set of (path, vfs-location) tuples,
244 244 which determine whether file stat ambiguity should be avoided
245 245 for the corresponding files.
246 246 """
247 247 self._count = 1
248 248 self._usages = 1
249 249 self._report = report
250 250 # a vfs to the store content
251 251 self._opener = opener
252 252 # a map used to access files in various locations {location -> vfs}
253 253 vfsmap = vfsmap.copy()
254 254 vfsmap[b''] = opener # set default value
255 255 self._vfsmap = vfsmap
256 256 self._after = after
257 257 self._offsetmap = {}
258 258 self._newfiles = set()
259 259 self._journal = journalname
260 260 self._journal_files = []
261 261 self._undoname = undoname
262 262 self._queue = []
263 263 # A callback to do something just after releasing transaction.
264 264 if releasefn is None:
265 265 releasefn = lambda tr, success: None
266 266 self._releasefn = releasefn
267 267
268 268 self._checkambigfiles = set()
269 269 if checkambigfiles:
270 270 self._checkambigfiles.update(checkambigfiles)
271 271
272 272 self._names = [name]
273 273
274 274 # A dict dedicated to precisely tracking the changes introduced in the
275 275 # transaction.
276 276 self.changes = {}
277 277
278 278 # a dict of arguments to be passed to hooks
279 279 self.hookargs = {}
280 280 self._file = opener.open(self._journal, b"w+")
281 281
282 282 # a list of ('location', 'path', 'backuppath', cache) entries.
283 283 # - if 'backuppath' is empty, no file existed at backup time
284 284 # - if 'path' is empty, this is a temporary transaction file
285 285 # - if 'location' is not empty, the path is outside main opener reach.
286 286 # use 'location' value as a key in a vfsmap to find the right 'vfs'
287 287 # (cache is currently unused)
288 288 self._backupentries = []
289 289 self._backupmap = {}
290 290 self._backupjournal = b"%s.backupfiles" % self._journal
291 291 self._backupsfile = opener.open(self._backupjournal, b'w')
292 292 self._backupsfile.write(b'%d\n' % version)
293 # the set of temporary files
294 self._tmp_files = set()
293 295
294 296 if createmode is not None:
295 297 opener.chmod(self._journal, createmode & 0o666)
296 298 opener.chmod(self._backupjournal, createmode & 0o666)
297 299
298 300 # hold file generations to be performed on commit
299 301 self._filegenerators = {}
300 302 # hold callback to write pending data for hooks
301 303 self._pendingcallback = {}
302 304 # True if any pending data has ever been written
303 305 self._anypending = False
304 306 # holds callback to call when writing the transaction
305 307 self._finalizecallback = {}
306 308 # holds callback to call when validating the transaction
307 309 # should raise exception if anything is wrong
308 310 self._validatecallback = {}
309 311 if validator is not None:
310 312 self._validatecallback[b'001-userhooks'] = validator
311 313 # hold callback for post transaction close
312 314 self._postclosecallback = {}
313 315 # holds callbacks to call during abort
314 316 self._abortcallback = {}
315 317
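Taken together, these constructor arguments support a lifecycle along the following lines (a schematic sketch, not runnable on its own; `repo.transaction()` as the entry point and the `with`-statement support inherited from `util.transactional` are assumptions based on this module's base class):

    # schematic sketch, assuming `repo` is an open repository object
    with repo.transaction(b'my-operation') as tr:
        tr.add(b'data/somefile.i', pre_write_size)  # journal pre-write size
        # ... append to the file; if an exception escapes the block, the
        # context manager aborts and _playback() truncates back to the offset
    # a normal exit calls tr.close(), committing the transaction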
316 318 def __repr__(self):
317 319 name = '/'.join(self._names)
318 320 return '<transaction name=%s, count=%d, usages=%d>' % (
319 321 name,
320 322 self._count,
321 323 self._usages,
322 324 )
323 325
324 326 def __del__(self):
325 327 if self._journal:
326 328 self._abort()
327 329
328 330 @property
329 331 def finalized(self):
330 332 return self._finalizecallback is None
331 333
332 334 @active
333 335 def startgroup(self):
334 336 """delay registration of file entry
335 337
336 338 This is used by strip to delay visibility of the strip offsets. The transaction
337 339 sees either none or all of the strip actions to be done."""
338 340 self._queue.append([])
339 341
340 342 @active
341 343 def endgroup(self):
342 344 """apply delayed registration of file entry.
343 345
344 346 This is used by strip to delay visibility of the strip offsets. The transaction
345 347 sees either none or all of the strip actions to be done."""
346 348 q = self._queue.pop()
347 349 for f, o in q:
348 350 self._addentry(f, o)
349 351
350 352 @active
351 353 def add(self, file, offset):
352 354 """record the state of an append-only file before update"""
353 355 if (
354 356 file in self._newfiles
355 357 or file in self._offsetmap
356 358 or file in self._backupmap
359 or file in self._tmp_files
357 360 ):
358 361 return
359 362 if self._queue:
360 363 self._queue[-1].append((file, offset))
361 364 return
362 365
363 366 self._addentry(file, offset)
364 367
365 368 def _addentry(self, file, offset):
366 369 """add a append-only entry to memory and on-disk state"""
367 370 if (
368 371 file in self._newfiles
369 372 or file in self._offsetmap
370 373 or file in self._backupmap
374 or file in self._tmp_files
371 375 ):
372 376 return
373 377 if offset:
374 378 self._offsetmap[file] = offset
375 379 else:
376 380 self._newfiles.add(file)
377 381 # add enough data to the journal to do the truncate
378 382 self._file.write(b"%s\0%d\n" % (file, offset))
379 383 self._file.flush()
380 384
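Each journal record written here is simply the file name and its pre-transaction size, NUL-separated and newline-terminated. A small round-trip sketch of the format (standalone; the file name is illustrative):

    import io

    def write_entry(fp, name, offset):
        # mirrors the on-disk journal record: b"<name>\0<offset>\n"
        fp.write(b"%s\0%d\n" % (name, offset))

    def read_entries(fp):
        fp.seek(0)
        entries = []
        for line in fp.readlines():
            name, offset = line.rstrip(b'\n').split(b'\0')
            entries.append((name, int(offset)))
        return entries

    fp = io.BytesIO()
    write_entry(fp, b'data/file.i', 1174)
    assert read_entries(fp) == [(b'data/file.i', 1174)]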
381 385 @active
382 386 def addbackup(self, file, hardlink=True, location=b'', for_offset=False):
383 387 """Adds a backup of the file to the transaction
384 388
385 389 Calling addbackup() creates a hardlink backup of the specified file
386 390 that is used to recover the file in the event of the transaction
387 391 aborting.
388 392
389 393 * `file`: the file path, relative to .hg/store
390 394 * `hardlink`: use a hardlink to quickly create the backup
391 395
392 396 If `for_offset` is set, we expect an offset for this file to have been previously recorded
393 397 """
394 398 if self._queue:
395 399 msg = b'cannot use transaction.addbackup inside "group"'
396 400 raise error.ProgrammingError(msg)
397 401
398 402 if file in self._newfiles or file in self._backupmap:
399 403 return
400 404 elif file in self._offsetmap and not for_offset:
401 405 return
402 406 elif for_offset and file not in self._offsetmap:
403 407 msg = (
404 408 'calling `addbackup` with `for_offset=True`, '
405 409 'but no offset recorded: [%r] %r'
406 410 )
407 411 msg %= (location, file)
408 412 raise error.ProgrammingError(msg)
409 413
410 414 vfs = self._vfsmap[location]
411 415 dirname, filename = vfs.split(file)
412 416 backupfilename = b"%s.backup.%s" % (self._journal, filename)
413 417 backupfile = vfs.reljoin(dirname, backupfilename)
414 418 if vfs.exists(file):
415 419 filepath = vfs.join(file)
416 420 backuppath = vfs.join(backupfile)
417 421 # store encoding may result in a different directory here,
418 422 # so we have to ensure the destination directory exists
419 423 final_dir_name = os.path.dirname(backuppath)
420 424 util.makedirs(final_dir_name, mode=vfs.createmode, notindexed=True)
421 425 # then we can copy the backup
422 426 util.copyfile(filepath, backuppath, hardlink=hardlink)
423 427 else:
424 428 backupfile = b''
425 429
426 430 self._addbackupentry((location, file, backupfile, False))
427 431
428 432 def _addbackupentry(self, entry):
429 433 """register a new backup entry and write it to disk"""
430 434 self._backupentries.append(entry)
431 435 self._backupmap[entry[1]] = len(self._backupentries) - 1
432 436 self._backupsfile.write(b"%s\0%s\0%s\0%d\n" % entry)
433 437 self._backupsfile.flush()
434 438
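Backup journal records extend the same layout to four NUL-separated fields: location, original path, backup path, and a cache flag. A standalone round-trip sketch of the format:

    import io

    def write_backup_entry(fp, location, path, backup, cache):
        # mirrors b"<location>\0<path>\0<backup>\0<cache>\n"
        fp.write(b"%s\0%s\0%s\0%d\n" % (location, path, backup, cache))

    def read_backup_entries(fp):
        fp.seek(0)
        entries = []
        for line in fp.readlines():
            entries.append(tuple(line.rstrip(b'\n').split(b'\0')))
        return entries

    fp = io.BytesIO()
    write_backup_entry(fp, b'', b'data/file.i',
                       b'data/journal.backup.file.i', False)
    assert read_backup_entries(fp) == [
        (b'', b'data/file.i', b'data/journal.backup.file.i', b'0')]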
435 439 @active
436 440 def registertmp(self, tmpfile, location=b''):
437 441 """register a temporary transaction file
438 442
439 443 Such files will be deleted when the transaction exits (on both
440 444 failure and success).
441 445 """
446 self._tmp_files.add(tmpfile)
442 447 self._addbackupentry((location, b'', tmpfile, False))
443 448
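The `_tmp_files` bookkeeping is the behavior change this commit makes in transaction.py: once a path is registered as temporary, the new checks in `add()` and `_addentry()` above ignore later attempts to journal an offset for it, so an aborted revlog split deletes the temporary index rather than trying to truncate it. A toy model of the guard (standalone; the class is hypothetical):

    class TinyTx:
        # toy model of the registertmp()/add() interaction
        def __init__(self):
            self._offsetmap = {}
            self._tmp_files = set()

        def registertmp(self, f):
            self._tmp_files.add(f)

        def add(self, f, offset):
            if f in self._tmp_files:  # temp files are never journaled
                return
            self._offsetmap[f] = offset

    tx = TinyTx()
    tx.registertmp(b'data/file.i.s')
    tx.add(b'data/file.i.s', 320)
    assert b'data/file.i.s' not in tx._offsetmap  # deleted on abort, not truncated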
444 449 @active
445 450 def addfilegenerator(
446 451 self,
447 452 genid,
448 453 filenames,
449 454 genfunc,
450 455 order=0,
451 456 location=b'',
452 457 post_finalize=False,
453 458 ):
454 459 """add a function to generates some files at transaction commit
455 460
456 461 The `genfunc` argument is a function capable of generating proper
457 462 content for each entry in the `filenames` tuple.
458 463
459 464 At transaction close time, `genfunc` will be called with one file
460 465 object argument per entry in `filenames`.
461 466
462 467 The transaction itself is responsible for the backup, creation and
463 468 final write of such files.
464 469
465 470 The `genid` argument is used to ensure the same set of files is only
466 471 generated once. A call to `addfilegenerator` for a `genid` already
467 472 present will overwrite the old entry.
468 473
469 474 The `order` argument may be used to control the order in which multiple
470 475 generators will be executed.
471 476
472 477 The `location` argument may be used to indicate that the files are located
473 478 outside of the standard directory for the transaction. It should match
474 479 one of the keys of the `transaction.vfsmap` dictionary.
475 480
476 481 The `post_finalize` argument can be set to `True` for file generation
477 482 that must be run after the transaction has been finalized.
478 483 """
479 484 # For now, we are unable to do proper backup and restore of custom vfs
480 485 # except for bookmarks, which are handled outside this mechanism.
481 486 entry = (order, filenames, genfunc, location, post_finalize)
482 487 self._filegenerators[genid] = entry
483 488
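A schematic registration might look like this (the genid, file name, and b'plain' location are illustrative assumptions; `tr` is an open transaction):

    # schematic sketch, assumes an open transaction `tr`
    def write_marker(fp):
        fp.write(b'some derived content\n')

    tr.addfilegenerator(
        b'my-marker',        # genid: re-registering the same id overwrites
        (b'marker-file',),   # one file object is passed to the function per name
        write_marker,
        location=b'plain',   # looked up in the transaction's vfsmap
    )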
484 489 @active
485 490 def removefilegenerator(self, genid):
486 491 """reverse of addfilegenerator, remove a file generator function"""
487 492 if genid in self._filegenerators:
488 493 del self._filegenerators[genid]
489 494
490 495 def _generatefiles(self, suffix=b'', group=GEN_GROUP_ALL):
491 496 # write files registered for generation
492 497 any = False
493 498
494 499 if group == GEN_GROUP_ALL:
495 500 skip_post = skip_pre = False
496 501 else:
497 502 skip_pre = group == GEN_GROUP_POST_FINALIZE
498 503 skip_post = group == GEN_GROUP_PRE_FINALIZE
499 504
500 505 for id, entry in sorted(self._filegenerators.items()):
501 506 any = True
502 507 order, filenames, genfunc, location, post_finalize = entry
503 508
504 509 # for generation at closing, check if it's before or after finalize
505 510 if skip_post and post_finalize:
506 511 continue
507 512 elif skip_pre and not post_finalize:
508 513 continue
509 514
510 515 vfs = self._vfsmap[location]
511 516 files = []
512 517 try:
513 518 for name in filenames:
514 519 name += suffix
515 520 if suffix:
516 521 self.registertmp(name, location=location)
517 522 checkambig = False
518 523 else:
519 524 self.addbackup(name, location=location)
520 525 checkambig = (name, location) in self._checkambigfiles
521 526 files.append(
522 527 vfs(name, b'w', atomictemp=True, checkambig=checkambig)
523 528 )
524 529 genfunc(*files)
525 530 for f in files:
526 531 f.close()
527 532 # skip discard() loop since we're sure no open file remains
528 533 del files[:]
529 534 finally:
530 535 for f in files:
531 536 f.discard()
532 537 return any
533 538
534 539 @active
535 540 def findoffset(self, file):
536 541 if file in self._newfiles:
537 542 return 0
538 543 return self._offsetmap.get(file)
539 544
540 545 @active
541 546 def readjournal(self):
542 547 self._file.seek(0)
543 548 entries = []
544 549 for l in self._file.readlines():
545 550 file, troffset = l.split(b'\0')
546 551 entries.append((file, int(troffset)))
547 552 return entries
548 553
549 554 @active
550 555 def replace(self, file, offset):
551 556 """
552 557 replace can only replace already committed entries
553 558 that are not pending in the queue
554 559 """
555 560 if file in self._newfiles:
556 561 if not offset:
557 562 return
558 563 self._newfiles.remove(file)
559 564 self._offsetmap[file] = offset
560 565 elif file in self._offsetmap:
561 566 if not offset:
562 567 del self._offsetmap[file]
563 568 self._newfiles.add(file)
564 569 else:
565 570 self._offsetmap[file] = offset
566 571 else:
567 572 raise KeyError(file)
568 573 self._file.write(b"%s\0%d\n" % (file, offset))
569 574 self._file.flush()
570 575
571 576 @active
572 577 def nest(self, name='<unnamed>'):
573 578 self._count += 1
574 579 self._usages += 1
575 580 self._names.append(name)
576 581 return self
577 582
578 583 def release(self):
579 584 if self._count > 0:
580 585 self._usages -= 1
581 586 if self._names:
582 587 self._names.pop()
583 588 # if the transaction scopes are left without being closed, fail
584 589 if self._count > 0 and self._usages == 0:
585 590 self._abort()
586 591
587 592 def running(self):
588 593 return self._count > 0
589 594
590 595 def addpending(self, category, callback):
591 596 """add a callback to be called when the transaction is pending
592 597
593 598 The transaction will be given as callback's first argument.
594 599
595 600 Category is a unique identifier to allow overwriting an old callback
596 601 with a newer callback.
597 602 """
598 603 self._pendingcallback[category] = callback
599 604
600 605 @active
601 606 def writepending(self):
602 607 """write pending file to temporary version
603 608
604 609 This is used to allow hooks to view a transaction before commit"""
605 610 categories = sorted(self._pendingcallback)
606 611 for cat in categories:
607 612 # remove callback since the data will have been flushed
608 613 any = self._pendingcallback.pop(cat)(self)
609 614 self._anypending = self._anypending or any
610 615 self._anypending |= self._generatefiles(suffix=b'.pending')
611 616 return self._anypending
612 617
613 618 @active
614 619 def hasfinalize(self, category):
615 620 """check is a callback already exist for a category"""
616 621 return category in self._finalizecallback
617 622
618 623 @active
619 624 def addfinalize(self, category, callback):
620 625 """add a callback to be called when the transaction is closed
621 626
622 627 The transaction will be given as callback's first argument.
623 628
624 629 Category is a unique identifier to allow overwriting old callbacks with
625 630 newer callbacks.
626 631 """
627 632 self._finalizecallback[category] = callback
628 633
629 634 @active
630 635 def addpostclose(self, category, callback):
631 636 """add or replace a callback to be called after the transaction closed
632 637
633 638 The transaction will be given as callback's first argument.
634 639
635 640 Category is a unique identifier to allow overwriting an old callback
636 641 with a newer callback.
637 642 """
638 643 self._postclosecallback[category] = callback
639 644
640 645 @active
641 646 def getpostclose(self, category):
642 647 """return a postclose callback added before, or None"""
643 648 return self._postclosecallback.get(category, None)
644 649
645 650 @active
646 651 def addabort(self, category, callback):
647 652 """add a callback to be called when the transaction is aborted.
648 653
649 654 The transaction will be given as the first argument to the callback.
650 655
651 656 Category is a unique identifier to allow overwriting an old callback
652 657 with a newer callback.
653 658 """
654 659 self._abortcallback[category] = callback
655 660
656 661 @active
657 662 def addvalidator(self, category, callback):
658 663 """adds a callback to be called when validating the transaction.
659 664
660 665 The transaction will be given as the first argument to the callback.
661 666
662 667 The callback should raise an exception to abort the transaction"""
663 668 self._validatecallback[category] = callback
664 669
665 670 @active
666 671 def close(self):
667 672 '''commit the transaction'''
668 673 if self._count == 1:
669 674 for category in sorted(self._validatecallback):
670 675 self._validatecallback[category](self)
671 676 self._validatecallback = None # Help prevent cycles.
672 677 self._generatefiles(group=GEN_GROUP_PRE_FINALIZE)
673 678 while self._finalizecallback:
674 679 callbacks = self._finalizecallback
675 680 self._finalizecallback = {}
676 681 categories = sorted(callbacks)
677 682 for cat in categories:
678 683 callbacks[cat](self)
679 684 # Prevent double usage and help clear cycles.
680 685 self._finalizecallback = None
681 686 self._generatefiles(group=GEN_GROUP_POST_FINALIZE)
682 687
683 688 self._count -= 1
684 689 if self._count != 0:
685 690 return
686 691 self._file.close()
687 692 self._backupsfile.close()
688 693 # cleanup temporary files
689 694 for l, f, b, c in self._backupentries:
690 695 if l not in self._vfsmap and c:
691 696 self._report(
692 697 b"couldn't remove %s: unknown cache location %s\n" % (b, l)
693 698 )
694 699 continue
695 700 vfs = self._vfsmap[l]
696 701 if not f and b and vfs.exists(b):
697 702 try:
698 703 vfs.unlink(b)
699 704 except (IOError, OSError, error.Abort) as inst:
700 705 if not c:
701 706 raise
702 707 # Abort may be raised by a read-only opener
703 708 self._report(
704 709 b"couldn't remove %s: %s\n" % (vfs.join(b), inst)
705 710 )
706 711 self._offsetmap = {}
707 712 self._newfiles = set()
708 713 self._writeundo()
709 714 if self._after:
710 715 self._after()
711 716 self._after = None # Help prevent cycles.
712 717 if self._opener.isfile(self._backupjournal):
713 718 self._opener.unlink(self._backupjournal)
714 719 if self._opener.isfile(self._journal):
715 720 self._opener.unlink(self._journal)
716 721 for l, _f, b, c in self._backupentries:
717 722 if l not in self._vfsmap and c:
718 723 self._report(
719 724 b"couldn't remove %s: unknown cache location"
720 725 b"%s\n" % (b, l)
721 726 )
722 727 continue
723 728 vfs = self._vfsmap[l]
724 729 if b and vfs.exists(b):
725 730 try:
726 731 vfs.unlink(b)
727 732 except (IOError, OSError, error.Abort) as inst:
728 733 if not c:
729 734 raise
730 735 # Abort may be raised by a read-only opener
731 736 self._report(
732 737 b"couldn't remove %s: %s\n" % (vfs.join(b), inst)
733 738 )
734 739 self._backupentries = []
735 740 self._journal = None
736 741
737 742 self._releasefn(self, True) # notify success of closing transaction
738 743 self._releasefn = None # Help prevent cycles.
739 744
740 745 # run post close action
741 746 categories = sorted(self._postclosecallback)
742 747 for cat in categories:
743 748 self._postclosecallback[cat](self)
744 749 # Prevent double usage and help clear cycles.
745 750 self._postclosecallback = None
746 751
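Every callback map in this class (`_finalizecallback`, `_postclosecallback`, and the rest) is keyed by category and run in sorted key order, which is why callers pick sortable names such as b'001-userhooks'. A minimal illustration of that ordering contract:

    callbacks = {}
    log = []
    callbacks[b'020-second'] = lambda tr: log.append('second')
    callbacks[b'001-first'] = lambda tr: log.append('first')
    for cat in sorted(callbacks):  # byte-wise sort gives a stable order
        callbacks[cat](None)
    assert log == ['first', 'second']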
747 752 @active
748 753 def abort(self):
749 754 """abort the transaction (generally called on error, or when the
750 755 transaction is not explicitly committed before going out of
751 756 scope)"""
752 757 self._abort()
753 758
754 759 @active
755 760 def add_journal(self, vfs_id, path):
756 761 self._journal_files.append((vfs_id, path))
757 762
758 763 def _writeundo(self):
759 764 """write transaction data for possible future undo call"""
760 765 if self._undoname is None:
761 766 return
762 767 cleanup_undo_files(
763 768 self._report,
764 769 self._vfsmap,
765 770 undo_prefix=self._undoname,
766 771 )
767 772
768 773 def undoname(fn: bytes) -> bytes:
769 774 base, name = os.path.split(fn)
770 775 assert name.startswith(self._journal)
771 776 new_name = name.replace(self._journal, self._undoname, 1)
772 777 return os.path.join(base, new_name)
773 778
774 779 undo_backup_path = b"%s.backupfiles" % self._undoname
775 780 undobackupfile = self._opener.open(undo_backup_path, b'w')
776 781 undobackupfile.write(b'%d\n' % version)
777 782 for l, f, b, c in self._backupentries:
778 783 if not f: # temporary file
779 784 continue
780 785 if not b:
781 786 u = b''
782 787 else:
783 788 if l not in self._vfsmap and c:
784 789 self._report(
785 790 b"couldn't remove %s: unknown cache location"
786 791 b"%s\n" % (b, l)
787 792 )
788 793 continue
789 794 vfs = self._vfsmap[l]
790 795 u = undoname(b)
791 796 util.copyfile(vfs.join(b), vfs.join(u), hardlink=True)
792 797 undobackupfile.write(b"%s\0%s\0%s\0%d\n" % (l, f, u, c))
793 798 undobackupfile.close()
794 799 for vfs, src in self._journal_files:
795 800 dest = undoname(src)
796 801 # if src and dest refer to the same file, vfs.rename is a no-op,
797 802 # leaving both src and dest on disk. delete dest to make sure
798 803 # the rename cannot be such a no-op.
799 804 vfs.tryunlink(dest)
800 805 try:
801 806 vfs.rename(src, dest)
802 807 except FileNotFoundError: # journal file does not yet exist
803 808 pass
804 809
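The nested `undoname` helper is a plain prefix swap on the basename. A standalone sketch (using posixpath so the assert is portable; the real helper uses os.path):

    import posixpath

    def undoname(fn, journal=b'journal', undo=b'undo'):
        # swap the journal prefix of the basename for the undo prefix
        base, name = posixpath.split(fn)
        assert name.startswith(journal)
        return posixpath.join(base, name.replace(journal, undo, 1))

    assert undoname(b'store/journal.backup.file.i') == \
        b'store/undo.backup.file.i'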
805 810 def _abort(self):
806 811 entries = self.readjournal()
807 812 self._count = 0
808 813 self._usages = 0
809 814 self._file.close()
810 815 self._backupsfile.close()
811 816
812 817 quick = self._can_quick_abort(entries)
813 818 try:
814 819 if not quick:
815 820 self._report(_(b"transaction abort!\n"))
816 821 for cat in sorted(self._abortcallback):
817 822 self._abortcallback[cat](self)
818 823 # Prevent double usage and help clear cycles.
819 824 self._abortcallback = None
820 825 if quick:
821 826 self._do_quick_abort(entries)
822 827 else:
823 828 self._do_full_abort(entries)
824 829 finally:
825 830 self._journal = None
826 831 self._releasefn(self, False) # notify failure of transaction
827 832 self._releasefn = None # Help prevent cycles.
828 833
829 834 def _can_quick_abort(self, entries):
830 835 """False if any semantic content have been written on disk
831 836
832 837 True if nothing except temporary files has been written on disk."""
833 838 if entries:
834 839 return False
835 840 for e in self._backupentries:
836 841 if e[1]:
837 842 return False
838 843 return True
839 844
840 845 def _do_quick_abort(self, entries):
841 846 """(Silently) do a quick cleanup (see _can_quick_abort)"""
842 847 assert self._can_quick_abort(entries)
843 848 tmp_files = [e for e in self._backupentries if not e[1]]
844 849 for vfs_id, old_path, tmp_path, xxx in tmp_files:
845 850 vfs = self._vfsmap[vfs_id]
846 851 try:
847 852 vfs.unlink(tmp_path)
848 853 except FileNotFoundError:
849 854 pass
850 855 if self._backupjournal:
851 856 self._opener.unlink(self._backupjournal)
852 857 if self._journal:
853 858 self._opener.unlink(self._journal)
854 859
855 860 def _do_full_abort(self, entries):
856 861 """(Noisily) rollback all the change introduced by the transaction"""
857 862 try:
858 863 _playback(
859 864 self._journal,
860 865 self._report,
861 866 self._opener,
862 867 self._vfsmap,
863 868 entries,
864 869 self._backupentries,
865 870 False,
866 871 checkambigfiles=self._checkambigfiles,
867 872 )
868 873 self._report(_(b"rollback completed\n"))
869 874 except BaseException as exc:
870 875 self._report(_(b"rollback failed - please run hg recover\n"))
871 876 self._report(
872 877 _(b"(failure reason: %s)\n") % stringutil.forcebytestr(exc)
873 878 )
874 879
875 880
876 881 BAD_VERSION_MSG = _(
877 882 b"journal was created by a different version of Mercurial\n"
878 883 )
879 884
880 885
881 886 def read_backup_files(report, fp):
882 887 """parse an (already open) backup file an return contained backup entries
883 888
884 889 entries are in the form: (location, file, backupfile, cache)
885 890
886 891 :location: the vfs identifier (vfsmap's key)
887 892 :file: original file path (in the vfs)
888 893 :backupfile: path of the backup (in the vfs)
889 894 :cache: a boolean currently always set to False
890 895 """
891 896 lines = fp.readlines()
892 897 backupentries = []
893 898 if lines:
894 899 ver = lines[0][:-1]
895 900 if ver != (b'%d' % version):
896 901 report(BAD_VERSION_MSG)
897 902 else:
898 903 for line in lines[1:]:
899 904 if line:
900 905 # Shave off the trailing newline
901 906 line = line[:-1]
902 907 l, f, b, c = line.split(b'\0')
903 908 backupentries.append((l, f, b, bool(c)))
904 909 return backupentries
905 910
906 911
907 912 def rollback(
908 913 opener,
909 914 vfsmap,
910 915 file,
911 916 report,
912 917 checkambigfiles=None,
913 918 skip_journal_pattern=None,
914 919 ):
915 920 """Rolls back the transaction contained in the given file
916 921
917 922 Reads the entries in the specified file, and the corresponding
918 923 '*.backupfiles' file, to recover from an incomplete transaction.
919 924
920 925 * `file`: a file containing a list of entries, specifying where
921 926 to truncate each file. The file should contain a list of
922 927 file\0offset pairs, delimited by newlines. The corresponding
923 928 '*.backupfiles' file should contain a list of file\0backupfile
924 929 pairs, delimited by \0.
925 930
926 931 `checkambigfiles` is a set of (path, vfs-location) tuples,
927 932 which determine whether file stat ambiguity should be avoided when
928 933 restoring the corresponding files.
929 934 """
930 935 entries = []
931 936 backupentries = []
932 937
933 938 with opener.open(file) as fp:
934 939 lines = fp.readlines()
935 940 for l in lines:
936 941 try:
937 942 f, o = l.split(b'\0')
938 943 entries.append((f, int(o)))
939 944 except ValueError:
940 945 report(
941 946 _(b"couldn't read journal entry %r!\n") % pycompat.bytestr(l)
942 947 )
943 948
944 949 backupjournal = b"%s.backupfiles" % file
945 950 if opener.exists(backupjournal):
946 951 with opener.open(backupjournal) as fp:
947 952 backupentries = read_backup_files(report, fp)
948 953 if skip_journal_pattern is not None:
949 954 keep = lambda x: not skip_journal_pattern.match(x[1])
950 955 backupentries = [x for x in backupentries if keep(x)]
951 956
952 957 _playback(
953 958 file,
954 959 report,
955 960 opener,
956 961 vfsmap,
957 962 entries,
958 963 backupentries,
959 964 checkambigfiles=checkambigfiles,
960 965 )
@@ -1,448 +1,454 b''
1 1 Test correctness of revlog inline -> non-inline transition
2 2 ----------------------------------------------------------
3 3
4 4 We test various file lengths and naming patterns, as these have created issues in the
5 5 past.
6 6
7 7 Helper extensions to intercept renames and kill the process
8 8
9 9 $ cat > $TESTTMP/intercept_before_rename.py << EOF
10 10 > import os
11 11 > import signal
12 12 > from mercurial import extensions, util
13 13 >
14 14 > def extsetup(ui):
15 15 > def rename(orig, src, dest, *args, **kwargs):
16 16 > path = util.normpath(dest)
17 17 > if path.endswith(b'data/file.i'):
18 18 > os.kill(os.getpid(), signal.SIGKILL)
19 19 > return orig(src, dest, *args, **kwargs)
20 20 > extensions.wrapfunction(util, 'rename', rename)
21 21 > EOF
22 22
23 23 $ cat > $TESTTMP/intercept_after_rename.py << EOF
24 24 > import os
25 25 > import signal
26 26 > from mercurial import extensions, util
27 27 >
28 28 > def extsetup(ui):
29 29 > def close(orig, *args, **kwargs):
30 30 > path = util.normpath(args[0]._atomictempfile__name)
31 31 > r = orig(*args, **kwargs)
32 32 > if path.endswith(b'/.hg/store/data/file.i'):
33 33 > os.kill(os.getpid(), signal.SIGKILL)
34 34 > return r
35 35 > extensions.wrapfunction(util.atomictempfile, 'close', close)
36 36 > def extsetup(ui):
37 37 > def rename(orig, src, dest, *args, **kwargs):
38 38 > path = util.normpath(dest)
39 39 > r = orig(src, dest, *args, **kwargs)
40 40 > if path.endswith(b'data/file.i'):
41 41 > os.kill(os.getpid(), signal.SIGKILL)
42 42 > return r
43 43 > extensions.wrapfunction(util, 'rename', rename)
44 44 > EOF
45 45
46 46 $ cat > $TESTTMP/killme.py << EOF
47 47 > import os
48 48 > import signal
49 49 >
50 50 > def killme(ui, repo, hooktype, **kwargs):
51 51 > os.kill(os.getpid(), signal.SIGKILL)
52 52 > EOF
53 53
54 54 $ cat > $TESTTMP/reader_wait_split.py << EOF
55 55 > import os
56 56 > import signal
57 57 > from mercurial import extensions, revlog, testing
58 58 > def _wait_post_load(orig, self, *args, **kwargs):
59 59 > wait = b'data/file' in self.radix
60 60 > if wait:
61 61 > testing.wait_file(b"$TESTTMP/writer-revlog-split")
62 62 > r = orig(self, *args, **kwargs)
63 63 > if wait:
64 64 > testing.write_file(b"$TESTTMP/reader-index-read")
65 65 > testing.wait_file(b"$TESTTMP/writer-revlog-unsplit")
66 66 > return r
67 67 >
68 68 > def extsetup(ui):
69 69 > extensions.wrapfunction(revlog.revlog, '_loadindex', _wait_post_load)
70 70 > EOF
71 71
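The reader/writer handshake in these tests relies on `mercurial.testing.wait_file` and `write_file`, which are essentially touch and poll-until-exists helpers. A standalone sketch of equivalent primitives (a hypothetical reimplementation, not the actual module):

    import os
    import time

    def write_file(path, content=b''):
        # signal: create the file another process is waiting on
        with open(path, 'wb') as f:
            f.write(content)

    def wait_file(path, timeout=10):
        # poll until the file exists, or give up
        deadline = time.time() + timeout
        while not os.path.exists(path):
            if time.time() > deadline:
                raise RuntimeError('timed out waiting for %r' % path)
            time.sleep(0.01)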
72 72 setup a repository for tests
73 73 ----------------------------
74 74
75 75 $ cat >> $HGRCPATH << EOF
76 76 > [format]
77 77 > revlog-compression=none
78 78 > EOF
79 79
80 80 $ hg init troffset-computation
81 81 $ cd troffset-computation
82 82 $ files="
83 83 > file
84 84 > Directory_With,Special%Char/Complex_File.babar
85 85 > foo/bar/babar_celeste/foo
86 86 > 1234567890/1234567890/1234567890/1234567890/1234567890/1234567890/1234567890/1234567890/1234567890/1234567890/f
87 87 > "
88 88 $ for f in $files; do
89 89 > mkdir -p `dirname $f`
90 90 > done
91 91 $ for f in $files; do
92 92 > printf '%20d' '1' > $f
93 93 > done
94 94 $ hg commit -Aqma
95 95 $ for f in $files; do
96 96 > printf '%1024d' '1' > $f
97 97 > done
98 98 $ hg commit -Aqmb
99 99 $ for f in $files; do
100 100 > printf '%20d' '1' > $f
101 101 > done
102 102 $ hg commit -Aqmc
103 103 $ for f in $files; do
104 104 > dd if=/dev/zero of=$f bs=1k count=128 > /dev/null 2>&1
105 105 > done
106 106 $ hg commit -AqmD --traceback
107 $ for f in $files; do
108 > dd if=/dev/zero of=$f bs=1k count=132 > /dev/null 2>&1
109 > done
110 $ hg commit -AqmD --traceback
107 111
108 112 Reference size:
109 113 $ f -s file
110 file: size=131072
114 file: size=135168
111 115 $ f -s .hg/store/data/file*
112 .hg/store/data/file.d: size=132139
113 .hg/store/data/file.i: size=256
116 .hg/store/data/file.d: size=267307
117 .hg/store/data/file.i: size=320
114 118
115 119 $ cd ..
116 120
117 121
118 122 Test a hard crash after the file was split but before the transaction was committed
119 123 ===================================================================================
120 124
121 125 Test offset computation to correctly factor in the index entries themselves.
122 126 Also test that the new data size has the correct size if the transaction is aborted
123 127 after the index has been replaced.
124 128
125 129 Test repo has commits a, b, c, D, where D is large (grows the revlog enough that it
126 130 transitions to non-inline storage). The clone initially has changes a, b
127 131 and will transition to non-inline storage when adding c, D.
128 132
129 133 If the transaction adding c, D is rolled back, then we don't undo the revlog split,
130 134 but truncate the index and the data to remove both c and D.
131 135
132 136
133 137 $ hg clone --quiet --rev 1 troffset-computation troffset-computation-copy
134 138 $ cd troffset-computation-copy
135 139
136 140 Reference size:
137 141 $ f -s file
138 142 file: size=1024
139 143 $ f -s .hg/store/data/file*
140 144 .hg/store/data/file.i: size=1174
141 145
142 146 $ cat > .hg/hgrc <<EOF
143 147 > [hooks]
144 148 > pretxnchangegroup = python:$TESTTMP/killme.py:killme
145 149 > EOF
146 150 #if chg
147 151 $ hg pull ../troffset-computation
148 152 pulling from ../troffset-computation
149 153 [255]
150 154 #else
151 155 $ hg pull ../troffset-computation
152 156 pulling from ../troffset-computation
153 157 *Killed* (glob)
154 158 [137]
155 159 #endif
156 160
157 161
158 162 The inline revlog still exists, but a split version exists next to it
159 163
164 $ cat .hg/store/journal | tr '\0' ' ' | grep '\.s'
165 [1]
160 166 $ f -s .hg/store/data/file*
161 .hg/store/data/file.d: size=132139
167 .hg/store/data/file.d: size=267307
162 168 .hg/store/data/file.i: size=132395
163 .hg/store/data/file.i.s: size=256
169 .hg/store/data/file.i.s: size=320
164 170
165 171
166 172 The first file.i entry should match the "Reference size" above.
167 173 The first file.d entry is the temporary record written during the split.
168 174
169 175 A "temporary file" entry exist for the split index.
170 176
171 177 $ cat .hg/store/journal | tr -s '\000' ' ' | grep data/file
172 178 data/file.i 1174
173 179 data/file.d 0
174 180 $ cat .hg/store/journal.backupfiles | tr -s '\000' ' ' | tr -s '\00' ' '| grep data/file
175 181 data/file.i data/journal.backup.file.i 0
176 182 data/file.i.s 0
177 183
178 184 recover rolls the split back; the fncache is still valid
179 185
180 186 $ hg recover
181 187 rolling back interrupted transaction
182 188 (verify step skipped, run `hg verify` to check your repository content)
183 189 $ f -s .hg/store/data/file*
184 190 .hg/store/data/file.i: size=1174
185 191 $ hg tip
186 192 changeset: 1:cc8dfb126534
187 193 tag: tip
188 194 user: test
189 195 date: Thu Jan 01 00:00:00 1970 +0000
190 196 summary: b
191 197
192 198 $ hg verify -q
193 199 $ hg debugrebuildfncache --only-data
194 200 fncache already up to date
195 201 $ hg verify -q
196 202 $ cd ..
197 203
198 204 Test a hard crash right before the index is moved into place
199 205 ===========================================================
200 206
201 207 Now retry the procedure but intercept the rename of the index and check that
202 208 the journal does not contain the new index size. This demonstrates the edge case
203 209 where the data file is left as garbage.
204 210
205 211 $ hg clone --quiet --rev 1 troffset-computation troffset-computation-copy2
206 212 $ cd troffset-computation-copy2
207 213
208 214 Reference size:
209 215 $ f -s file
210 216 file: size=1024
211 217 $ f -s .hg/store/data/file*
212 218 .hg/store/data/file.i: size=1174
213 219
214 220 $ cat > .hg/hgrc <<EOF
215 221 > [extensions]
216 222 > intercept_rename = $TESTTMP/intercept_before_rename.py
217 223 > EOF
218 224 #if chg
219 225 $ hg pull ../troffset-computation
220 226 pulling from ../troffset-computation
221 227 searching for changes
222 228 adding changesets
223 229 adding manifests
224 230 adding file changes
225 231 [255]
226 232 #else
227 233 $ hg pull ../troffset-computation
228 234 pulling from ../troffset-computation
229 235 searching for changes
230 236 adding changesets
231 237 adding manifests
232 238 adding file changes
233 239 *Killed* (glob)
234 240 [137]
235 241 #endif
236 242
237 243 The inline revlog still exists, but a split version exists next to it
238 244
239 245 $ f -s .hg/store/data/file*
240 .hg/store/data/file.d: size=132139
246 .hg/store/data/file.d: size=267307
241 247 .hg/store/data/file.i: size=132395
242 .hg/store/data/file.i.s: size=256
248 .hg/store/data/file.i.s: size=320
243 249
244 250 $ cat .hg/store/journal | tr -s '\000' ' ' | grep data/file
245 251 data/file.i 1174
246 252 data/file.d 0
247 253
248 254 recover rolls the split back; the fncache is still valid
249 255
250 256 $ hg recover
251 257 rolling back interrupted transaction
252 258 (verify step skipped, run `hg verify` to check your repository content)
253 259 $ f -s .hg/store/data/file*
254 260 .hg/store/data/file.i: size=1174
255 261 $ hg tip
256 262 changeset: 1:cc8dfb126534
257 263 tag: tip
258 264 user: test
259 265 date: Thu Jan 01 00:00:00 1970 +0000
260 266 summary: b
261 267
262 268 $ hg verify -q
263 269 $ cd ..
264 270
265 271 Test a hard crash right after the index is moved into place
266 272 ===========================================================
267 273
268 274 Now retry the procedure but intercept the rename of the index.
269 275
270 276 $ hg clone --quiet --rev 1 troffset-computation troffset-computation-crash-after-rename
271 277 $ cd troffset-computation-crash-after-rename
272 278
273 279 Reference size:
274 280 $ f -s file
275 281 file: size=1024
276 282 $ f -s .hg/store/data/file*
277 283 .hg/store/data/file.i: size=1174
278 284
279 285 $ cat > .hg/hgrc <<EOF
280 286 > [extensions]
281 287 > intercept_rename = $TESTTMP/intercept_after_rename.py
282 288 > EOF
283 289 #if chg
284 290 $ hg pull ../troffset-computation
285 291 pulling from ../troffset-computation
286 292 searching for changes
287 293 adding changesets
288 294 adding manifests
289 295 adding file changes
290 296 [255]
291 297 #else
292 298 $ hg pull ../troffset-computation
293 299 pulling from ../troffset-computation
294 300 searching for changes
295 301 adding changesets
296 302 adding manifests
297 303 adding file changes
298 304 *Killed* (glob)
299 305 [137]
300 306 #endif
301 307
302 308 The inline revlog was overwritten on disk
303 309
304 310 $ f -s .hg/store/data/file*
305 .hg/store/data/file.d: size=132139
306 .hg/store/data/file.i: size=256
311 .hg/store/data/file.d: size=267307
312 .hg/store/data/file.i: size=320
307 313
308 314 $ cat .hg/store/journal | tr -s '\000' ' ' | grep data/file
309 315 data/file.i 1174
310 316 data/file.d 0
311 317
312 318 recover rolls the split back; the fncache is still valid
313 319
314 320 $ hg recover
315 321 rolling back interrupted transaction
316 322 (verify step skipped, run `hg verify` to check your repository content)
317 323 $ f -s .hg/store/data/file*
318 324 .hg/store/data/file.i: size=1174
319 325 $ hg tip
320 326 changeset: 1:cc8dfb126534
321 327 tag: tip
322 328 user: test
323 329 date: Thu Jan 01 00:00:00 1970 +0000
324 330 summary: b
325 331
326 332 $ hg verify -q
327 333 $ cd ..
328 334
329 335 Have the transaction roll itself back without any hard crash
330 336 ===========================================================
331 337
332 338
333 339 Repeat the original test but let hg roll back the transaction.
334 340
335 341 $ hg clone --quiet --rev 1 troffset-computation troffset-computation-copy-rb
336 342 $ cd troffset-computation-copy-rb
337 343 $ cat > .hg/hgrc <<EOF
338 344 > [hooks]
339 345 > pretxnchangegroup = false
340 346 > EOF
341 347 $ hg pull ../troffset-computation
342 348 pulling from ../troffset-computation
343 349 searching for changes
344 350 adding changesets
345 351 adding manifests
346 352 adding file changes
347 353 transaction abort!
348 354 rollback completed
349 355 abort: pretxnchangegroup hook exited with status 1
350 356 [40]
351 357
352 358 The split was rolled back
353 359
354 360 $ f -s .hg/store/data/file*
355 361 .hg/store/data/file.d: size=0
356 362 .hg/store/data/file.i: size=1174
357 363
358 364
359 365 $ hg tip
360 366 changeset: 1:cc8dfb126534
361 367 tag: tip
362 368 user: test
363 369 date: Thu Jan 01 00:00:00 1970 +0000
364 370 summary: b
365 371
366 372 $ hg verify -q
367 373 $ cd ..
368 374
369 375 Read race
370 376 =========
371 377
372 378 We check that a client that started reading a revlog (its index) after the
373 379 split and finished reading (the data) after the rollback is fine
374 380
375 381 $ hg clone --quiet --rev 1 troffset-computation troffset-computation-race
376 382 $ cd troffset-computation-race
377 383 $ cat > .hg/hgrc <<EOF
378 384 > [hooks]
379 385 > pretxnchangegroup=$RUNTESTDIR/testlib/wait-on-file 5 $TESTTMP/reader-index-read $TESTTMP/writer-revlog-split
380 386 > pretxnclose = false
381 387 > EOF
382 388
383 389 start a reader
384 390
385 391 $ hg cat --rev 0 file \
386 392 > --config "extensions.wait_read=$TESTTMP/reader_wait_split.py" \
387 393 > 2> $TESTTMP/reader.stderr \
388 394 > > $TESTTMP/reader.stdout &
389 395
390 396 Do a failed pull in parallel
391 397
392 398 $ hg pull ../troffset-computation
393 399 pulling from ../troffset-computation
394 400 searching for changes
395 401 adding changesets
396 402 adding manifests
397 403 adding file changes
398 404 transaction abort!
399 405 rollback completed
400 406 abort: pretxnclose hook exited with status 1
401 407 [40]
402 408 $ touch $TESTTMP/writer-revlog-unsplit
403 409 $ wait
404 410
405 411 The reader should be fine
406 412 $ cat $TESTTMP/reader.stderr
407 413 $ cat $TESTTMP/reader.stdout
408 414 1 (no-eol)
409 415 $ cd ..
410 416
411 417 pending hooks
412 418 =============
413 419
414 420 We check that hooks properly see the inside of the transaction, while other processes don't.
415 421
416 422 $ hg clone --quiet --rev 1 troffset-computation troffset-computation-hooks
417 423 $ cd troffset-computation-hooks
418 424 $ cat > .hg/hgrc <<EOF
419 425 > [hooks]
420 426 > pretxnclose.01-echo = hg cat -r 'max(all())' file | f --size
421 427 > pretxnclose.02-echo = $RUNTESTDIR/testlib/wait-on-file 5 $TESTTMP/hook-done $TESTTMP/hook-tr-ready
422 428 > pretxnclose.03-abort = false
423 429 > EOF
424 430
425 431 $ (
426 432 > $RUNTESTDIR/testlib/wait-on-file 5 $TESTTMP/hook-tr-ready;\
427 433 > hg cat -r 'max(all())' file | f --size;\
428 434 > touch $TESTTMP/hook-done
429 435 > ) >stdout 2>stderr &
430 436
431 437 $ hg pull ../troffset-computation
432 438 pulling from ../troffset-computation
433 439 searching for changes
434 440 adding changesets
435 441 adding manifests
436 442 adding file changes
437 size=131072
443 size=135168
438 444 transaction abort!
439 445 rollback completed
440 446 abort: pretxnclose.03-abort hook exited with status 1
441 447 [40]
442 448
443 449 $ cat stdout
444 450 size=1024
445 451 $ cat stderr
446 452
447 453
448 454 $ cd ..