##// END OF EJS Templates
transaction: actually delete file created during the transaction on rollback...
marmoute -
r51703:5c3d0795 default
parent child Browse files
Show More
@@ -1,965 +1,965 b''
1 1 # transaction.py - simple journaling scheme for mercurial
2 2 #
3 3 # This transaction scheme is intended to gracefully handle program
4 4 # errors and interruptions. More serious failures like system crashes
5 5 # can be recovered with an fsck-like tool. As the whole repository is
6 6 # effectively log-structured, this should amount to simply truncating
7 7 # anything that isn't referenced in the changelog.
8 8 #
9 9 # Copyright 2005, 2006 Olivia Mackall <olivia@selenic.com>
10 10 #
11 11 # This software may be used and distributed according to the terms of the
12 12 # GNU General Public License version 2 or any later version.
13 13
14 14 import errno
15 15 import os
16 16
17 17 from .i18n import _
18 18 from . import (
19 19 error,
20 20 pycompat,
21 21 util,
22 22 )
23 23 from .utils import stringutil
24 24
# format version of the on-disk journal / backup-listing files
version = 2

# group selectors for _generatefiles(): run all generators, only those that
# must run before finalization, or only those that must run after it
GEN_GROUP_ALL = b'all'
GEN_GROUP_PRE_FINALIZE = b'prefinalize'
GEN_GROUP_POST_FINALIZE = b'postfinalize'
30 30
31 31
def active(func):
    """Decorator guarding transaction methods against use after completion.

    The wrapped method raises a ProgrammingError when invoked once the
    transaction has been committed or aborted (``self._count == 0``).
    """

    def checked(self, *args, **kwds):
        if not self._count:
            raise error.ProgrammingError(
                b'cannot use transaction when it is already committed/aborted'
            )
        return func(self, *args, **kwds)

    return checked
41 41
42 42
UNDO_BACKUP = b'%s.backupfiles'

UNDO_FILES_MAY_NEED_CLEANUP = [
    # legacy entries that might exist on disk from previous versions:
    (b'store', b'%s.narrowspec'),
    (b'plain', b'%s.narrowspec.dirstate'),
    (b'plain', b'%s.branch'),
    (b'plain', b'%s.bookmarks'),
    (b'store', b'%s.phaseroots'),
    (b'plain', b'%s.dirstate'),
    # files actually in use today:
    (b'plain', b'%s.desc'),
    # Always delete undo last to make sure we detect that a clean up is needed
    # if the process is interrupted.
    (b'store', b'%s'),
]


def cleanup_undo_files(report, vfsmap, undo_prefix=b'undo'):
    """remove "undo" files used by the rollback logic

    This is useful to prevent rollback running in situation were it does not
    make sense. For example after a strip.
    """
    listing_name = UNDO_BACKUP % undo_prefix
    store_vfs = vfsmap[b'store']

    # read the per-file backup entries recorded for the undo transaction
    recorded = []
    try:
        with store_vfs(listing_name) as fp:
            recorded = read_backup_files(report, fp)
    except OSError as err:
        # a missing listing is normal (nothing to undo); other errors are
        # reported but do not stop the cleanup
        if err.errno != errno.ENOENT:
            msg = _(b'could not read %s: %s\n')
            msg %= (store_vfs.join(listing_name), stringutil.forcebytestr(err))
            report(msg)

    # everything to delete: recorded backups, then the listing itself, then
    # the well-known static undo file names
    to_delete = [
        (vfsmap[loc], backup_path)
        for loc, _path, backup_path, _cache in recorded
        if loc in vfsmap and backup_path
    ]
    to_delete.append((store_vfs, listing_name))
    for loc, pattern in UNDO_FILES_MAY_NEED_CLEANUP:
        to_delete.append((vfsmap[loc], pattern % undo_prefix))

    for vfs, name in to_delete:
        try:
            vfs.unlink(name)
        except OSError as err:
            if err.errno != errno.ENOENT:
                msg = _(b'error removing %s: %s\n')
                msg %= (vfs.join(name), stringutil.forcebytestr(err))
                report(msg)
97 97
def _playback(
    journal,
    report,
    opener,
    vfsmap,
    entries,
    backupentries,
    unlink=True,
    checkambigfiles=None,
):
    """rollback a transaction :
    - truncate files that have been appended to
    - restore file backups
    - delete temporary files

    * `journal`: path of the journal file, used to locate related state files
    * `report`: callable receiving user-facing progress/error messages
    * `opener`: the store vfs
    * `vfsmap`: {location -> vfs} used to resolve backup entry locations
    * `entries`: (file, offset) pairs read from the journal
    * `backupentries`: (location, file, backup_path, cache) tuples
    * `unlink`: when True, files with a zero recorded offset are deleted
    * `checkambigfiles`: set of (path, location) pairs for which file-stat
      ambiguity must be avoided when restoring
    """
    # backups successfully restored; removed at the very end
    backupfiles = []

    def restore_one_backup(vfs, f, b, checkambig):
        # copy backup `b` over `f`; failures are reported then re-raised
        filepath = vfs.join(f)
        backuppath = vfs.join(b)
        try:
            util.copyfile(backuppath, filepath, checkambig=checkambig)
            backupfiles.append((vfs, b))
        except IOError as exc:
            e_msg = stringutil.forcebytestr(exc)
            report(_(b"failed to recover %s (%s)\n") % (f, e_msg))
            raise

    # gather all backup files that impact the store
    # (we need this to detect files that are both backed up and truncated)
    store_backup = {}
    for entry in backupentries:
        location, file_path, backup_path, cache = entry
        is_store = vfsmap[location].join(b'') == opener.join(b'')
        if is_store and file_path and backup_path:
            store_backup[file_path] = entry
    copy_done = set()

    # truncate all file `f` to offset `o`
    for f, o in sorted(dict(entries).items()):
        # if we have a backup for `f`, we should restore it first and truncate
        # the restored file
        bck_entry = store_backup.get(f)
        if bck_entry is not None:
            location, file_path, backup_path, cache = bck_entry
            checkambig = False
            if checkambigfiles:
                checkambig = (file_path, location) in checkambigfiles
            restore_one_backup(opener, file_path, backup_path, checkambig)
            copy_done.add(bck_entry)
        # truncate the file to its pre-transaction size
        if o or not unlink:
            checkambig = checkambigfiles and (f, b'') in checkambigfiles
            try:
                fp = opener(f, b'a', checkambig=checkambig)
                if fp.tell() < o:
                    # shorter than the recorded offset: something else
                    # truncated it; refusing avoids corrupting further
                    raise error.Abort(
                        _(
                            b"attempted to truncate %s to %d bytes, but it was "
                            b"already %d bytes\n"
                        )
                        % (f, o, fp.tell())
                    )
                fp.truncate(o)
                fp.close()
            except IOError:
                report(_(b"failed to truncate %s\n") % f)
                raise
        else:
            # delete empty file
            try:
                opener.unlink(f)
            except FileNotFoundError:
                pass
    # restore backed up files and clean up temporary files
    for entry in backupentries:
        if entry in copy_done:
            continue
        l, f, b, c = entry
        if l not in vfsmap and c:
            report(b"couldn't handle %s: unknown cache location %s\n" % (b, l))
        vfs = vfsmap[l]
        try:
            checkambig = checkambigfiles and (f, l) in checkambigfiles
            if f and b:
                restore_one_backup(vfs, f, b, checkambig)
            else:
                target = f or b
                try:
                    vfs.unlink(target)
                except FileNotFoundError:
                    # This is fine because
                    #
                    # either we are trying to delete the main file, and it is
                    # already deleted.
                    #
                    # or we are trying to delete a temporary file and it is
                    # already deleted.
                    #
                    # in both case, our target result (delete the file) is
                    # already achieved.
                    pass
        except (IOError, OSError, error.Abort):
            if not c:
                raise

    # cleanup transaction state file and the backups file
    backuppath = b"%s.backupfiles" % journal
    if opener.exists(backuppath):
        opener.unlink(backuppath)
    opener.unlink(journal)
    try:
        for vfs, f in backupfiles:
            if vfs.exists(f):
                vfs.unlink(f)
    except (IOError, OSError, error.Abort):
        # only pure backup file remains, it is safe to ignore any error
        pass
218 218
class transaction(util.transactional):
    def __init__(
        self,
        report,
        opener,
        vfsmap,
        journalname,
        undoname=None,
        after=None,
        createmode=None,
        validator=None,
        releasefn=None,
        checkambigfiles=None,
        name='<unnamed>',
    ):
        """Begin a new transaction

        Begins a new transaction that allows rolling back writes in the event of
        an exception.

        * `after`: called after the transaction has been committed
        * `createmode`: the mode of the journal file that will be created
        * `releasefn`: called after releasing (with transaction and result)

        `checkambigfiles` is a set of (path, vfs-location) tuples,
        which determine whether file stat ambiguity should be avoided
        for corresponded files.
        """
        # nesting counters: the transaction truly ends only when these drop
        # back to zero (see nest()/release()/close())
        self._count = 1
        self._usages = 1
        self._report = report
        # a vfs to the store content
        self._opener = opener
        # a map to access file in various {location -> vfs}
        vfsmap = vfsmap.copy()
        vfsmap[b''] = opener  # set default value
        self._vfsmap = vfsmap
        self._after = after
        # pre-transaction size of each appended file; files with offset 0 are
        # tracked in self._newfiles instead
        self._offsetmap = {}
        # files that did not exist before this transaction
        self._newfiles = set()
        self._journal = journalname
        self._journal_files = []
        self._undoname = undoname
        # (file, offset) entries delayed by startgroup(); flushed by endgroup()
        self._queue = []
        # A callback to do something just after releasing transaction.
        if releasefn is None:
            releasefn = lambda tr, success: None
        self._releasefn = releasefn

        self._checkambigfiles = set()
        if checkambigfiles:
            self._checkambigfiles.update(checkambigfiles)

        self._names = [name]

        # A dict dedicated to precisely tracking the changes introduced in the
        # transaction.
        self.changes = {}

        # a dict of arguments to be passed to hooks
        self.hookargs = {}
        self._file = opener.open(self._journal, b"w+")

        # a list of ('location', 'path', 'backuppath', cache) entries.
        # - if 'backuppath' is empty, no file existed at backup time
        # - if 'path' is empty, this is a temporary transaction file
        # - if 'location' is not empty, the path is outside main opener reach.
        #   use 'location' value as a key in a vfsmap to find the right 'vfs'
        # (cache is currently unused)
        self._backupentries = []
        self._backupmap = {}
        self._backupjournal = b"%s.backupfiles" % self._journal
        self._backupsfile = opener.open(self._backupjournal, b'w')
        self._backupsfile.write(b'%d\n' % version)
        # the set of temporary files
        self._tmp_files = set()

        if createmode is not None:
            opener.chmod(self._journal, createmode & 0o666)
            opener.chmod(self._backupjournal, createmode & 0o666)

        # hold file generations to be performed on commit
        self._filegenerators = {}
        # hold callback to write pending data for hooks
        self._pendingcallback = {}
        # True is any pending data have been written ever
        self._anypending = False
        # holds callback to call when writing the transaction
        self._finalizecallback = {}
        # holds callback to call when validating the transaction
        # should raise exception if anything is wrong
        self._validatecallback = {}
        if validator is not None:
            self._validatecallback[b'001-userhooks'] = validator
        # hold callback for post transaction close
        self._postclosecallback = {}
        # holds callbacks to call during abort
        self._abortcallback = {}
317 317
318 318 def __repr__(self):
319 319 name = b'/'.join(self._names)
320 320 return '<transaction name=%s, count=%d, usages=%d>' % (
321 321 name,
322 322 self._count,
323 323 self._usages,
324 324 )
325 325
    def __del__(self):
        # a non-None journal means the transaction was neither closed nor
        # aborted explicitly: abort it when garbage collected
        if self._journal:
            self._abort()
329 329
    @property
    def finalized(self):
        # close() sets _finalizecallback to None after running finalizers, so
        # None doubles as the "already finalized" marker
        return self._finalizecallback is None
333 333
    @active
    def startgroup(self):
        """delay registration of file entry

        This is used by strip to delay vision of strip offset. The transaction
        sees either none or all of the strip actions to be done."""
        # entries added while the group is open accumulate here (see add())
        self._queue.append([])
341 341
342 342 @active
343 343 def endgroup(self):
344 344 """apply delayed registration of file entry.
345 345
346 346 This is used by strip to delay vision of strip offset. The transaction
347 347 sees either none or all of the strip actions to be done."""
348 348 q = self._queue.pop()
349 349 for f, o in q:
350 350 self._addentry(f, o)
351 351
352 352 @active
353 353 def add(self, file, offset):
354 354 """record the state of an append-only file before update"""
355 355 if (
356 356 file in self._newfiles
357 357 or file in self._offsetmap
358 358 or file in self._backupmap
359 359 or file in self._tmp_files
360 360 ):
361 361 return
362 362 if self._queue:
363 363 self._queue[-1].append((file, offset))
364 364 return
365 365
366 366 self._addentry(file, offset)
367 367
    def _addentry(self, file, offset):
        """add a append-only entry to memory and on-disk state"""
        if (
            file in self._newfiles
            or file in self._offsetmap
            or file in self._backupmap
            or file in self._tmp_files
        ):
            return
        if offset:
            # pre-existing file: remember the size to truncate back to
            self._offsetmap[file] = offset
        else:
            # file created by this transaction: delete it on rollback
            self._newfiles.add(file)
        # add enough data to the journal to do the truncate
        self._file.write(b"%s\0%d\n" % (file, offset))
        self._file.flush()
384 384
385 385 @active
386 386 def addbackup(self, file, hardlink=True, location=b'', for_offset=False):
387 387 """Adds a backup of the file to the transaction
388 388
389 389 Calling addbackup() creates a hardlink backup of the specified file
390 390 that is used to recover the file in the event of the transaction
391 391 aborting.
392 392
393 393 * `file`: the file path, relative to .hg/store
394 394 * `hardlink`: use a hardlink to quickly create the backup
395 395
396 396 If `for_offset` is set, we expect a offset for this file to have been previously recorded
397 397 """
398 398 if self._queue:
399 399 msg = b'cannot use transaction.addbackup inside "group"'
400 400 raise error.ProgrammingError(msg)
401 401
402 402 if file in self._newfiles or file in self._backupmap:
403 403 return
404 404 elif file in self._offsetmap and not for_offset:
405 405 return
406 406 elif for_offset and file not in self._offsetmap:
407 407 msg = (
408 408 'calling `addbackup` with `for_offmap=True`, '
409 409 'but no offset recorded: [%r] %r'
410 410 )
411 411 msg %= (location, file)
412 412 raise error.ProgrammingError(msg)
413 413
414 414 vfs = self._vfsmap[location]
415 415 dirname, filename = vfs.split(file)
416 416 backupfilename = b"%s.backup.%s.bck" % (self._journal, filename)
417 417 backupfile = vfs.reljoin(dirname, backupfilename)
418 418 if vfs.exists(file):
419 419 filepath = vfs.join(file)
420 420 backuppath = vfs.join(backupfile)
421 421 # store encoding may result in different directory here.
422 422 # so we have to ensure the destination directory exist
423 423 final_dir_name = os.path.dirname(backuppath)
424 424 util.makedirs(final_dir_name, mode=vfs.createmode, notindexed=True)
425 425 # then we can copy the backup
426 426 util.copyfile(filepath, backuppath, hardlink=hardlink)
427 427 else:
428 428 backupfile = b''
429 429
430 430 self._addbackupentry((location, file, backupfile, False))
431 431
    def _addbackupentry(self, entry):
        """register a new backup entry and write it to disk"""
        self._backupentries.append(entry)
        # index the original path so later add/addbackup calls detect it
        self._backupmap[entry[1]] = len(self._backupentries) - 1
        self._backupsfile.write(b"%s\0%s\0%s\0%d\n" % entry)
        self._backupsfile.flush()
438 438
    @active
    def registertmp(self, tmpfile, location=b''):
        """register a temporary transaction file

        Such files will be deleted when the transaction exits (on both
        failure and success).
        """
        self._tmp_files.add(tmpfile)
        # an empty 'path' field marks the entry as temporary (see the
        # _backupentries format documented in __init__)
        self._addbackupentry((location, b'', tmpfile, False))
448 448
    @active
    def addfilegenerator(
        self,
        genid,
        filenames,
        genfunc,
        order=0,
        location=b'',
        post_finalize=False,
    ):
        """add a function to generates some files at transaction commit

        The `genfunc` argument is a function capable of generating proper
        content of each entry in the `filename` tuple.

        At transaction close time, `genfunc` will be called with one file
        object argument per entries in `filenames`.

        The transaction itself is responsible for the backup, creation and
        final write of such file.

        The `genid` argument is used to ensure the same set of file is only
        generated once. Call to `addfilegenerator` for a `genid` already
        present will overwrite the old entry.

        The `order` argument may be used to control the order in which multiple
        generator will be executed.

        The `location` arguments may be used to indicate the files are located
        outside of the standard directory for transaction. It should match
        one of the key of the `transaction.vfsmap` dictionary.

        The `post_finalize` argument can be set to `True` for file generation
        that must be run after the transaction has been finalized.
        """
        # For now, we are unable to do proper backup and restore of custom vfs
        # but for bookmarks that are handled outside this mechanism.
        entry = (order, filenames, genfunc, location, post_finalize)
        self._filegenerators[genid] = entry
488 488
489 489 @active
490 490 def removefilegenerator(self, genid):
491 491 """reverse of addfilegenerator, remove a file generator function"""
492 492 if genid in self._filegenerators:
493 493 del self._filegenerators[genid]
494 494
    def _generatefiles(self, suffix=b'', group=GEN_GROUP_ALL):
        """Run registered file generators; return True if any generator ran.

        * `suffix`: appended to generated file names; a non-empty suffix marks
          the output as temporary (used for the '.pending' variants)
        * `group`: which generators to run (all / pre-finalize / post-finalize)
        """
        # write files registered for generation
        any = False

        if group == GEN_GROUP_ALL:
            skip_post = skip_pre = False
        else:
            skip_pre = group == GEN_GROUP_POST_FINALIZE
            skip_post = group == GEN_GROUP_PRE_FINALIZE

        for id, entry in sorted(self._filegenerators.items()):
            any = True
            order, filenames, genfunc, location, post_finalize = entry

            # for generation at closing, check if it's before or after finalize
            if skip_post and post_finalize:
                continue
            elif skip_pre and not post_finalize:
                continue

            vfs = self._vfsmap[location]
            files = []
            try:
                for name in filenames:
                    name += suffix
                    if suffix:
                        # pending variant: cleaned up when the transaction ends
                        self.registertmp(name, location=location)
                        checkambig = False
                    else:
                        self.addbackup(name, location=location)
                        checkambig = (name, location) in self._checkambigfiles
                    files.append(
                        vfs(name, b'w', atomictemp=True, checkambig=checkambig)
                    )
                genfunc(*files)
                for f in files:
                    f.close()
                # skip discard() loop since we're sure no open file remains
                del files[:]
            finally:
                for f in files:
                    f.discard()
        return any
538 538
539 539 @active
540 540 def findoffset(self, file):
541 541 if file in self._newfiles:
542 542 return 0
543 543 return self._offsetmap.get(file)
544 544
545 545 @active
546 546 def readjournal(self):
547 547 self._file.seek(0)
548 548 entries = []
549 549 for l in self._file.readlines():
550 550 file, troffset = l.split(b'\0')
551 551 entries.append((file, int(troffset)))
552 552 return entries
553 553
    @active
    def replace(self, file, offset):
        """
        replace can only replace already committed entries
        that are not pending in the queue
        """
        if file in self._newfiles:
            if not offset:
                return
            # the file is no longer "new": track its truncation offset instead
            self._newfiles.remove(file)
            self._offsetmap[file] = offset
        elif file in self._offsetmap:
            if not offset:
                # offset dropped to zero: treat as created by this transaction
                del self._offsetmap[file]
                self._newfiles.add(file)
            else:
                self._offsetmap[file] = offset
        else:
            raise KeyError(file)
        # record the updated entry in the on-disk journal as well
        self._file.write(b"%s\0%d\n" % (file, offset))
        self._file.flush()
575 575
576 576 @active
577 577 def nest(self, name='<unnamed>'):
578 578 self._count += 1
579 579 self._usages += 1
580 580 self._names.append(name)
581 581 return self
582 582
    def release(self):
        # leave one nesting scope (counterpart of nest()/__init__)
        if self._count > 0:
            self._usages -= 1
        if self._names:
            self._names.pop()
        # if the transaction scopes are left without being closed, fail
        if self._count > 0 and self._usages == 0:
            self._abort()
591 591
592 592 def running(self):
593 593 return self._count > 0
594 594
    def addpending(self, category, callback):
        """add a callback to be called when the transaction is pending

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._pendingcallback[category] = callback
604 604
    @active
    def writepending(self):
        """write pending file to temporary version

        This is used to allow hooks to view a transaction before commit

        Returns True if any pending data has ever been written.
        """
        categories = sorted(self._pendingcallback)
        for cat in categories:
            # remove callback since the data will have been flushed
            any = self._pendingcallback.pop(cat)(self)
            self._anypending = self._anypending or any
        self._anypending |= self._generatefiles(suffix=b'.pending')
        return self._anypending
617 617
    @active
    def hasfinalize(self, category):
        """check if a callback already exists for a category"""
        return category in self._finalizecallback
622 622
    @active
    def addfinalize(self, category, callback):
        """add a callback to be called when the transaction is closed

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting old callbacks with
        newer callbacks.
        """
        self._finalizecallback[category] = callback
633 633
    @active
    def addpostclose(self, category, callback):
        """add or replace a callback to be called after the transaction closed

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._postclosecallback[category] = callback
644 644
645 645 @active
646 646 def getpostclose(self, category):
647 647 """return a postclose callback added before, or None"""
648 648 return self._postclosecallback.get(category, None)
649 649
    @active
    def addabort(self, category, callback):
        """add a callback to be called when the transaction is aborted.

        The transaction will be given as the first argument to the callback.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._abortcallback[category] = callback
660 660
    @active
    def addvalidator(self, category, callback):
        """adds a callback to be called when validating the transaction.

        The transaction will be given as the first argument to the callback.

        callback should raise exception if to abort transaction"""
        self._validatecallback[category] = callback
669 669
    @active
    def close(self):
        '''commit the transaction'''
        if self._count == 1:
            # outermost scope: validate, then run generators and finalizers
            for category in sorted(self._validatecallback):
                self._validatecallback[category](self)
            self._validatecallback = None  # Help prevent cycles.
            self._generatefiles(group=GEN_GROUP_PRE_FINALIZE)
            # a finalizer may register further finalizers, hence the loop
            while self._finalizecallback:
                callbacks = self._finalizecallback
                self._finalizecallback = {}
                categories = sorted(callbacks)
                for cat in categories:
                    callbacks[cat](self)
            # Prevent double usage and help clear cycles.
            self._finalizecallback = None
            self._generatefiles(group=GEN_GROUP_POST_FINALIZE)

        self._count -= 1
        if self._count != 0:
            # still nested: the outermost close() does the real work
            return
        self._file.close()
        self._backupsfile.close()
        # cleanup temporary files
        for l, f, b, c in self._backupentries:
            if l not in self._vfsmap and c:
                self._report(
                    b"couldn't remove %s: unknown cache location %s\n" % (b, l)
                )
                continue
            vfs = self._vfsmap[l]
            if not f and b and vfs.exists(b):
                try:
                    vfs.unlink(b)
                except (IOError, OSError, error.Abort) as inst:
                    if not c:
                        raise
                    # Abort may be raise by read only opener
                    self._report(
                        b"couldn't remove %s: %s\n" % (vfs.join(b), inst)
                    )
        self._offsetmap = {}
        self._newfiles = set()
        self._writeundo()
        if self._after:
            self._after()
            self._after = None  # Help prevent cycles.
        if self._opener.isfile(self._backupjournal):
            self._opener.unlink(self._backupjournal)
        if self._opener.isfile(self._journal):
            self._opener.unlink(self._journal)
        # second pass: remove remaining backup files now that the journal is
        # gone
        for l, _f, b, c in self._backupentries:
            if l not in self._vfsmap and c:
                self._report(
                    b"couldn't remove %s: unknown cache location"
                    b"%s\n" % (b, l)
                )
                continue
            vfs = self._vfsmap[l]
            if b and vfs.exists(b):
                try:
                    vfs.unlink(b)
                except (IOError, OSError, error.Abort) as inst:
                    if not c:
                        raise
                    # Abort may be raise by read only opener
                    self._report(
                        b"couldn't remove %s: %s\n" % (vfs.join(b), inst)
                    )
        self._backupentries = []
        self._journal = None

        self._releasefn(self, True)  # notify success of closing transaction
        self._releasefn = None  # Help prevent cycles.

        # run post close action
        categories = sorted(self._postclosecallback)
        for cat in categories:
            self._postclosecallback[cat](self)
        # Prevent double usage and help clear cycles.
        self._postclosecallback = None
751 751
    @active
    def abort(self):
        """abort the transaction (generally called on error, or when the
        transaction is not explicitly committed before going out of
        scope)"""
        self._abort()
758 758
    @active
    def add_journal(self, vfs_id, path):
        # record an extra journal file to be renamed to its undo name by
        # _writeundo() when the transaction closes
        self._journal_files.append((vfs_id, path))
762 762
    def _writeundo(self):
        """write transaction data for possible future undo call"""
        if self._undoname is None:
            return
        # drop stale undo files from any previous transaction first
        cleanup_undo_files(
            self._report,
            self._vfsmap,
            undo_prefix=self._undoname,
        )

        def undoname(fn: bytes) -> bytes:
            # map a journal-prefixed file name to its undo-prefixed twin
            base, name = os.path.split(fn)
            assert name.startswith(self._journal)
            new_name = name.replace(self._journal, self._undoname, 1)
            return os.path.join(base, new_name)

        undo_backup_path = b"%s.backupfiles" % self._undoname
        undobackupfile = self._opener.open(undo_backup_path, b'w')
        undobackupfile.write(b'%d\n' % version)
        for l, f, b, c in self._backupentries:
            if not f:  # temporary file
                continue
            if not b:
                # no backup existed for this file
                u = b''
            else:
                if l not in self._vfsmap and c:
                    self._report(
                        b"couldn't remove %s: unknown cache location"
                        b"%s\n" % (b, l)
                    )
                    continue
                vfs = self._vfsmap[l]
                u = undoname(b)
                util.copyfile(vfs.join(b), vfs.join(u), hardlink=True)
            undobackupfile.write(b"%s\0%s\0%s\0%d\n" % (l, f, u, c))
        undobackupfile.close()
        for vfs, src in self._journal_files:
            dest = undoname(src)
            # if src and dest refer to a same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except FileNotFoundError:  # journal file does not yet exist
                pass
809 809
    def _abort(self):
        # roll back everything this transaction wrote, then release it
        entries = self.readjournal()
        self._count = 0
        self._usages = 0
        self._file.close()
        self._backupsfile.close()

        # a "quick" abort only needs to delete temporary files
        quick = self._can_quick_abort(entries)
        try:
            if not quick:
                self._report(_(b"transaction abort!\n"))
            for cat in sorted(self._abortcallback):
                self._abortcallback[cat](self)
            # Prevent double usage and help clear cycles.
            self._abortcallback = None
            if quick:
                self._do_quick_abort(entries)
            else:
                self._do_full_abort(entries)
        finally:
            self._journal = None
            self._releasefn(self, False)  # notify failure of transaction
            self._releasefn = None  # Help prevent cycles.
833 833
834 834 def _can_quick_abort(self, entries):
835 835 """False if any semantic content have been written on disk
836 836
837 837 True if nothing, except temporary files has been writen on disk."""
838 838 if entries:
839 839 return False
840 840 for e in self._backupentries:
841 841 if e[1]:
842 842 return False
843 843 return True
844 844
    def _do_quick_abort(self, entries):
        """(Silently) do a quick cleanup (see _can_quick_abort)"""
        assert self._can_quick_abort(entries)
        # entries with an empty original path are the temporary files
        tmp_files = [e for e in self._backupentries if not e[1]]
        for vfs_id, old_path, tmp_path, xxx in tmp_files:
            vfs = self._vfsmap[vfs_id]
            try:
                vfs.unlink(tmp_path)
            except FileNotFoundError:
                pass
        if self._backupjournal:
            self._opener.unlink(self._backupjournal)
        if self._journal:
            self._opener.unlink(self._journal)
859 859
    def _do_full_abort(self, entries):
        """(Noisily) rollback all the change introduced by the transaction"""
        try:
            _playback(
                self._journal,
                self._report,
                self._opener,
                self._vfsmap,
                entries,
                self._backupentries,
                # unlink=True so files created during the transaction are
                # actually deleted on rollback
                unlink=True,
                checkambigfiles=self._checkambigfiles,
            )
            self._report(_(b"rollback completed\n"))
        except BaseException as exc:
            self._report(_(b"rollback failed - please run hg recover\n"))
            self._report(
                _(b"(failure reason: %s)\n") % stringutil.forcebytestr(exc)
            )
879 879
880 880
BAD_VERSION_MSG = _(
    b"journal was created by a different version of Mercurial\n"
)


def read_backup_files(report, fp):
    """parse an (already open) backup file an return contained backup entries

    entries are in the form: (location, file, backupfile, xxx)

    :location: the vfs identifier (vfsmap's key)
    :file: original file path (in the vfs)
    :backupfile: path of the backup (in the vfs)
    :cache: a boolean currently always set to False
    """
    raw_lines = fp.readlines()
    parsed = []
    if raw_lines:
        # the first line carries the on-disk format version
        header = raw_lines[0][:-1]
        if header != (b'%d' % version):
            report(BAD_VERSION_MSG)
        else:
            for raw in raw_lines[1:]:
                if not raw:
                    continue
                # Shave off the trailing newline before splitting the fields
                loc, path, backup, cache = raw[:-1].split(b'\0')
                parsed.append((loc, path, backup, bool(cache)))
    return parsed
910 910
911 911
def rollback(
    opener,
    vfsmap,
    file,
    report,
    checkambigfiles=None,
    skip_journal_pattern=None,
):
    """Rolls back the transaction contained in the given file

    Reads the entries in the specified file, and the corresponding
    '*.backupfiles' file, to recover from an incomplete transaction.

    * `file`: a file containing a list of entries, specifying where
    to truncate each file. The file should contain a list of
    file\0offset pairs, delimited by newlines. The corresponding
    '*.backupfiles' file should contain a list of file\0backupfile
    pairs, delimited by \0.

    `checkambigfiles` is a set of (path, vfs-location) tuples,
    which determine whether file stat ambiguity should be avoided at
    restoring corresponded files.

    `skip_journal_pattern`, when given, filters out backup entries whose
    original path matches the pattern.
    """
    entries = []
    backupentries = []

    # parse the journal itself: one "file\0offset" pair per line
    with opener.open(file) as fp:
        lines = fp.readlines()
    for l in lines:
        try:
            f, o = l.split(b'\0')
            entries.append((f, int(o)))
        except ValueError:
            # malformed lines are reported but do not stop the rollback
            report(
                _(b"couldn't read journal entry %r!\n") % pycompat.bytestr(l)
            )

    # parse the companion backup listing, if present
    backupjournal = b"%s.backupfiles" % file
    if opener.exists(backupjournal):
        with opener.open(backupjournal) as fp:
            backupentries = read_backup_files(report, fp)
    if skip_journal_pattern is not None:
        keep = lambda x: not skip_journal_pattern.match(x[1])
        backupentries = [x for x in backupentries if keep(x)]

    _playback(
        file,
        report,
        opener,
        vfsmap,
        entries,
        backupentries,
        checkambigfiles=checkambigfiles,
    )
@@ -1,497 +1,496 b''
1 1 Test correctness of revlog inline -> non-inline transition
2 2 ----------------------------------------------------------
3 3
4 4 We test various file length and naming pattern as this created issue in the
5 5 past.
6 6
7 7 Helper extension to intercept renames and kill process
8 8
9 9 $ cat > $TESTTMP/intercept_before_rename.py << EOF
10 10 > import os
11 11 > import signal
12 12 > from mercurial import extensions, util
13 13 >
14 14 > def extsetup(ui):
15 15 > def rename(orig, src, dest, *args, **kwargs):
16 16 > path = util.normpath(dest)
17 17 > if path.endswith(b'data/file.i'):
18 18 > os.kill(os.getpid(), signal.SIGKILL)
19 19 > return orig(src, dest, *args, **kwargs)
20 20 > extensions.wrapfunction(util, 'rename', rename)
21 21 > EOF
22 22
23 23 $ cat > $TESTTMP/intercept_after_rename.py << EOF
24 24 > import os
25 25 > import signal
26 26 > from mercurial import extensions, util
27 27 >
28 28 > def extsetup(ui):
29 29 > def close(orig, *args, **kwargs):
30 30 > path = util.normpath(args[0]._atomictempfile__name)
31 31 > r = orig(*args, **kwargs)
32 32 > if path.endswith(b'/.hg/store/data/file.i'):
33 33 > os.kill(os.getpid(), signal.SIGKILL)
34 34 > return r
35 35 > extensions.wrapfunction(util.atomictempfile, 'close', close)
36 36 > def extsetup(ui):
37 37 > def rename(orig, src, dest, *args, **kwargs):
38 38 > path = util.normpath(dest)
39 39 > r = orig(src, dest, *args, **kwargs)
40 40 > if path.endswith(b'data/file.i'):
41 41 > os.kill(os.getpid(), signal.SIGKILL)
42 42 > return r
43 43 > extensions.wrapfunction(util, 'rename', rename)
44 44 > EOF
45 45
46 46 $ cat > $TESTTMP/killme.py << EOF
47 47 > import os
48 48 > import signal
49 49 >
50 50 > def killme(ui, repo, hooktype, **kwargs):
51 51 > os.kill(os.getpid(), signal.SIGKILL)
52 52 > EOF
53 53
54 54 $ cat > $TESTTMP/reader_wait_split.py << EOF
55 55 > import os
56 56 > import signal
57 57 > from mercurial import extensions, revlog, testing
58 58 > def _wait_post_load(orig, self, *args, **kwargs):
59 59 > wait = b'data/file' in self.radix
60 60 > if wait:
61 61 > testing.wait_file(b"$TESTTMP/writer-revlog-split")
62 62 > r = orig(self, *args, **kwargs)
63 63 > if wait:
64 64 > testing.write_file(b"$TESTTMP/reader-index-read")
65 65 > testing.wait_file(b"$TESTTMP/writer-revlog-unsplit")
66 66 > return r
67 67 >
68 68 > def extsetup(ui):
69 69 > extensions.wrapfunction(revlog.revlog, '_loadindex', _wait_post_load)
70 70 > EOF
71 71
72 72 setup a repository for tests
73 73 ----------------------------
74 74
75 75 $ cat >> $HGRCPATH << EOF
76 76 > [format]
77 77 > revlog-compression=none
78 78 > EOF
79 79
80 80 $ hg init troffset-computation
81 81 $ cd troffset-computation
82 82 $ files="
83 83 > file
84 84 > Directory_With,Special%Char/Complex_File.babar
85 85 > foo/bar/babar_celeste/foo
86 86 > 1234567890/1234567890/1234567890/1234567890/1234567890/1234567890/1234567890/1234567890/1234567890/1234567890/f
87 87 > some_dir/sub_dir/foo_bar
88 88 > some_dir/sub_dir/foo_bar.i.s/tutu
89 89 > "
90 90 $ for f in $files; do
91 91 > mkdir -p `dirname $f`
92 92 > done
93 93 $ for f in $files; do
94 94 > printf '%20d' '1' > $f
95 95 > done
96 96 $ hg commit -Aqma
97 97 $ for f in $files; do
98 98 > printf '%1024d' '1' > $f
99 99 > done
100 100 $ hg commit -Aqmb
101 101 $ for f in $files; do
102 102 > printf '%20d' '1' > $f
103 103 > done
104 104 $ hg commit -Aqmc
105 105 $ for f in $files; do
106 106 > dd if=/dev/zero of=$f bs=1k count=128 > /dev/null 2>&1
107 107 > done
108 108 $ hg commit -AqmD --traceback
109 109 $ for f in $files; do
110 110 > dd if=/dev/zero of=$f bs=1k count=132 > /dev/null 2>&1
111 111 > done
112 112 $ hg commit -AqmD --traceback
113 113
114 114 Reference size:
115 115 $ f -s file
116 116 file: size=135168
117 117 $ f -s .hg/store/data*/file*
118 118 .hg/store/data/file.d: size=267307
119 119 .hg/store/data/file.i: size=320
120 120
121 121 $ cd ..
122 122
123 123 Test a successful pull
124 124 =====================
125 125
126 126 Make sure everything goes through as expected if we don't do any crash
127 127
128 128 $ hg clone --quiet --rev 1 troffset-computation troffset-success
129 129 $ cd troffset-success
130 130
131 131 Reference size:
132 132 $ f -s file
133 133 file: size=1024
134 134 $ f -s .hg/store/data/file*
135 135 .hg/store/data/file.i: size=1174
136 136
137 137 $ hg pull ../troffset-computation
138 138 pulling from ../troffset-computation
139 139 searching for changes
140 140 adding changesets
141 141 adding manifests
142 142 adding file changes
143 143 added 3 changesets with 18 changes to 6 files
144 144 new changesets c99a94cae9b1:64874a3b0160
145 145 (run 'hg update' to get a working copy)
146 146
147 147
148 148 The inline revlog has been replaced
149 149
150 150 $ f -s .hg/store/data/file*
151 151 .hg/store/data/file.d: size=267307
152 152 .hg/store/data/file.i: size=320
153 153
154 154
155 155 $ hg verify -q
156 156 $ cd ..
157 157
158 158
159 159 Test a hard crash after the file was split but before the transaction was committed
160 160 ===================================================================================
161 161
162 162 Test offset computation to correctly factor in the index entries themselves.
163 163 Also test that the new data size has the correct size if the transaction is aborted
164 164 after the index has been replaced.
165 165
166 166 Test repo has commits a, b, c, D, where D is large (grows the revlog enough that it
167 167 transitions to non-inline storage). The clone initially has changes a, b
168 168 and will transition to non-inline storage when adding c, D.
169 169
170 170 If the transaction adding c, D is rolled back, then we don't undo the revlog split,
171 171 but truncate the index and the data to remove both c and D.
172 172
173 173
174 174 $ hg clone --quiet --rev 1 troffset-computation troffset-computation-copy
175 175 $ cd troffset-computation-copy
176 176
177 177 Reference size:
178 178 $ f -s file
179 179 file: size=1024
180 180 $ f -s .hg/store/data*/file*
181 181 .hg/store/data/file.i: size=1174
182 182
183 183 $ cat > .hg/hgrc <<EOF
184 184 > [hooks]
185 185 > pretxnchangegroup = python:$TESTTMP/killme.py:killme
186 186 > EOF
187 187 #if chg
188 188 $ hg pull ../troffset-computation
189 189 pulling from ../troffset-computation
190 190 [255]
191 191 #else
192 192 $ hg pull ../troffset-computation
193 193 pulling from ../troffset-computation
194 194 *Killed* (glob)
195 195 [137]
196 196 #endif
197 197
198 198
199 199 The inline revlog still exists, but a split version exists next to it
200 200
201 201 $ cat .hg/store/journal | tr '\0' ' ' | grep '\.s'
202 202 data/some_dir/sub_dir/foo_bar.i.s/tutu.i 1174
203 203 data/some_dir/sub_dir/foo_bar.i.s/tutu.d 0
204 204 $ f -s .hg/store/data*/file*
205 205 .hg/store/data-s/file: size=320
206 206 .hg/store/data/file.d: size=267307
207 207 .hg/store/data/file.i: size=132395
208 208
209 209
210 210 The first file.i entry should match the "Reference size" above.
211 211 The first file.d entry is the temporary record during the split,
212 212
213 213 A "temporary file" entry exist for the split index.
214 214
215 215 $ cat .hg/store/journal | tr -s '\000' ' ' | grep data/file
216 216 data/file.i 1174
217 217 data/file.d 0
218 218 $ cat .hg/store/journal.backupfiles | tr -s '\000' ' ' | tr -s '\00' ' '| grep 'data.*/file'
219 219 data/file.i data/journal.backup.file.i.bck 0
220 220 data-s/file 0
221 221
222 222 recover is rolling the split back, the fncache is still valid
223 223
224 224 $ hg recover
225 225 rolling back interrupted transaction
226 226 (verify step skipped, run `hg verify` to check your repository content)
227 227 $ f -s .hg/store/data*/file*
228 228 .hg/store/data/file.i: size=1174
229 229 $ hg tip
230 230 changeset: 1:64b04c8dc267
231 231 tag: tip
232 232 user: test
233 233 date: Thu Jan 01 00:00:00 1970 +0000
234 234 summary: b
235 235
236 236 $ hg verify -q
237 237 $ hg debugrebuildfncache --only-data
238 238 fncache already up to date
239 239 $ hg verify -q
240 240 $ cd ..
241 241
242 242 Test a hard crash right before the index is moved into place
243 243 ===========================================================
244 244
245 245 Now retry the procedure but intercept the rename of the index and check that
246 246 the journal does not contain the new index size. This demonstrates the edge case
247 247 where the data file is left as garbage.
248 248
249 249 $ hg clone --quiet --rev 1 troffset-computation troffset-computation-copy2
250 250 $ cd troffset-computation-copy2
251 251
252 252 Reference size:
253 253 $ f -s file
254 254 file: size=1024
255 255 $ f -s .hg/store/data*/file*
256 256 .hg/store/data/file.i: size=1174
257 257
258 258 $ cat > .hg/hgrc <<EOF
259 259 > [extensions]
260 260 > intercept_rename = $TESTTMP/intercept_before_rename.py
261 261 > EOF
262 262 #if chg
263 263 $ hg pull ../troffset-computation
264 264 pulling from ../troffset-computation
265 265 searching for changes
266 266 adding changesets
267 267 adding manifests
268 268 adding file changes
269 269 [255]
270 270 #else
271 271 $ hg pull ../troffset-computation
272 272 pulling from ../troffset-computation
273 273 searching for changes
274 274 adding changesets
275 275 adding manifests
276 276 adding file changes
277 277 *Killed* (glob)
278 278 [137]
279 279 #endif
280 280
281 281 The inline revlog still exists, but a split version exists next to it
282 282
283 283 $ f -s .hg/store/data*/file*
284 284 .hg/store/data-s/file: size=320
285 285 .hg/store/data/file.d: size=267307
286 286 .hg/store/data/file.i: size=132395
287 287
288 288 $ cat .hg/store/journal | tr -s '\000' ' ' | grep 'data.*/file'
289 289 data/file.i 1174
290 290 data/file.d 0
291 291
292 292 recover is rolling the split back, the fncache is still valid
293 293
294 294 $ hg recover
295 295 rolling back interrupted transaction
296 296 (verify step skipped, run `hg verify` to check your repository content)
297 297 $ f -s .hg/store/data*/file*
298 298 .hg/store/data/file.i: size=1174
299 299 $ hg tip
300 300 changeset: 1:64b04c8dc267
301 301 tag: tip
302 302 user: test
303 303 date: Thu Jan 01 00:00:00 1970 +0000
304 304 summary: b
305 305
306 306 $ hg verify -q
307 307 $ cd ..
308 308
309 309 Test a hard crash right after the index is moved into place
310 310 ===========================================================
311 311
312 312 Now retry the procedure but intercept the rename of the index.
313 313
314 314 $ hg clone --quiet --rev 1 troffset-computation troffset-computation-crash-after-rename
315 315 $ cd troffset-computation-crash-after-rename
316 316
317 317 Reference size:
318 318 $ f -s file
319 319 file: size=1024
320 320 $ f -s .hg/store/data*/file*
321 321 .hg/store/data/file.i: size=1174
322 322
323 323 $ cat > .hg/hgrc <<EOF
324 324 > [extensions]
325 325 > intercept_rename = $TESTTMP/intercept_after_rename.py
326 326 > EOF
327 327 #if chg
328 328 $ hg pull ../troffset-computation
329 329 pulling from ../troffset-computation
330 330 searching for changes
331 331 adding changesets
332 332 adding manifests
333 333 adding file changes
334 334 [255]
335 335 #else
336 336 $ hg pull ../troffset-computation
337 337 pulling from ../troffset-computation
338 338 searching for changes
339 339 adding changesets
340 340 adding manifests
341 341 adding file changes
342 342 *Killed* (glob)
343 343 [137]
344 344 #endif
345 345
346 346 The inline revlog was overwritten on disk
347 347
348 348 $ f -s .hg/store/data*/file*
349 349 .hg/store/data/file.d: size=267307
350 350 .hg/store/data/file.i: size=320
351 351
352 352 $ cat .hg/store/journal | tr -s '\000' ' ' | grep 'data.*/file'
353 353 data/file.i 1174
354 354 data/file.d 0
355 355
356 356 recover is rolling the split back, the fncache is still valid
357 357
358 358 $ hg recover
359 359 rolling back interrupted transaction
360 360 (verify step skipped, run `hg verify` to check your repository content)
361 361 $ f -s .hg/store/data*/file*
362 362 .hg/store/data/file.i: size=1174
363 363 $ hg tip
364 364 changeset: 1:64b04c8dc267
365 365 tag: tip
366 366 user: test
367 367 date: Thu Jan 01 00:00:00 1970 +0000
368 368 summary: b
369 369
370 370 $ hg verify -q
371 371 $ cd ..
372 372
373 373 Have the transaction rollback itself without any hard crash
374 374 ===========================================================
375 375
376 376
377 377 Repeat the original test but let hg rollback the transaction.
378 378
379 379 $ hg clone --quiet --rev 1 troffset-computation troffset-computation-copy-rb
380 380 $ cd troffset-computation-copy-rb
381 381 $ cat > .hg/hgrc <<EOF
382 382 > [hooks]
383 383 > pretxnchangegroup = false
384 384 > EOF
385 385 $ hg pull ../troffset-computation
386 386 pulling from ../troffset-computation
387 387 searching for changes
388 388 adding changesets
389 389 adding manifests
390 390 adding file changes
391 391 transaction abort!
392 392 rollback completed
393 393 abort: pretxnchangegroup hook exited with status 1
394 394 [40]
395 395
396 396 The split was rolled back
397 397
398 398 $ f -s .hg/store/data*/file*
399 .hg/store/data/file.d: size=0
400 399 .hg/store/data/file.i: size=1174
401 400
402 401
403 402 $ hg tip
404 403 changeset: 1:64b04c8dc267
405 404 tag: tip
406 405 user: test
407 406 date: Thu Jan 01 00:00:00 1970 +0000
408 407 summary: b
409 408
410 409 $ hg verify -q
411 410 $ cd ..
412 411
413 412 Read race
414 413 =========
415 414
416 415 We check that a client that started reading a revlog (its index) after the
417 416 split and end reading (the data) after the rollback should be fine
418 417
419 418 $ hg clone --quiet --rev 1 troffset-computation troffset-computation-race
420 419 $ cd troffset-computation-race
421 420 $ cat > .hg/hgrc <<EOF
422 421 > [hooks]
423 422 > pretxnchangegroup=$RUNTESTDIR/testlib/wait-on-file 5 $TESTTMP/reader-index-read $TESTTMP/writer-revlog-split
424 423 > pretxnclose = false
425 424 > EOF
426 425
427 426 start a reader
428 427
429 428 $ hg cat --rev 0 file \
430 429 > --config "extensions.wait_read=$TESTTMP/reader_wait_split.py" \
431 430 > 2> $TESTTMP/reader.stderr \
432 431 > > $TESTTMP/reader.stdout &
433 432
434 433 Do a failed pull in //
435 434
436 435 $ hg pull ../troffset-computation
437 436 pulling from ../troffset-computation
438 437 searching for changes
439 438 adding changesets
440 439 adding manifests
441 440 adding file changes
442 441 transaction abort!
443 442 rollback completed
444 443 abort: pretxnclose hook exited with status 1
445 444 [40]
446 445 $ touch $TESTTMP/writer-revlog-unsplit
447 446 $ wait
448 447
449 448 The reader should be fine
450 449 $ cat $TESTTMP/reader.stderr
451 450 $ cat $TESTTMP/reader.stdout
452 451 1 (no-eol)
453 452
454 453 $ hg verify -q
455 454
456 455 $ cd ..
457 456
458 457 pending hooks
459 458 =============
460 459
461 460 We check that hooks properly see the inside of the transaction, while other processes don't.
462 461
463 462 $ hg clone --quiet --rev 1 troffset-computation troffset-computation-hooks
464 463 $ cd troffset-computation-hooks
465 464 $ cat > .hg/hgrc <<EOF
466 465 > [hooks]
467 466 > pretxnclose.01-echo = hg cat -r 'max(all())' file | f --size
468 467 > pretxnclose.02-echo = $RUNTESTDIR/testlib/wait-on-file 5 $TESTTMP/hook-done $TESTTMP/hook-tr-ready
469 468 > pretxnclose.03-abort = false
470 469 > EOF
471 470
472 471 $ (
473 472 > $RUNTESTDIR/testlib/wait-on-file 5 $TESTTMP/hook-tr-ready;\
474 473 > hg cat -r 'max(all())' file | f --size;\
475 474 > touch $TESTTMP/hook-done
476 475 > ) >stdout 2>stderr &
477 476
478 477 $ hg pull ../troffset-computation
479 478 pulling from ../troffset-computation
480 479 searching for changes
481 480 adding changesets
482 481 adding manifests
483 482 adding file changes
484 483 size=135168
485 484 transaction abort!
486 485 rollback completed
487 486 abort: pretxnclose.03-abort hook exited with status 1
488 487 [40]
489 488
490 489 $ cat stdout
491 490 size=1024
492 491 $ cat stderr
493 492
494 493 $ hg verify -q
495 494
496 495
497 496 $ cd ..
General Comments 0
You need to be logged in to leave comments. Login now