##// END OF EJS Templates
transaction: re-wrap line to avoid a black bug...
Augie Fackler -
r43320:a614f26d default
parent child Browse files
Show More
@@ -1,653 +1,655 b''
1 1 # transaction.py - simple journaling scheme for mercurial
2 2 #
3 3 # This transaction scheme is intended to gracefully handle program
4 4 # errors and interruptions. More serious failures like system crashes
5 5 # can be recovered with an fsck-like tool. As the whole repository is
6 6 # effectively log-structured, this should amount to simply truncating
7 7 # anything that isn't referenced in the changelog.
8 8 #
9 9 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
10 10 #
11 11 # This software may be used and distributed according to the terms of the
12 12 # GNU General Public License version 2 or any later version.
13 13
from __future__ import absolute_import

import errno
import functools

from .i18n import _
from . import (
    error,
    pycompat,
    util,
)
from .utils import (
    stringutil,
)
27 27
# On-disk format version of the journal / backup-journal files.
version = 2

# These are the file generators that should only be executed after the
# finalizers are done, since they rely on the output of the finalizers (like
# the changelog having been written).
postfinalizegenerators = {
    'bookmarks',
    'dirstate',
}

# Group identifiers accepted by transaction._generatefiles().
gengroupall = 'all'
gengroupprefinalize = 'prefinalize'
gengrouppostfinalize = 'postfinalize'
def active(func):
    """Decorator: only allow *func* while the transaction is running.

    Raises error.Abort if the wrapped method is invoked after the
    transaction has been committed or aborted (``self._count == 0``).
    """
    # functools.wraps preserves the wrapped method's __name__ and
    # docstring, keeping repr()/introspection of decorated methods useful.
    @functools.wraps(func)
    def _active(self, *args, **kwds):
        if self._count == 0:
            raise error.Abort(_(
                'cannot use transaction when it is already committed/aborted'))
        return func(self, *args, **kwds)
    return _active
49 49
50 50 def _playback(journal, report, opener, vfsmap, entries, backupentries,
51 51 unlink=True, checkambigfiles=None):
52 52 for f, o, _ignore in entries:
53 53 if o or not unlink:
54 54 checkambig = checkambigfiles and (f, '') in checkambigfiles
55 55 try:
56 56 fp = opener(f, 'a', checkambig=checkambig)
57 57 if fp.tell() < o:
58 58 raise error.Abort(_(
59 59 "attempted to truncate %s to %d bytes, but it was "
60 60 "already %d bytes\n") % (f, o, fp.tell()))
61 61 fp.truncate(o)
62 62 fp.close()
63 63 except IOError:
64 64 report(_("failed to truncate %s\n") % f)
65 65 raise
66 66 else:
67 67 try:
68 68 opener.unlink(f)
69 69 except (IOError, OSError) as inst:
70 70 if inst.errno != errno.ENOENT:
71 71 raise
72 72
73 73 backupfiles = []
74 74 for l, f, b, c in backupentries:
75 75 if l not in vfsmap and c:
76 76 report("couldn't handle %s: unknown cache location %s\n"
77 77 % (b, l))
78 78 vfs = vfsmap[l]
79 79 try:
80 80 if f and b:
81 81 filepath = vfs.join(f)
82 82 backuppath = vfs.join(b)
83 83 checkambig = checkambigfiles and (f, l) in checkambigfiles
84 84 try:
85 85 util.copyfile(backuppath, filepath, checkambig=checkambig)
86 86 backupfiles.append(b)
87 87 except IOError:
88 88 report(_("failed to recover %s\n") % f)
89 89 else:
90 90 target = f or b
91 91 try:
92 92 vfs.unlink(target)
93 93 except (IOError, OSError) as inst:
94 94 if inst.errno != errno.ENOENT:
95 95 raise
96 96 except (IOError, OSError, error.Abort):
97 97 if not c:
98 98 raise
99 99
100 100 backuppath = "%s.backupfiles" % journal
101 101 if opener.exists(backuppath):
102 102 opener.unlink(backuppath)
103 103 opener.unlink(journal)
104 104 try:
105 105 for f in backupfiles:
106 106 if opener.exists(f):
107 107 opener.unlink(f)
108 108 except (IOError, OSError, error.Abort):
109 109 # only pure backup file remains, it is sage to ignore any error
110 110 pass
111 111
class transaction(util.transactional):
    def __init__(self, report, opener, vfsmap, journalname, undoname=None,
                 after=None, createmode=None, validator=None, releasefn=None,
                 checkambigfiles=None, name=r'<unnamed>'):
        """Begin a new transaction

        Begins a new transaction that allows rolling back writes in the event of
        an exception.

        * `after`: called after the transaction has been committed
        * `createmode`: the mode of the journal file that will be created
        * `releasefn`: called after releasing (with transaction and result)

        `checkambigfiles` is a set of (path, vfs-location) tuples,
        which determine whether file stat ambiguity should be avoided
        for corresponding files.
        """
        self._count = 1
        self._usages = 1
        self._report = report
        # a vfs to the store content
        self._opener = opener
        # a map to access files in various locations {location -> vfs}
        vfsmap = vfsmap.copy()
        vfsmap[''] = opener  # set default value
        self._vfsmap = vfsmap
        self._after = after
        self._entries = []
        self._map = {}
        self._journal = journalname
        self._undoname = undoname
        self._queue = []
        # A callback to validate transaction content before closing it.
        # should raise exception if anything is wrong.
        # target user is repository hooks.
        if validator is None:
            validator = lambda tr: None
        self._validator = validator
        # A callback to do something just after releasing transaction.
        if releasefn is None:
            releasefn = lambda tr, success: None
        self._releasefn = releasefn

        self._checkambigfiles = set()
        if checkambigfiles:
            self._checkambigfiles.update(checkambigfiles)

        self._names = [name]

        # A dict dedicated to precisely tracking the changes introduced in the
        # transaction.
        self.changes = {}

        # a dict of arguments to be passed to hooks
        self.hookargs = {}
        self._file = opener.open(self._journal, "w")

        # a list of ('location', 'path', 'backuppath', cache) entries.
        # - if 'backuppath' is empty, no file existed at backup time
        # - if 'path' is empty, this is a temporary transaction file
        # - if 'location' is not empty, the path is outside main opener reach.
        #   use 'location' value as a key in a vfsmap to find the right 'vfs'
        # (cache is currently unused)
        self._backupentries = []
        self._backupmap = {}
        self._backupjournal = "%s.backupfiles" % self._journal
        self._backupsfile = opener.open(self._backupjournal, 'w')
        self._backupsfile.write('%d\n' % version)

        if createmode is not None:
            opener.chmod(self._journal, createmode & 0o666)
            opener.chmod(self._backupjournal, createmode & 0o666)

        # hold file generations to be performed on commit
        self._filegenerators = {}
        # hold callback to write pending data for hooks
        self._pendingcallback = {}
        # True if any pending data have ever been written
        self._anypending = False
        # holds callback to call when writing the transaction
        self._finalizecallback = {}
        # hold callback for post transaction close
        self._postclosecallback = {}
        # holds callbacks to call during abort
        self._abortcallback = {}

    def __repr__(self):
        name = r'/'.join(self._names)
        return (r'<transaction name=%s, count=%d, usages=%d>' %
                (name, self._count, self._usages))

    def __del__(self):
        if self._journal:
            self._abort()

    @active
    def startgroup(self):
        """delay registration of file entry

        This is used by strip to delay vision of strip offset. The transaction
        sees either none or all of the strip actions to be done."""
        self._queue.append([])

    @active
    def endgroup(self):
        """apply delayed registration of file entry.

        This is used by strip to delay vision of strip offset. The transaction
        sees either none or all of the strip actions to be done."""
        q = self._queue.pop()
        for f, o, data in q:
            self._addentry(f, o, data)

    @active
    def add(self, file, offset, data=None):
        """record the state of an append-only file before update"""
        if file in self._map or file in self._backupmap:
            return
        if self._queue:
            self._queue[-1].append((file, offset, data))
            return

        self._addentry(file, offset, data)

    def _addentry(self, file, offset, data):
        """add an append-only entry to memory and on-disk state"""
        if file in self._map or file in self._backupmap:
            return
        self._entries.append((file, offset, data))
        self._map[file] = len(self._entries) - 1
        # add enough data to the journal to do the truncate
        self._file.write("%s\0%d\n" % (file, offset))
        self._file.flush()

    @active
    def addbackup(self, file, hardlink=True, location=''):
        """Adds a backup of the file to the transaction

        Calling addbackup() creates a hardlink backup of the specified file
        that is used to recover the file in the event of the transaction
        aborting.

        * `file`: the file path, relative to .hg/store
        * `hardlink`: use a hardlink to quickly create the backup
        """
        if self._queue:
            msg = 'cannot use transaction.addbackup inside "group"'
            raise error.ProgrammingError(msg)

        if file in self._map or file in self._backupmap:
            return
        vfs = self._vfsmap[location]
        dirname, filename = vfs.split(file)
        backupfilename = "%s.backup.%s" % (self._journal, filename)
        backupfile = vfs.reljoin(dirname, backupfilename)
        if vfs.exists(file):
            filepath = vfs.join(file)
            backuppath = vfs.join(backupfile)
            util.copyfile(filepath, backuppath, hardlink=hardlink)
        else:
            backupfile = ''

        self._addbackupentry((location, file, backupfile, False))

    def _addbackupentry(self, entry):
        """register a new backup entry and write it to disk"""
        self._backupentries.append(entry)
        self._backupmap[entry[1]] = len(self._backupentries) - 1
        self._backupsfile.write("%s\0%s\0%s\0%d\n" % entry)
        self._backupsfile.flush()

    @active
    def registertmp(self, tmpfile, location=''):
        """register a temporary transaction file

        Such files will be deleted when the transaction exits (on both
        failure and success).
        """
        self._addbackupentry((location, '', tmpfile, False))

    @active
    def addfilegenerator(self, genid, filenames, genfunc, order=0,
                         location=''):
        """add a function to generate some files at transaction commit

        The `genfunc` argument is a function capable of generating proper
        content of each entry in the `filename` tuple.

        At transaction close time, `genfunc` will be called with one file
        object argument per entry in `filenames`.

        The transaction itself is responsible for the backup, creation and
        final write of such file.

        The `genid` argument is used to ensure the same set of files is only
        generated once. Call to `addfilegenerator` for a `genid` already
        present will overwrite the old entry.

        The `order` argument may be used to control the order in which multiple
        generators will be executed.

        The `location` argument may be used to indicate the files are located
        outside of the standard directory for transaction. It should match
        one of the keys of the `transaction.vfsmap` dictionary.
        """
        # For now, we are unable to do proper backup and restore of custom vfs
        # but for bookmarks that are handled outside this mechanism.
        self._filegenerators[genid] = (order, filenames, genfunc, location)

    @active
    def removefilegenerator(self, genid):
        """reverse of addfilegenerator, remove a file generator function"""
        if genid in self._filegenerators:
            del self._filegenerators[genid]

    def _generatefiles(self, suffix='', group=gengroupall):
        # write files registered for generation
        any = False
        for id, entry in sorted(self._filegenerators.iteritems()):
            any = True
            order, filenames, genfunc, location = entry

            # for generation at closing, check if it's before or after finalize
            postfinalize = group == gengrouppostfinalize
            if (
                group != gengroupall
                and (id in postfinalizegenerators) != postfinalize
            ):
                continue

            vfs = self._vfsmap[location]
            files = []
            try:
                for name in filenames:
                    name += suffix
                    if suffix:
                        self.registertmp(name, location=location)
                        checkambig = False
                    else:
                        self.addbackup(name, location=location)
                        checkambig = (name, location) in self._checkambigfiles
                    files.append(vfs(name, 'w', atomictemp=True,
                                     checkambig=checkambig))
                genfunc(*files)
                for f in files:
                    f.close()
                # skip discard() loop since we're sure no open file remains
                del files[:]
            finally:
                for f in files:
                    f.discard()
        return any

    @active
    def find(self, file):
        if file in self._map:
            return self._entries[self._map[file]]
        if file in self._backupmap:
            return self._backupentries[self._backupmap[file]]
        return None

    @active
    def replace(self, file, offset, data=None):
        '''
        replace can only replace already committed entries
        that are not pending in the queue
        '''

        if file not in self._map:
            raise KeyError(file)
        index = self._map[file]
        self._entries[index] = (file, offset, data)
        self._file.write("%s\0%d\n" % (file, offset))
        self._file.flush()

    @active
    def nest(self, name=r'<unnamed>'):
        self._count += 1
        self._usages += 1
        self._names.append(name)
        return self

    def release(self):
        if self._count > 0:
            self._usages -= 1
        if self._names:
            self._names.pop()
        # if the transaction scopes are left without being closed, fail
        if self._count > 0 and self._usages == 0:
            self._abort()

    def running(self):
        return self._count > 0

    def addpending(self, category, callback):
        """add a callback to be called when the transaction is pending

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._pendingcallback[category] = callback

    @active
    def writepending(self):
        '''write pending file to temporary version

        This is used to allow hooks to view a transaction before commit'''
        categories = sorted(self._pendingcallback)
        for cat in categories:
            # remove callback since the data will have been flushed
            any = self._pendingcallback.pop(cat)(self)
            self._anypending = self._anypending or any
        self._anypending |= self._generatefiles(suffix='.pending')
        return self._anypending

    @active
    def addfinalize(self, category, callback):
        """add a callback to be called when the transaction is closed

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting old callbacks with
        newer callbacks.
        """
        self._finalizecallback[category] = callback

    @active
    def addpostclose(self, category, callback):
        """add or replace a callback to be called after the transaction closed

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._postclosecallback[category] = callback

    @active
    def getpostclose(self, category):
        """return a postclose callback added before, or None"""
        return self._postclosecallback.get(category, None)

    @active
    def addabort(self, category, callback):
        """add a callback to be called when the transaction is aborted.

        The transaction will be given as the first argument to the callback.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._abortcallback[category] = callback

    @active
    def close(self):
        '''commit the transaction'''
        if self._count == 1:
            self._validator(self)  # will raise exception if needed
            self._validator = None  # Help prevent cycles.
            self._generatefiles(group=gengroupprefinalize)
            categories = sorted(self._finalizecallback)
            for cat in categories:
                self._finalizecallback[cat](self)
            # Prevent double usage and help clear cycles.
            self._finalizecallback = None
            self._generatefiles(group=gengrouppostfinalize)

        self._count -= 1
        if self._count != 0:
            return
        self._file.close()
        self._backupsfile.close()
        # cleanup temporary files
        for l, f, b, c in self._backupentries:
            if l not in self._vfsmap and c:
                self._report("couldn't remove %s: unknown cache location %s\n"
                             % (b, l))
                continue
            vfs = self._vfsmap[l]
            if not f and b and vfs.exists(b):
                try:
                    vfs.unlink(b)
                except (IOError, OSError, error.Abort) as inst:
                    if not c:
                        raise
                    # Abort may be raised by a read only opener
                    self._report("couldn't remove %s: %s\n"
                                 % (vfs.join(b), inst))
        self._entries = []
        self._writeundo()
        if self._after:
            self._after()
            self._after = None  # Help prevent cycles.
        if self._opener.isfile(self._backupjournal):
            self._opener.unlink(self._backupjournal)
        if self._opener.isfile(self._journal):
            self._opener.unlink(self._journal)
        for l, _f, b, c in self._backupentries:
            if l not in self._vfsmap and c:
                self._report("couldn't remove %s: unknown cache location"
                             "%s\n" % (b, l))
                continue
            vfs = self._vfsmap[l]
            if b and vfs.exists(b):
                try:
                    vfs.unlink(b)
                except (IOError, OSError, error.Abort) as inst:
                    if not c:
                        raise
                    # Abort may be raised by a read only opener
                    self._report("couldn't remove %s: %s\n"
                                 % (vfs.join(b), inst))
        self._backupentries = []
        self._journal = None

        self._releasefn(self, True)  # notify success of closing transaction
        self._releasefn = None  # Help prevent cycles.

        # run post close action
        categories = sorted(self._postclosecallback)
        for cat in categories:
            self._postclosecallback[cat](self)
        # Prevent double usage and help clear cycles.
        self._postclosecallback = None

    @active
    def abort(self):
        '''abort the transaction (generally called on error, or when the
        transaction is not explicitly committed before going out of
        scope)'''
        self._abort()

    def _writeundo(self):
        """write transaction data for possible future undo call"""
        if self._undoname is None:
            return
        undobackupfile = self._opener.open("%s.backupfiles" % self._undoname,
                                           'w')
        undobackupfile.write('%d\n' % version)
        for l, f, b, c in self._backupentries:
            if not f:  # temporary file
                continue
            if not b:
                u = ''
            else:
                if l not in self._vfsmap and c:
                    self._report("couldn't remove %s: unknown cache location"
                                 "%s\n" % (b, l))
                    continue
                vfs = self._vfsmap[l]
                base, name = vfs.split(b)
                assert name.startswith(self._journal), name
                uname = name.replace(self._journal, self._undoname, 1)
                u = vfs.reljoin(base, uname)
                util.copyfile(vfs.join(b), vfs.join(u), hardlink=True)
            undobackupfile.write("%s\0%s\0%s\0%d\n" % (l, f, u, c))
        undobackupfile.close()

    def _abort(self):
        self._count = 0
        self._usages = 0
        self._file.close()
        self._backupsfile.close()

        try:
            if not self._entries and not self._backupentries:
                if self._backupjournal:
                    self._opener.unlink(self._backupjournal)
                if self._journal:
                    self._opener.unlink(self._journal)
                return

            self._report(_("transaction abort!\n"))

            try:
                for cat in sorted(self._abortcallback):
                    self._abortcallback[cat](self)
                # Prevent double usage and help clear cycles.
                self._abortcallback = None
                _playback(self._journal, self._report, self._opener,
                          self._vfsmap, self._entries, self._backupentries,
                          False, checkambigfiles=self._checkambigfiles)
                self._report(_("rollback completed\n"))
            except BaseException as exc:
                self._report(_("rollback failed - please run hg recover\n"))
                self._report(_("(failure reason: %s)\n")
                             % stringutil.forcebytestr(exc))
        finally:
            self._journal = None
            self._releasefn(self, False)  # notify failure of transaction
            self._releasefn = None  # Help prevent cycles.
604 606
def rollback(opener, vfsmap, file, report, checkambigfiles=None):
    """Rolls back the transaction contained in the given file

    Reads the entries in the specified file, and the corresponding
    '*.backupfiles' file, to recover from an incomplete transaction.

    * `file`: a file containing a list of entries, specifying where
    to truncate each file.  The file should contain a list of
    file\0offset pairs, delimited by newlines. The corresponding
    '*.backupfiles' file should contain a list of file\0backupfile
    pairs, delimited by \0.

    `checkambigfiles` is a set of (path, vfs-location) tuples,
    which determine whether file stat ambiguity should be avoided at
    restoring corresponding files.
    """
    entries = []
    backupentries = []

    # parse the journal: one "file\0offset" record per line
    fp = opener.open(file)
    lines = fp.readlines()
    fp.close()
    for l in lines:
        try:
            f, o = l.split('\0')
            entries.append((f, int(o), None))
        except ValueError:
            report(
                _("couldn't read journal entry %r!\n") % pycompat.bytestr(l))

    # parse the backup journal, if present; first line is the format version
    backupjournal = "%s.backupfiles" % file
    if opener.exists(backupjournal):
        fp = opener.open(backupjournal)
        lines = fp.readlines()
        # BUGFIX: close the backup journal too (the main journal above is
        # closed explicitly; this one was previously left to the GC)
        fp.close()
        if lines:
            ver = lines[0][:-1]
            if ver == (b'%d' % version):
                for line in lines[1:]:
                    if line:
                        # Shave off the trailing newline
                        line = line[:-1]
                        l, f, b, c = line.split('\0')
                        backupentries.append((l, f, b, bool(c)))
            else:
                report(_("journal was created by a different version of "
                         "Mercurial\n"))

    _playback(file, report, opener, vfsmap, entries, backupentries,
              checkambigfiles=checkambigfiles)
General Comments 0
You need to be logged in to leave comments. Login now