# mercurial/transaction.py — changeset r39721:4024c363 (branch: default)
# "transaction: make names a private attribute" — Gregory Szorc
# (content below was captured from a diff view; hunk @@ -1,640 +1,640 @@)
1 1 # transaction.py - simple journaling scheme for mercurial
2 2 #
3 3 # This transaction scheme is intended to gracefully handle program
4 4 # errors and interruptions. More serious failures like system crashes
5 5 # can be recovered with an fsck-like tool. As the whole repository is
6 6 # effectively log-structured, this should amount to simply truncating
7 7 # anything that isn't referenced in the changelog.
8 8 #
9 9 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
10 10 #
11 11 # This software may be used and distributed according to the terms of the
12 12 # GNU General Public License version 2 or any later version.
13 13
14 14 from __future__ import absolute_import
15 15
16 16 import errno
17 17
18 18 from .i18n import _
19 19 from . import (
20 20 error,
21 21 pycompat,
22 22 util,
23 23 )
24 24
# on-disk format version of the backupfiles journal
version = 2

# These are the file generators that should only be executed after the
# finalizers are done, since they rely on the output of the finalizers (like
# the changelog having been written).
postfinalizegenerators = {
    'bookmarks',
    'dirstate'
}

# group identifiers used to select which file generators to run
gengroupall = 'all'
gengroupprefinalize = 'prefinalize'
gengrouppostfinalize = 'postfinalize'
38 38
def active(func):
    """Decorator: only allow `func` while the transaction is running.

    Raises error.Abort when the wrapped method is invoked after the
    transaction has already been committed or aborted (self._count == 0).
    """
    def wrapper(self, *args, **kwds):
        if not self._count:
            raise error.Abort(_(
                'cannot use transaction when it is already committed/aborted'))
        return func(self, *args, **kwds)
    return wrapper
46 46
47 47 def _playback(journal, report, opener, vfsmap, entries, backupentries,
48 48 unlink=True, checkambigfiles=None):
49 49 for f, o, _ignore in entries:
50 50 if o or not unlink:
51 51 checkambig = checkambigfiles and (f, '') in checkambigfiles
52 52 try:
53 53 fp = opener(f, 'a', checkambig=checkambig)
54 54 fp.truncate(o)
55 55 fp.close()
56 56 except IOError:
57 57 report(_("failed to truncate %s\n") % f)
58 58 raise
59 59 else:
60 60 try:
61 61 opener.unlink(f)
62 62 except (IOError, OSError) as inst:
63 63 if inst.errno != errno.ENOENT:
64 64 raise
65 65
66 66 backupfiles = []
67 67 for l, f, b, c in backupentries:
68 68 if l not in vfsmap and c:
69 69 report("couldn't handle %s: unknown cache location %s\n"
70 70 % (b, l))
71 71 vfs = vfsmap[l]
72 72 try:
73 73 if f and b:
74 74 filepath = vfs.join(f)
75 75 backuppath = vfs.join(b)
76 76 checkambig = checkambigfiles and (f, l) in checkambigfiles
77 77 try:
78 78 util.copyfile(backuppath, filepath, checkambig=checkambig)
79 79 backupfiles.append(b)
80 80 except IOError:
81 81 report(_("failed to recover %s\n") % f)
82 82 else:
83 83 target = f or b
84 84 try:
85 85 vfs.unlink(target)
86 86 except (IOError, OSError) as inst:
87 87 if inst.errno != errno.ENOENT:
88 88 raise
89 89 except (IOError, OSError, error.Abort) as inst:
90 90 if not c:
91 91 raise
92 92
93 93 backuppath = "%s.backupfiles" % journal
94 94 if opener.exists(backuppath):
95 95 opener.unlink(backuppath)
96 96 opener.unlink(journal)
97 97 try:
98 98 for f in backupfiles:
99 99 if opener.exists(f):
100 100 opener.unlink(f)
101 101 except (IOError, OSError, error.Abort) as inst:
102 102 # only pure backup file remains, it is sage to ignore any error
103 103 pass
104 104
class transaction(util.transactional):
    def __init__(self, report, opener, vfsmap, journalname, undoname=None,
                 after=None, createmode=None, validator=None, releasefn=None,
                 checkambigfiles=None, name=r'<unnamed>'):
        """Begin a new transaction

        Begins a new transaction that allows rolling back writes in the event of
        an exception.

        * `after`: called after the transaction has been committed
        * `createmode`: the mode of the journal file that will be created
        * `releasefn`: called after releasing (with transaction and result)

        `checkambigfiles` is a set of (path, vfs-location) tuples,
        which determine whether file stat ambiguity should be avoided
        for corresponded files.
        """
        self._count = 1
        self._usages = 1
        self._report = report
        # a vfs to the store content
        self._opener = opener
        # a map to access file in various {location -> vfs}
        vfsmap = vfsmap.copy()
        vfsmap[''] = opener  # set default value
        self._vfsmap = vfsmap
        self._after = after
        self.entries = []
        self._map = {}
        self._journal = journalname
        self._undoname = undoname
        self._queue = []
        # A callback to validate transaction content before closing it.
        # should raise exception if anything is wrong.
        # target user is repository hooks.
        if validator is None:
            validator = lambda tr: None
        self._validator = validator
        # A callback to do something just after releasing transaction.
        if releasefn is None:
            releasefn = lambda tr, success: None
        self._releasefn = releasefn

        self._checkambigfiles = set()
        if checkambigfiles:
            self._checkambigfiles.update(checkambigfiles)

        # stack of names for this transaction and any nested transactions
        self._names = [name]

        # A dict dedicated to precisely tracking the changes introduced in the
        # transaction.
        self.changes = {}

        # a dict of arguments to be passed to hooks
        self.hookargs = {}
        self._file = opener.open(self._journal, "w")

        # a list of ('location', 'path', 'backuppath', cache) entries.
        # - if 'backuppath' is empty, no file existed at backup time
        # - if 'path' is empty, this is a temporary transaction file
        # - if 'location' is not empty, the path is outside main opener reach.
        #   use 'location' value as a key in a vfsmap to find the right 'vfs'
        # (cache is currently unused)
        self._backupentries = []
        self._backupmap = {}
        self._backupjournal = "%s.backupfiles" % self._journal
        self._backupsfile = opener.open(self._backupjournal, 'w')
        self._backupsfile.write('%d\n' % version)

        if createmode is not None:
            opener.chmod(self._journal, createmode & 0o666)
            opener.chmod(self._backupjournal, createmode & 0o666)

        # hold file generations to be performed on commit
        self._filegenerators = {}
        # hold callback to write pending data for hooks
        self._pendingcallback = {}
        # True if any pending data have been written ever
        self._anypending = False
        # holds callback to call when writing the transaction
        self._finalizecallback = {}
        # hold callback for post transaction close
        self._postclosecallback = {}
        # holds callbacks to call during abort
        self._abortcallback = {}

    def __repr__(self):
        name = r'/'.join(self._names)
        return (r'<transaction name=%s, count=%d, usages=%d>' %
                (name, self._count, self._usages))

    def __del__(self):
        if self._journal:
            self._abort()

    @active
    def startgroup(self):
        """delay registration of file entry

        This is used by strip to delay vision of strip offset. The transaction
        sees either none or all of the strip actions to be done."""
        self._queue.append([])

    @active
    def endgroup(self):
        """apply delayed registration of file entry.

        This is used by strip to delay vision of strip offset. The transaction
        sees either none or all of the strip actions to be done."""
        q = self._queue.pop()
        for f, o, data in q:
            self._addentry(f, o, data)

    @active
    def add(self, file, offset, data=None):
        """record the state of an append-only file before update"""
        if file in self._map or file in self._backupmap:
            return
        if self._queue:
            self._queue[-1].append((file, offset, data))
            return

        self._addentry(file, offset, data)

    def _addentry(self, file, offset, data):
        """add an append-only entry to memory and on-disk state"""
        if file in self._map or file in self._backupmap:
            return
        self.entries.append((file, offset, data))
        self._map[file] = len(self.entries) - 1
        # add enough data to the journal to do the truncate
        self._file.write("%s\0%d\n" % (file, offset))
        self._file.flush()

    @active
    def addbackup(self, file, hardlink=True, location=''):
        """Adds a backup of the file to the transaction

        Calling addbackup() creates a hardlink backup of the specified file
        that is used to recover the file in the event of the transaction
        aborting.

        * `file`: the file path, relative to .hg/store
        * `hardlink`: use a hardlink to quickly create the backup
        """
        if self._queue:
            msg = 'cannot use transaction.addbackup inside "group"'
            raise error.ProgrammingError(msg)

        if file in self._map or file in self._backupmap:
            return
        vfs = self._vfsmap[location]
        dirname, filename = vfs.split(file)
        backupfilename = "%s.backup.%s" % (self._journal, filename)
        backupfile = vfs.reljoin(dirname, backupfilename)
        if vfs.exists(file):
            filepath = vfs.join(file)
            backuppath = vfs.join(backupfile)
            util.copyfile(filepath, backuppath, hardlink=hardlink)
        else:
            backupfile = ''

        self._addbackupentry((location, file, backupfile, False))

    def _addbackupentry(self, entry):
        """register a new backup entry and write it to disk"""
        self._backupentries.append(entry)
        self._backupmap[entry[1]] = len(self._backupentries) - 1
        self._backupsfile.write("%s\0%s\0%s\0%d\n" % entry)
        self._backupsfile.flush()

    @active
    def registertmp(self, tmpfile, location=''):
        """register a temporary transaction file

        Such files will be deleted when the transaction exits (on both
        failure and success).
        """
        self._addbackupentry((location, '', tmpfile, False))

    @active
    def addfilegenerator(self, genid, filenames, genfunc, order=0,
                         location=''):
        """add a function to generate some files at transaction commit

        The `genfunc` argument is a function capable of generating proper
        content of each entry in the `filename` tuple.

        At transaction close time, `genfunc` will be called with one file
        object argument per entries in `filenames`.

        The transaction itself is responsible for the backup, creation and
        final write of such file.

        The `genid` argument is used to ensure the same set of file is only
        generated once. Call to `addfilegenerator` for a `genid` already
        present will overwrite the old entry.

        The `order` argument may be used to control the order in which multiple
        generator will be executed.

        The `location` arguments may be used to indicate the files are located
        outside of the standard directory for transaction. It should match
        one of the key of the `transaction.vfsmap` dictionary.
        """
        # For now, we are unable to do proper backup and restore of custom vfs
        # but for bookmarks that are handled outside this mechanism.
        self._filegenerators[genid] = (order, filenames, genfunc, location)

    @active
    def removefilegenerator(self, genid):
        """reverse of addfilegenerator, remove a file generator function"""
        if genid in self._filegenerators:
            del self._filegenerators[genid]

    def _generatefiles(self, suffix='', group=gengroupall):
        # write files registered for generation
        any = False
        for id, entry in sorted(self._filegenerators.iteritems()):
            any = True
            order, filenames, genfunc, location = entry

            # for generation at closing, check if it's before or after finalize
            postfinalize = group == gengrouppostfinalize
            if (group != gengroupall and
                (id in postfinalizegenerators) != (postfinalize)):
                continue

            vfs = self._vfsmap[location]
            files = []
            try:
                for name in filenames:
                    name += suffix
                    if suffix:
                        self.registertmp(name, location=location)
                        checkambig = False
                    else:
                        self.addbackup(name, location=location)
                        checkambig = (name, location) in self._checkambigfiles
                    files.append(vfs(name, 'w', atomictemp=True,
                                     checkambig=checkambig))
                genfunc(*files)
            finally:
                for f in files:
                    f.close()
        return any

    @active
    def find(self, file):
        # return the journal entry or backup entry for `file`, if any
        if file in self._map:
            return self.entries[self._map[file]]
        if file in self._backupmap:
            return self._backupentries[self._backupmap[file]]
        return None

    @active
    def replace(self, file, offset, data=None):
        '''
        replace can only replace already committed entries
        that are not pending in the queue
        '''

        if file not in self._map:
            raise KeyError(file)
        index = self._map[file]
        self.entries[index] = (file, offset, data)
        self._file.write("%s\0%d\n" % (file, offset))
        self._file.flush()

    @active
    def nest(self, name=r'<unnamed>'):
        # open a nested transaction scope; it shares this object's state
        self._count += 1
        self._usages += 1
        self._names.append(name)
        return self

    def release(self):
        if self._count > 0:
            self._usages -= 1
        if self._names:
            self._names.pop()
        # if the transaction scopes are left without being closed, fail
        if self._count > 0 and self._usages == 0:
            self._abort()

    def running(self):
        return self._count > 0

    def addpending(self, category, callback):
        """add a callback to be called when the transaction is pending

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._pendingcallback[category] = callback

    @active
    def writepending(self):
        '''write pending file to temporary version

        This is used to allow hooks to view a transaction before commit'''
        categories = sorted(self._pendingcallback)
        for cat in categories:
            # remove callback since the data will have been flushed
            any = self._pendingcallback.pop(cat)(self)
            self._anypending = self._anypending or any
        self._anypending |= self._generatefiles(suffix='.pending')
        return self._anypending

    @active
    def addfinalize(self, category, callback):
        """add a callback to be called when the transaction is closed

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting old callbacks with
        newer callbacks.
        """
        self._finalizecallback[category] = callback

    @active
    def addpostclose(self, category, callback):
        """add or replace a callback to be called after the transaction closed

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._postclosecallback[category] = callback

    @active
    def getpostclose(self, category):
        """return a postclose callback added before, or None"""
        return self._postclosecallback.get(category, None)

    @active
    def addabort(self, category, callback):
        """add a callback to be called when the transaction is aborted.

        The transaction will be given as the first argument to the callback.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._abortcallback[category] = callback

    @active
    def close(self):
        '''commit the transaction'''
        if self._count == 1:
            self._validator(self)  # will raise exception if needed
            self._validator = None  # Help prevent cycles.
            self._generatefiles(group=gengroupprefinalize)
            categories = sorted(self._finalizecallback)
            for cat in categories:
                self._finalizecallback[cat](self)
            # Prevent double usage and help clear cycles.
            self._finalizecallback = None
            self._generatefiles(group=gengrouppostfinalize)

        self._count -= 1
        if self._count != 0:
            return
        self._file.close()
        self._backupsfile.close()
        # cleanup temporary files
        for l, f, b, c in self._backupentries:
            if l not in self._vfsmap and c:
                self._report("couldn't remove %s: unknown cache location %s\n"
                             % (b, l))
                continue
            vfs = self._vfsmap[l]
            if not f and b and vfs.exists(b):
                try:
                    vfs.unlink(b)
                except (IOError, OSError, error.Abort) as inst:
                    if not c:
                        raise
                    # Abort may be raised by a read-only opener
                    self._report("couldn't remove %s: %s\n"
                                 % (vfs.join(b), inst))
        self.entries = []
        self._writeundo()
        if self._after:
            self._after()
            self._after = None  # Help prevent cycles.
        if self._opener.isfile(self._backupjournal):
            self._opener.unlink(self._backupjournal)
        if self._opener.isfile(self._journal):
            self._opener.unlink(self._journal)
        for l, _f, b, c in self._backupentries:
            if l not in self._vfsmap and c:
                self._report("couldn't remove %s: unknown cache location"
                             "%s\n" % (b, l))
                continue
            vfs = self._vfsmap[l]
            if b and vfs.exists(b):
                try:
                    vfs.unlink(b)
                except (IOError, OSError, error.Abort) as inst:
                    if not c:
                        raise
                    # Abort may be raised by a read-only opener
                    self._report("couldn't remove %s: %s\n"
                                 % (vfs.join(b), inst))
        self._backupentries = []
        self._journal = None

        self._releasefn(self, True)  # notify success of closing transaction
        self._releasefn = None  # Help prevent cycles.

        # run post close action
        categories = sorted(self._postclosecallback)
        for cat in categories:
            self._postclosecallback[cat](self)
        # Prevent double usage and help clear cycles.
        self._postclosecallback = None

    @active
    def abort(self):
        '''abort the transaction (generally called on error, or when the
        transaction is not explicitly committed before going out of
        scope)'''
        self._abort()

    def _writeundo(self):
        """write transaction data for possible future undo call"""
        if self._undoname is None:
            return
        undobackupfile = self._opener.open("%s.backupfiles" % self._undoname,
                                           'w')
        undobackupfile.write('%d\n' % version)
        for l, f, b, c in self._backupentries:
            if not f:  # temporary file
                continue
            if not b:
                u = ''
            else:
                if l not in self._vfsmap and c:
                    self._report("couldn't remove %s: unknown cache location"
                                 "%s\n" % (b, l))
                    continue
                vfs = self._vfsmap[l]
                base, name = vfs.split(b)
                assert name.startswith(self._journal), name
                uname = name.replace(self._journal, self._undoname, 1)
                u = vfs.reljoin(base, uname)
                util.copyfile(vfs.join(b), vfs.join(u), hardlink=True)
            undobackupfile.write("%s\0%s\0%s\0%d\n" % (l, f, u, c))
        undobackupfile.close()

    def _abort(self):
        self._count = 0
        self._usages = 0
        self._file.close()
        self._backupsfile.close()

        try:
            if not self.entries and not self._backupentries:
                # nothing was written: just remove the (empty) journals
                if self._backupjournal:
                    self._opener.unlink(self._backupjournal)
                if self._journal:
                    self._opener.unlink(self._journal)
                return

            self._report(_("transaction abort!\n"))

            try:
                for cat in sorted(self._abortcallback):
                    self._abortcallback[cat](self)
                # Prevent double usage and help clear cycles.
                self._abortcallback = None
                _playback(self._journal, self._report, self._opener,
                          self._vfsmap, self.entries, self._backupentries,
                          False, checkambigfiles=self._checkambigfiles)
                self._report(_("rollback completed\n"))
            except BaseException:
                self._report(_("rollback failed - please run hg recover\n"))
        finally:
            self._journal = None
            self._releasefn(self, False)  # notify failure of transaction
            self._releasefn = None  # Help prevent cycles.
591 591
def rollback(opener, vfsmap, file, report, checkambigfiles=None):
    """Rolls back the transaction contained in the given file

    Reads the entries in the specified file, and the corresponding
    '*.backupfiles' file, to recover from an incomplete transaction.

    * `file`: a file containing a list of entries, specifying where
    to truncate each file.  The file should contain a list of
    file\0offset pairs, delimited by newlines. The corresponding
    '*.backupfiles' file should contain a list of file\0backupfile
    pairs, delimited by \0.

    `checkambigfiles` is a set of (path, vfs-location) tuples,
    which determine whether file stat ambiguity should be avoided at
    restoring corresponded files.
    """
    entries = []
    backupentries = []

    # parse the main journal: one "file\0offset" record per line
    fp = opener.open(file)
    lines = fp.readlines()
    fp.close()
    for l in lines:
        try:
            f, o = l.split('\0')
            entries.append((f, int(o), None))
        except ValueError:
            report(
                _("couldn't read journal entry %r!\n") % pycompat.bytestr(l))

    # parse the companion backup journal, if present and version-compatible
    backupjournal = "%s.backupfiles" % file
    if opener.exists(backupjournal):
        fp = opener.open(backupjournal)
        lines = fp.readlines()
        if lines:
            ver = lines[0][:-1]
            if ver != (b'%d' % version):
                report(_("journal was created by a different version of "
                         "Mercurial\n"))
            else:
                for line in lines[1:]:
                    if not line:
                        continue
                    # Shave off the trailing newline
                    line = line[:-1]
                    l, f, b, c = line.split('\0')
                    backupentries.append((l, f, b, bool(c)))

    _playback(file, report, opener, vfsmap, entries, backupentries,
              checkambigfiles=checkambigfiles)
# (end of captured changeset view)