##// END OF EJS Templates
transaction: fix hg version check when loading journal...
Augie Fackler -
r35850:aad39713 default
parent child Browse files
Show More
@@ -1,627 +1,627
1 1 # transaction.py - simple journaling scheme for mercurial
2 2 #
3 3 # This transaction scheme is intended to gracefully handle program
4 4 # errors and interruptions. More serious failures like system crashes
5 5 # can be recovered with an fsck-like tool. As the whole repository is
6 6 # effectively log-structured, this should amount to simply truncating
7 7 # anything that isn't referenced in the changelog.
8 8 #
9 9 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
10 10 #
11 11 # This software may be used and distributed according to the terms of the
12 12 # GNU General Public License version 2 or any later version.
13 13
14 14 from __future__ import absolute_import
15 15
16 16 import errno
17 17
18 18 from .i18n import _
19 19 from . import (
20 20 error,
21 21 util,
22 22 )
23 23
# on-disk journal format version (first line of the '*.backupfiles' file)
version = 2

# These are the file generators that should only be executed after the
# finalizers are done, since they rely on the output of the finalizers (like
# the changelog having been written).
postfinalizegenerators = {
    'bookmarks',
    'dirstate',
}

# group selectors for _generatefiles: run everything, only the pre-finalize
# generators, or only the post-finalize ones.
gengroupall = 'all'
gengroupprefinalize = 'prefinalize'
gengrouppostfinalize = 'postfinalize'
37 37
def active(func):
    """Decorator for transaction methods: refuse to run once finished.

    Wraps *func* so that calling it on a transaction whose scope count
    has dropped to zero (already committed or aborted) raises Abort.
    """
    def _active(self, *args, **kwds):
        if self.count != 0:
            return func(self, *args, **kwds)
        raise error.Abort(_(
            'cannot use transaction when it is already committed/aborted'))
    return _active
45 45
46 46 def _playback(journal, report, opener, vfsmap, entries, backupentries,
47 47 unlink=True, checkambigfiles=None):
48 48 for f, o, _ignore in entries:
49 49 if o or not unlink:
50 50 checkambig = checkambigfiles and (f, '') in checkambigfiles
51 51 try:
52 52 fp = opener(f, 'a', checkambig=checkambig)
53 53 fp.truncate(o)
54 54 fp.close()
55 55 except IOError:
56 56 report(_("failed to truncate %s\n") % f)
57 57 raise
58 58 else:
59 59 try:
60 60 opener.unlink(f)
61 61 except (IOError, OSError) as inst:
62 62 if inst.errno != errno.ENOENT:
63 63 raise
64 64
65 65 backupfiles = []
66 66 for l, f, b, c in backupentries:
67 67 if l not in vfsmap and c:
68 68 report("couldn't handle %s: unknown cache location %s\n"
69 69 % (b, l))
70 70 vfs = vfsmap[l]
71 71 try:
72 72 if f and b:
73 73 filepath = vfs.join(f)
74 74 backuppath = vfs.join(b)
75 75 checkambig = checkambigfiles and (f, l) in checkambigfiles
76 76 try:
77 77 util.copyfile(backuppath, filepath, checkambig=checkambig)
78 78 backupfiles.append(b)
79 79 except IOError:
80 80 report(_("failed to recover %s\n") % f)
81 81 else:
82 82 target = f or b
83 83 try:
84 84 vfs.unlink(target)
85 85 except (IOError, OSError) as inst:
86 86 if inst.errno != errno.ENOENT:
87 87 raise
88 88 except (IOError, OSError, error.Abort) as inst:
89 89 if not c:
90 90 raise
91 91
92 92 backuppath = "%s.backupfiles" % journal
93 93 if opener.exists(backuppath):
94 94 opener.unlink(backuppath)
95 95 opener.unlink(journal)
96 96 try:
97 97 for f in backupfiles:
98 98 if opener.exists(f):
99 99 opener.unlink(f)
100 100 except (IOError, OSError, error.Abort) as inst:
101 101 # only pure backup file remains, it is sage to ignore any error
102 102 pass
103 103
class transaction(util.transactional):
    def __init__(self, report, opener, vfsmap, journalname, undoname=None,
                 after=None, createmode=None, validator=None, releasefn=None,
                 checkambigfiles=None):
        """Begin a new transaction

        Begins a new transaction that allows rolling back writes in the event of
        an exception.

        * `after`: called after the transaction has been committed
        * `createmode`: the mode of the journal file that will be created
        * `releasefn`: called after releasing (with transaction and result)

        `checkambigfiles` is a set of (path, vfs-location) tuples,
        which determine whether file stat ambiguity should be avoided
        for corresponded files.
        """
        self.count = 1
        self.usages = 1
        self.report = report
        # a vfs to the store content
        self.opener = opener
        # a map to access file in various {location -> vfs}
        vfsmap = vfsmap.copy()
        vfsmap[''] = opener  # set default value
        self._vfsmap = vfsmap
        self.after = after
        self.entries = []
        self.map = {}
        self.journal = journalname
        self.undoname = undoname
        self._queue = []
        # A callback to validate transaction content before closing it.
        # should raise exception if anything is wrong.
        # target user is repository hooks.
        if validator is None:
            validator = lambda tr: None
        self.validator = validator
        # A callback to do something just after releasing transaction.
        if releasefn is None:
            releasefn = lambda tr, success: None
        self.releasefn = releasefn

        self.checkambigfiles = set()
        if checkambigfiles:
            self.checkambigfiles.update(checkambigfiles)

        # A dict dedicated to precisely tracking the changes introduced in the
        # transaction.
        self.changes = {}

        # a dict of arguments to be passed to hooks
        self.hookargs = {}
        self.file = opener.open(self.journal, "w")

        # a list of ('location', 'path', 'backuppath', cache) entries.
        # - if 'backuppath' is empty, no file existed at backup time
        # - if 'path' is empty, this is a temporary transaction file
        # - if 'location' is not empty, the path is outside main opener reach.
        #   use 'location' value as a key in a vfsmap to find the right 'vfs'
        # (cache is currently unused)
        self._backupentries = []
        self._backupmap = {}
        self._backupjournal = "%s.backupfiles" % self.journal
        self._backupsfile = opener.open(self._backupjournal, 'w')
        self._backupsfile.write('%d\n' % version)

        if createmode is not None:
            opener.chmod(self.journal, createmode & 0o666)
            opener.chmod(self._backupjournal, createmode & 0o666)

        # hold file generations to be performed on commit
        self._filegenerators = {}
        # hold callback to write pending data for hooks
        self._pendingcallback = {}
        # True if any pending data have been written ever
        self._anypending = False
        # holds callback to call when writing the transaction
        self._finalizecallback = {}
        # hold callback for post transaction close
        self._postclosecallback = {}
        # holds callbacks to call during abort
        self._abortcallback = {}

    def __del__(self):
        if self.journal:
            self._abort()

    @active
    def startgroup(self):
        """delay registration of file entry

        This is used by strip to delay vision of strip offset. The transaction
        sees either none or all of the strip actions to be done."""
        self._queue.append([])

    @active
    def endgroup(self):
        """apply delayed registration of file entry.

        This is used by strip to delay vision of strip offset. The transaction
        sees either none or all of the strip actions to be done."""
        q = self._queue.pop()
        for f, o, data in q:
            self._addentry(f, o, data)

    @active
    def add(self, file, offset, data=None):
        """record the state of an append-only file before update"""
        if file in self.map or file in self._backupmap:
            return
        if self._queue:
            self._queue[-1].append((file, offset, data))
            return

        self._addentry(file, offset, data)

    def _addentry(self, file, offset, data):
        """add an append-only entry to memory and on-disk state"""
        if file in self.map or file in self._backupmap:
            return
        self.entries.append((file, offset, data))
        self.map[file] = len(self.entries) - 1
        # add enough data to the journal to do the truncate
        self.file.write("%s\0%d\n" % (file, offset))
        self.file.flush()

    @active
    def addbackup(self, file, hardlink=True, location=''):
        """Adds a backup of the file to the transaction

        Calling addbackup() creates a hardlink backup of the specified file
        that is used to recover the file in the event of the transaction
        aborting.

        * `file`: the file path, relative to .hg/store
        * `hardlink`: use a hardlink to quickly create the backup
        """
        if self._queue:
            msg = 'cannot use transaction.addbackup inside "group"'
            raise error.ProgrammingError(msg)

        if file in self.map or file in self._backupmap:
            return
        vfs = self._vfsmap[location]
        dirname, filename = vfs.split(file)
        backupfilename = "%s.backup.%s" % (self.journal, filename)
        backupfile = vfs.reljoin(dirname, backupfilename)
        if vfs.exists(file):
            filepath = vfs.join(file)
            backuppath = vfs.join(backupfile)
            util.copyfile(filepath, backuppath, hardlink=hardlink)
        else:
            backupfile = ''

        self._addbackupentry((location, file, backupfile, False))

    def _addbackupentry(self, entry):
        """register a new backup entry and write it to disk"""
        self._backupentries.append(entry)
        self._backupmap[entry[1]] = len(self._backupentries) - 1
        self._backupsfile.write("%s\0%s\0%s\0%d\n" % entry)
        self._backupsfile.flush()

    @active
    def registertmp(self, tmpfile, location=''):
        """register a temporary transaction file

        Such files will be deleted when the transaction exits (on both
        failure and success).
        """
        self._addbackupentry((location, '', tmpfile, False))

    @active
    def addfilegenerator(self, genid, filenames, genfunc, order=0,
                         location=''):
        """add a function to generate some files at transaction commit

        The `genfunc` argument is a function capable of generating proper
        content of each entry in the `filename` tuple.

        At transaction close time, `genfunc` will be called with one file
        object argument per entries in `filenames`.

        The transaction itself is responsible for the backup, creation and
        final write of such file.

        The `genid` argument is used to ensure the same set of file is only
        generated once. Call to `addfilegenerator` for a `genid` already
        present will overwrite the old entry.

        The `order` argument may be used to control the order in which multiple
        generator will be executed.

        The `location` arguments may be used to indicate the files are located
        outside of the standard directory for transaction. It should match
        one of the key of the `transaction.vfsmap` dictionary.
        """
        # For now, we are unable to do proper backup and restore of custom vfs
        # but for bookmarks that are handled outside this mechanism.
        self._filegenerators[genid] = (order, filenames, genfunc, location)

    @active
    def removefilegenerator(self, genid):
        """reverse of addfilegenerator, remove a file generator function"""
        if genid in self._filegenerators:
            del self._filegenerators[genid]

    def _generatefiles(self, suffix='', group=gengroupall):
        # write files registered for generation
        any = False
        for id, entry in sorted(self._filegenerators.iteritems()):
            any = True
            order, filenames, genfunc, location = entry

            # for generation at closing, check if it's before or after finalize
            postfinalize = group == gengrouppostfinalize
            if (group != gengroupall and
                (id in postfinalizegenerators) != (postfinalize)):
                continue

            vfs = self._vfsmap[location]
            files = []
            try:
                for name in filenames:
                    name += suffix
                    if suffix:
                        self.registertmp(name, location=location)
                        checkambig = False
                    else:
                        self.addbackup(name, location=location)
                        checkambig = (name, location) in self.checkambigfiles
                    files.append(vfs(name, 'w', atomictemp=True,
                                     checkambig=checkambig))
                genfunc(*files)
            finally:
                for f in files:
                    f.close()
        return any

    @active
    def find(self, file):
        if file in self.map:
            return self.entries[self.map[file]]
        if file in self._backupmap:
            return self._backupentries[self._backupmap[file]]
        return None

    @active
    def replace(self, file, offset, data=None):
        '''
        replace can only replace already committed entries
        that are not pending in the queue
        '''

        if file not in self.map:
            raise KeyError(file)
        index = self.map[file]
        self.entries[index] = (file, offset, data)
        self.file.write("%s\0%d\n" % (file, offset))
        self.file.flush()

    @active
    def nest(self):
        self.count += 1
        self.usages += 1
        return self

    def release(self):
        if self.count > 0:
            self.usages -= 1
        # if the transaction scopes are left without being closed, fail
        if self.count > 0 and self.usages == 0:
            self._abort()

    def running(self):
        return self.count > 0

    def addpending(self, category, callback):
        """add a callback to be called when the transaction is pending

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._pendingcallback[category] = callback

    @active
    def writepending(self):
        '''write pending file to temporary version

        This is used to allow hooks to view a transaction before commit'''
        categories = sorted(self._pendingcallback)
        for cat in categories:
            # remove callback since the data will have been flushed
            any = self._pendingcallback.pop(cat)(self)
            self._anypending = self._anypending or any
        self._anypending |= self._generatefiles(suffix='.pending')
        return self._anypending

    @active
    def addfinalize(self, category, callback):
        """add a callback to be called when the transaction is closed

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting old callbacks with
        newer callbacks.
        """
        self._finalizecallback[category] = callback

    @active
    def addpostclose(self, category, callback):
        """add or replace a callback to be called after the transaction closed

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._postclosecallback[category] = callback

    @active
    def getpostclose(self, category):
        """return a postclose callback added before, or None"""
        return self._postclosecallback.get(category, None)

    @active
    def addabort(self, category, callback):
        """add a callback to be called when the transaction is aborted.

        The transaction will be given as the first argument to the callback.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._abortcallback[category] = callback

    @active
    def close(self):
        '''commit the transaction'''
        if self.count == 1:
            self.validator(self)  # will raise exception if needed
            self.validator = None  # Help prevent cycles.
            self._generatefiles(group=gengroupprefinalize)
            categories = sorted(self._finalizecallback)
            for cat in categories:
                self._finalizecallback[cat](self)
            # Prevent double usage and help clear cycles.
            self._finalizecallback = None
            self._generatefiles(group=gengrouppostfinalize)

        self.count -= 1
        if self.count != 0:
            return
        self.file.close()
        self._backupsfile.close()
        # cleanup temporary files
        for l, f, b, c in self._backupentries:
            if l not in self._vfsmap and c:
                self.report("couldn't remove %s: unknown cache location %s\n"
                            % (b, l))
                continue
            vfs = self._vfsmap[l]
            if not f and b and vfs.exists(b):
                try:
                    vfs.unlink(b)
                except (IOError, OSError, error.Abort) as inst:
                    if not c:
                        raise
                    # Abort may be raised by a read-only opener
                    self.report("couldn't remove %s: %s\n"
                                % (vfs.join(b), inst))
        self.entries = []
        self._writeundo()
        if self.after:
            self.after()
            self.after = None  # Help prevent cycles.
        if self.opener.isfile(self._backupjournal):
            self.opener.unlink(self._backupjournal)
        if self.opener.isfile(self.journal):
            self.opener.unlink(self.journal)
        for l, _f, b, c in self._backupentries:
            if l not in self._vfsmap and c:
                self.report("couldn't remove %s: unknown cache location"
                            "%s\n" % (b, l))
                continue
            vfs = self._vfsmap[l]
            if b and vfs.exists(b):
                try:
                    vfs.unlink(b)
                except (IOError, OSError, error.Abort) as inst:
                    if not c:
                        raise
                    # Abort may be raised by a read-only opener
                    self.report("couldn't remove %s: %s\n"
                                % (vfs.join(b), inst))
        self._backupentries = []
        self.journal = None

        self.releasefn(self, True)  # notify success of closing transaction
        self.releasefn = None  # Help prevent cycles.

        # run post close action
        categories = sorted(self._postclosecallback)
        for cat in categories:
            self._postclosecallback[cat](self)
        # Prevent double usage and help clear cycles.
        self._postclosecallback = None

    @active
    def abort(self):
        '''abort the transaction (generally called on error, or when the
        transaction is not explicitly committed before going out of
        scope)'''
        self._abort()

    def _writeundo(self):
        """write transaction data for possible future undo call"""
        if self.undoname is None:
            return
        undobackupfile = self.opener.open("%s.backupfiles" % self.undoname, 'w')
        undobackupfile.write('%d\n' % version)
        for l, f, b, c in self._backupentries:
            if not f:  # temporary file
                continue
            if not b:
                u = ''
            else:
                if l not in self._vfsmap and c:
                    self.report("couldn't remove %s: unknown cache location"
                                "%s\n" % (b, l))
                    continue
                vfs = self._vfsmap[l]
                base, name = vfs.split(b)
                assert name.startswith(self.journal), name
                uname = name.replace(self.journal, self.undoname, 1)
                u = vfs.reljoin(base, uname)
                util.copyfile(vfs.join(b), vfs.join(u), hardlink=True)
            undobackupfile.write("%s\0%s\0%s\0%d\n" % (l, f, u, c))
        undobackupfile.close()

    def _abort(self):
        self.count = 0
        self.usages = 0
        self.file.close()
        self._backupsfile.close()

        try:
            if not self.entries and not self._backupentries:
                # nothing was written: just clean up the journal files
                if self._backupjournal:
                    self.opener.unlink(self._backupjournal)
                if self.journal:
                    self.opener.unlink(self.journal)
                return

            self.report(_("transaction abort!\n"))

            try:
                for cat in sorted(self._abortcallback):
                    self._abortcallback[cat](self)
                # Prevent double usage and help clear cycles.
                self._abortcallback = None
                _playback(self.journal, self.report, self.opener, self._vfsmap,
                          self.entries, self._backupentries, False,
                          checkambigfiles=self.checkambigfiles)
                self.report(_("rollback completed\n"))
            except BaseException:
                self.report(_("rollback failed - please run hg recover\n"))
        finally:
            self.journal = None
            self.releasefn(self, False)  # notify failure of transaction
            self.releasefn = None  # Help prevent cycles.
579 579
def rollback(opener, vfsmap, file, report, checkambigfiles=None):
    """Rolls back the transaction contained in the given file

    Reads the entries in the specified file, and the corresponding
    '*.backupfiles' file, to recover from an incomplete transaction.

    * `file`: a file containing a list of entries, specifying where
    to truncate each file.  The file should contain a list of
    file\0offset pairs, delimited by newlines. The corresponding
    '*.backupfiles' file should contain a list of file\0backupfile
    pairs, delimited by \0.

    `checkambigfiles` is a set of (path, vfs-location) tuples,
    which determine whether file stat ambiguity should be avoided at
    restoring corresponded files.
    """
    entries = []
    backupentries = []

    fp = opener.open(file)
    lines = fp.readlines()
    fp.close()
    for l in lines:
        try:
            f, o = l.split('\0')
            entries.append((f, int(o), None))
        except ValueError:
            report(_("couldn't read journal entry %r!\n") % l)

    backupjournal = "%s.backupfiles" % file
    if opener.exists(backupjournal):
        fp = opener.open(backupjournal)
        lines = fp.readlines()
        # bug fix: this handle was previously leaked (never closed)
        fp.close()
        if lines:
            ver = lines[0][:-1]
            # compare against the bytes form of the version so the check
            # also works on Python 3, where str(version) would be unicode
            if ver == (b'%d' % version):
                for line in lines[1:]:
                    if line:
                        # Shave off the trailing newline
                        line = line[:-1]
                        l, f, b, c = line.split('\0')
                        backupentries.append((l, f, b, bool(c)))
            else:
                report(_("journal was created by a different version of "
                         "Mercurial\n"))

    _playback(file, report, opener, vfsmap, entries, backupentries,
              checkambigfiles=checkambigfiles)
General Comments 0
You need to be logged in to leave comments. Login now