transaction: split new files into a separate set...
Joerg Sonnenberger
r46484:ec73a6a7 default
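In short: the transaction journal used to track every touched file in `_offsetmap`, mapping each file to the offset it must be truncated back to on rollback, with files created inside the transaction carried as offset-0 entries. This revision moves those zero-offset files into a dedicated `_newfiles` set, so callers such as strip can distinguish "pre-existing file, truncate it" from "file created by this transaction" without inspecting offset values. A minimal sketch of the new bookkeeping, with made-up names rather than the real transaction class:

class journalsketch(object):
    """Illustrative only; mirrors the _addentry()/findoffset() split below."""

    def __init__(self):
        self._offsetmap = {}    # pre-existing file -> offset to restore
        self._newfiles = set()  # files created inside the transaction

    def add(self, file, offset):
        if file in self._newfiles or file in self._offsetmap:
            return  # first registration wins, as in transaction.add()
        if offset:
            self._offsetmap[file] = offset
        else:
            self._newfiles.add(file)

    def findoffset(self, file):
        # new files behave as if journaled at offset 0
        if file in self._newfiles:
            return 0
        return self._offsetmap.get(file)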
@@ -1,544 +1,545 b''
1 1 # repair.py - functions for repository repair for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 4 # Copyright 2007 Matt Mackall
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from __future__ import absolute_import
10 10
11 11 import errno
12 12
13 13 from .i18n import _
14 14 from .node import (
15 15 hex,
16 16 short,
17 17 )
18 18 from . import (
19 19 bundle2,
20 20 changegroup,
21 21 discovery,
22 22 error,
23 23 exchange,
24 24 obsolete,
25 25 obsutil,
26 26 pathutil,
27 27 phases,
28 28 pycompat,
29 29 requirements,
30 30 scmutil,
31 31 util,
32 32 )
33 33 from .utils import (
34 34 hashutil,
35 35 stringutil,
36 36 )
37 37
38 38
39 39 def backupbundle(
40 40 repo, bases, heads, node, suffix, compress=True, obsolescence=True
41 41 ):
42 42 """create a bundle with the specified revisions as a backup"""
43 43
44 44 backupdir = b"strip-backup"
45 45 vfs = repo.vfs
46 46 if not vfs.isdir(backupdir):
47 47 vfs.mkdir(backupdir)
48 48
49 49 # Include a hash of all the nodes in the filename for uniqueness
50 50 allcommits = repo.set(b'%ln::%ln', bases, heads)
51 51 allhashes = sorted(c.hex() for c in allcommits)
52 52 totalhash = hashutil.sha1(b''.join(allhashes)).digest()
53 53 name = b"%s/%s-%s-%s.hg" % (
54 54 backupdir,
55 55 short(node),
56 56 hex(totalhash[:4]),
57 57 suffix,
58 58 )
59 59
60 60 cgversion = changegroup.localversion(repo)
61 61 comp = None
62 62 if cgversion != b'01':
63 63 bundletype = b"HG20"
64 64 if compress:
65 65 comp = b'BZ'
66 66 elif compress:
67 67 bundletype = b"HG10BZ"
68 68 else:
69 69 bundletype = b"HG10UN"
70 70
71 71 outgoing = discovery.outgoing(repo, missingroots=bases, ancestorsof=heads)
72 72 contentopts = {
73 73 b'cg.version': cgversion,
74 74 b'obsolescence': obsolescence,
75 75 b'phases': True,
76 76 }
77 77 return bundle2.writenewbundle(
78 78 repo.ui,
79 79 repo,
80 80 b'strip',
81 81 name,
82 82 bundletype,
83 83 outgoing,
84 84 contentopts,
85 85 vfs,
86 86 compression=comp,
87 87 )
88 88
89 89
90 90 def _collectfiles(repo, striprev):
91 91 """find out the filelogs affected by the strip"""
92 92 files = set()
93 93
94 94 for x in pycompat.xrange(striprev, len(repo)):
95 95 files.update(repo[x].files())
96 96
97 97 return sorted(files)
98 98
99 99
100 100 def _collectrevlog(revlog, striprev):
101 101 _, brokenset = revlog.getstrippoint(striprev)
102 102 return [revlog.linkrev(r) for r in brokenset]
103 103
104 104
105 105 def _collectbrokencsets(repo, files, striprev):
106 106 """return the changesets which will be broken by the truncation"""
107 107 s = set()
108 108
109 109 for revlog in manifestrevlogs(repo):
110 110 s.update(_collectrevlog(revlog, striprev))
111 111 for fname in files:
112 112 s.update(_collectrevlog(repo.file(fname), striprev))
113 113
114 114 return s
115 115
116 116
117 117 def strip(ui, repo, nodelist, backup=True, topic=b'backup'):
118 118 # This function requires the caller to lock the repo, but it operates
119 119 # within a transaction of its own, and thus requires there to be no current
120 120 # transaction when it is called.
121 121 if repo.currenttransaction() is not None:
122 122 raise error.ProgrammingError(b'cannot strip from inside a transaction')
123 123
124 124 # Simple way to maintain backwards compatibility for this
125 125 # argument.
126 126 if backup in [b'none', b'strip']:
127 127 backup = False
128 128
129 129 repo = repo.unfiltered()
130 130 repo.destroying()
131 131 vfs = repo.vfs
132 132 # load bookmarks before changelog to avoid side effects from an outdated
133 133 # changelog (see repo._refreshchangelog)
134 134 repo._bookmarks
135 135 cl = repo.changelog
136 136
137 137 # TODO handle undo of merge sets
138 138 if isinstance(nodelist, bytes):
139 139 nodelist = [nodelist]
140 140 striplist = [cl.rev(node) for node in nodelist]
141 141 striprev = min(striplist)
142 142
143 143 files = _collectfiles(repo, striprev)
144 144 saverevs = _collectbrokencsets(repo, files, striprev)
145 145
146 146 # Some revisions with rev > striprev may not be descendants of striprev.
147 147 # We have to find these revisions and put them in a bundle, so that
148 148 # we can restore them after the truncations.
149 149 # To create the bundle we use repo.changegroupsubset which requires
150 150 # the list of heads and bases of the set of interesting revisions.
151 151 # (head = revision in the set that has no descendant in the set;
152 152 # base = revision in the set that has no ancestor in the set)
153 153 tostrip = set(striplist)
154 154 saveheads = set(saverevs)
155 155 for r in cl.revs(start=striprev + 1):
156 156 if any(p in tostrip for p in cl.parentrevs(r)):
157 157 tostrip.add(r)
158 158
159 159 if r not in tostrip:
160 160 saverevs.add(r)
161 161 saveheads.difference_update(cl.parentrevs(r))
162 162 saveheads.add(r)
163 163 saveheads = [cl.node(r) for r in saveheads]
164 164
165 165 # compute base nodes
166 166 if saverevs:
167 167 descendants = set(cl.descendants(saverevs))
168 168 saverevs.difference_update(descendants)
169 169 savebases = [cl.node(r) for r in saverevs]
170 170 stripbases = [cl.node(r) for r in tostrip]
171 171
172 172 stripobsidx = obsmarkers = ()
173 173 if repo.ui.configbool(b'devel', b'strip-obsmarkers'):
174 174 obsmarkers = obsutil.exclusivemarkers(repo, stripbases)
175 175 if obsmarkers:
176 176 stripobsidx = [
177 177 i for i, m in enumerate(repo.obsstore) if m in obsmarkers
178 178 ]
179 179
180 180 newbmtarget, updatebm = _bookmarkmovements(repo, tostrip)
181 181
182 182 backupfile = None
183 183 node = nodelist[-1]
184 184 if backup:
185 185 backupfile = _createstripbackup(repo, stripbases, node, topic)
186 186 # create a changegroup for all the branches we need to keep
187 187 tmpbundlefile = None
188 188 if saveheads:
189 189 # do not compress temporary bundle if we remove it from disk later
190 190 #
191 191 # We do not include obsolescence; it might re-introduce prune markers
192 192 # we are trying to strip. This is harmless since the stripped markers
193 193 # are already backed up and we did not touch the markers for the
194 194 # saved changesets.
195 195 tmpbundlefile = backupbundle(
196 196 repo,
197 197 savebases,
198 198 saveheads,
199 199 node,
200 200 b'temp',
201 201 compress=False,
202 202 obsolescence=False,
203 203 )
204 204
205 205 with ui.uninterruptible():
206 206 try:
207 207 with repo.transaction(b"strip") as tr:
208 208 # TODO this code violates the interface abstraction of the
209 209 # transaction and makes assumptions that file storage is
210 210 # using append-only files. We'll need some kind of storage
211 211 # API to handle stripping for us.
212 212 oldfiles = set(tr._offsetmap.keys())
213 oldfiles.update(tr._newfiles)
213 214
214 215 tr.startgroup()
215 216 cl.strip(striprev, tr)
216 217 stripmanifest(repo, striprev, tr, files)
217 218
218 219 for fn in files:
219 220 repo.file(fn).strip(striprev, tr)
220 221 tr.endgroup()
221 222
222 223 entries = tr.readjournal()
223 224
224 225 for file, troffset in entries:
225 226 if file in oldfiles:
226 227 continue
227 228 with repo.svfs(file, b'a', checkambig=True) as fp:
228 229 fp.truncate(troffset)
229 230 if troffset == 0:
230 231 repo.store.markremoved(file)
231 232
232 233 deleteobsmarkers(repo.obsstore, stripobsidx)
233 234 del repo.obsstore
234 235 repo.invalidatevolatilesets()
235 236 repo._phasecache.filterunknown(repo)
236 237
237 238 if tmpbundlefile:
238 239 ui.note(_(b"adding branch\n"))
239 240 f = vfs.open(tmpbundlefile, b"rb")
240 241 gen = exchange.readbundle(ui, f, tmpbundlefile, vfs)
241 242 if not repo.ui.verbose:
242 243 # silence internal shuffling chatter
243 244 repo.ui.pushbuffer()
244 245 tmpbundleurl = b'bundle:' + vfs.join(tmpbundlefile)
245 246 txnname = b'strip'
246 247 if not isinstance(gen, bundle2.unbundle20):
247 248 txnname = b"strip\n%s" % util.hidepassword(tmpbundleurl)
248 249 with repo.transaction(txnname) as tr:
249 250 bundle2.applybundle(
250 251 repo, gen, tr, source=b'strip', url=tmpbundleurl
251 252 )
252 253 if not repo.ui.verbose:
253 254 repo.ui.popbuffer()
254 255 f.close()
255 256
256 257 with repo.transaction(b'repair') as tr:
257 258 bmchanges = [(m, repo[newbmtarget].node()) for m in updatebm]
258 259 repo._bookmarks.applychanges(repo, tr, bmchanges)
259 260
260 261 # remove undo files
261 262 for undovfs, undofile in repo.undofiles():
262 263 try:
263 264 undovfs.unlink(undofile)
264 265 except OSError as e:
265 266 if e.errno != errno.ENOENT:
266 267 ui.warn(
267 268 _(b'error removing %s: %s\n')
268 269 % (
269 270 undovfs.join(undofile),
270 271 stringutil.forcebytestr(e),
271 272 )
272 273 )
273 274
274 275 except: # re-raises
275 276 if backupfile:
276 277 ui.warn(
277 278 _(b"strip failed, backup bundle stored in '%s'\n")
278 279 % vfs.join(backupfile)
279 280 )
280 281 if tmpbundlefile:
281 282 ui.warn(
282 283 _(b"strip failed, unrecovered changes stored in '%s'\n")
283 284 % vfs.join(tmpbundlefile)
284 285 )
285 286 ui.warn(
286 287 _(
287 288 b"(fix the problem, then recover the changesets with "
288 289 b"\"hg unbundle '%s'\")\n"
289 290 )
290 291 % vfs.join(tmpbundlefile)
291 292 )
292 293 raise
293 294 else:
294 295 if tmpbundlefile:
295 296 # Remove temporary bundle only if there were no exceptions
296 297 vfs.unlink(tmpbundlefile)
297 298
298 299 repo.destroyed()
299 300 # return the backup file path (or None if 'backup' was False) so
300 301 # extensions can use it
301 302 return backupfile
302 303
303 304
304 305 def softstrip(ui, repo, nodelist, backup=True, topic=b'backup'):
305 306 """perform a "soft" strip using the archived phase"""
306 307 tostrip = [c.node() for c in repo.set(b'sort(%ln::)', nodelist)]
307 308 if not tostrip:
308 309 return None
309 310
310 311 newbmtarget, updatebm = _bookmarkmovements(repo, tostrip)
311 312 if backup:
312 313 node = tostrip[0]
313 314 backupfile = _createstripbackup(repo, tostrip, node, topic)
314 315
315 316 with repo.transaction(b'strip') as tr:
316 317 phases.retractboundary(repo, tr, phases.archived, tostrip)
317 318 bmchanges = [(m, repo[newbmtarget].node()) for m in updatebm]
318 319 repo._bookmarks.applychanges(repo, tr, bmchanges)
319 320 return backupfile
320 321
321 322
322 323 def _bookmarkmovements(repo, tostrip):
323 324 # compute necessary bookmark movement
324 325 bm = repo._bookmarks
325 326 updatebm = []
326 327 for m in bm:
327 328 rev = repo[bm[m]].rev()
328 329 if rev in tostrip:
329 330 updatebm.append(m)
330 331 newbmtarget = None
331 332 # If we need to move bookmarks, compute bookmark
332 333 # targets. Otherwise we can skip doing this logic.
333 334 if updatebm:
334 335 # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)),
335 336 # but is much faster
336 337 newbmtarget = repo.revs(b'max(parents(%ld) - (%ld))', tostrip, tostrip)
337 338 if newbmtarget:
338 339 newbmtarget = repo[newbmtarget.first()].node()
339 340 else:
340 341 newbmtarget = b'.'
341 342 return newbmtarget, updatebm
342 343
343 344
344 345 def _createstripbackup(repo, stripbases, node, topic):
345 346 # backup the changeset we are about to strip
346 347 vfs = repo.vfs
347 348 cl = repo.changelog
348 349 backupfile = backupbundle(repo, stripbases, cl.heads(), node, topic)
349 350 repo.ui.status(_(b"saved backup bundle to %s\n") % vfs.join(backupfile))
350 351 repo.ui.log(
351 352 b"backupbundle", b"saved backup bundle to %s\n", vfs.join(backupfile)
352 353 )
353 354 return backupfile
354 355
355 356
356 357 def safestriproots(ui, repo, nodes):
357 358 """return list of roots of nodes where descendants are covered by nodes"""
358 359 torev = repo.unfiltered().changelog.rev
359 360 revs = {torev(n) for n in nodes}
360 361 # tostrip = wanted - unsafe = wanted - ancestors(orphaned)
361 362 # orphaned = affected - wanted
362 363 # affected = descendants(roots(wanted))
363 364 # wanted = revs
364 365 revset = b'%ld - ( ::( (roots(%ld):: and not _phase(%s)) -%ld) )'
365 366 tostrip = set(repo.revs(revset, revs, revs, phases.internal, revs))
366 367 notstrip = revs - tostrip
367 368 if notstrip:
368 369 nodestr = b', '.join(sorted(short(repo[n].node()) for n in notstrip))
369 370 ui.warn(
370 371 _(b'warning: orphaned descendants detected, not stripping %s\n')
371 372 % nodestr
372 373 )
373 374 return [c.node() for c in repo.set(b'roots(%ld)', tostrip)]
374 375
375 376
376 377 class stripcallback(object):
377 378 """used as a transaction postclose callback"""
378 379
379 380 def __init__(self, ui, repo, backup, topic):
380 381 self.ui = ui
381 382 self.repo = repo
382 383 self.backup = backup
383 384 self.topic = topic or b'backup'
384 385 self.nodelist = []
385 386
386 387 def addnodes(self, nodes):
387 388 self.nodelist.extend(nodes)
388 389
389 390 def __call__(self, tr):
390 391 roots = safestriproots(self.ui, self.repo, self.nodelist)
391 392 if roots:
392 393 strip(self.ui, self.repo, roots, self.backup, self.topic)
393 394
394 395
395 396 def delayedstrip(ui, repo, nodelist, topic=None, backup=True):
396 397 """like strip, but works inside transaction and won't strip irreverent revs
397 398
398 399 nodelist must explicitly contain all descendants. Otherwise a warning will
399 400 be printed that some nodes are not stripped.
400 401
401 402 Will do a backup if `backup` is True. The last non-None "topic" will be
402 403 used as the backup topic name. The default backup topic name is "backup".
403 404 """
404 405 tr = repo.currenttransaction()
405 406 if not tr:
406 407 nodes = safestriproots(ui, repo, nodelist)
407 408 return strip(ui, repo, nodes, backup=backup, topic=topic)
408 409 # transaction postclose callbacks are called in alphabetical order.
409 410 # use '\xff' as prefix so we are likely to be called last.
410 411 callback = tr.getpostclose(b'\xffstrip')
411 412 if callback is None:
412 413 callback = stripcallback(ui, repo, backup=backup, topic=topic)
413 414 tr.addpostclose(b'\xffstrip', callback)
414 415 if topic:
415 416 callback.topic = topic
416 417 callback.addnodes(nodelist)
417 418
418 419
419 420 def stripmanifest(repo, striprev, tr, files):
420 421 for revlog in manifestrevlogs(repo):
421 422 revlog.strip(striprev, tr)
422 423
423 424
424 425 def manifestrevlogs(repo):
425 426 yield repo.manifestlog.getstorage(b'')
426 427 if scmutil.istreemanifest(repo):
427 428 # This logic is safe if treemanifest isn't enabled, but also
428 429 # pointless, so we skip it if treemanifest isn't enabled.
429 430 for unencoded, encoded, size in repo.store.datafiles():
430 431 if unencoded.startswith(b'meta/') and unencoded.endswith(
431 432 b'00manifest.i'
432 433 ):
433 434 dir = unencoded[5:-12]
434 435 yield repo.manifestlog.getstorage(dir)
435 436
436 437
437 438 def rebuildfncache(ui, repo):
438 439 """Rebuilds the fncache file from repo history.
439 440
440 441 Missing entries will be added. Extra entries will be removed.
441 442 """
442 443 repo = repo.unfiltered()
443 444
444 445 if b'fncache' not in repo.requirements:
445 446 ui.warn(
446 447 _(
447 448 b'(not rebuilding fncache because repository does not '
448 449 b'support fncache)\n'
449 450 )
450 451 )
451 452 return
452 453
453 454 with repo.lock():
454 455 fnc = repo.store.fncache
455 456 fnc.ensureloaded(warn=ui.warn)
456 457
457 458 oldentries = set(fnc.entries)
458 459 newentries = set()
459 460 seenfiles = set()
460 461
461 462 progress = ui.makeprogress(
462 463 _(b'rebuilding'), unit=_(b'changesets'), total=len(repo)
463 464 )
464 465 for rev in repo:
465 466 progress.update(rev)
466 467
467 468 ctx = repo[rev]
468 469 for f in ctx.files():
469 470 # This is to minimize I/O.
470 471 if f in seenfiles:
471 472 continue
472 473 seenfiles.add(f)
473 474
474 475 i = b'data/%s.i' % f
475 476 d = b'data/%s.d' % f
476 477
477 478 if repo.store._exists(i):
478 479 newentries.add(i)
479 480 if repo.store._exists(d):
480 481 newentries.add(d)
481 482
482 483 progress.complete()
483 484
484 485 if requirements.TREEMANIFEST_REQUIREMENT in repo.requirements:
485 486 # This logic is safe if treemanifest isn't enabled, but also
486 487 # pointless, so we skip it if treemanifest isn't enabled.
487 488 for dir in pathutil.dirs(seenfiles):
488 489 i = b'meta/%s/00manifest.i' % dir
489 490 d = b'meta/%s/00manifest.d' % dir
490 491
491 492 if repo.store._exists(i):
492 493 newentries.add(i)
493 494 if repo.store._exists(d):
494 495 newentries.add(d)
495 496
496 497 addcount = len(newentries - oldentries)
497 498 removecount = len(oldentries - newentries)
498 499 for p in sorted(oldentries - newentries):
499 500 ui.write(_(b'removing %s\n') % p)
500 501 for p in sorted(newentries - oldentries):
501 502 ui.write(_(b'adding %s\n') % p)
502 503
503 504 if addcount or removecount:
504 505 ui.write(
505 506 _(b'%d items added, %d removed from fncache\n')
506 507 % (addcount, removecount)
507 508 )
508 509 fnc.entries = newentries
509 510 fnc._dirty = True
510 511
511 512 with repo.transaction(b'fncache') as tr:
512 513 fnc.write(tr)
513 514 else:
514 515 ui.write(_(b'fncache already up to date\n'))
515 516
516 517
517 518 def deleteobsmarkers(obsstore, indices):
518 519 """Delete some obsmarkers from obsstore and return how many were deleted
519 520
520 521 'indices' is a list of ints which are the indices
521 522 of the markers to be deleted.
522 523
523 524 Every invocation of this function completely rewrites the obsstore file,
524 525 skipping the markers we want removed. A new temporary file is
525 526 created, remaining markers are written there and on .close() this file
526 527 gets atomically renamed to obsstore, thus guaranteeing consistency."""
527 528 if not indices:
528 529 # we don't want to rewrite the obsstore with the same content
529 530 return
530 531
531 532 left = []
532 533 current = obsstore._all
533 534 n = 0
534 535 for i, m in enumerate(current):
535 536 if i in indices:
536 537 n += 1
537 538 continue
538 539 left.append(m)
539 540
540 541 newobsstorefile = obsstore.svfs(b'obsstore', b'w', atomictemp=True)
541 542 for bytes in obsolete.encodemarkers(left, True, obsstore._version):
542 543 newobsstorefile.write(bytes)
543 544 newobsstorefile.close()
544 545 return n
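On the repair.py side the functional change is one line: strip() now seeds its set of already-journaled files from both structures before truncating. A condensed, hypothetical restatement of the truncation pass above (the helper name is invented; the logic follows the diff):

def _truncatestripped(repo, tr, entries):
    # files the strip transaction tracked before the truncation started,
    # including the new-file set introduced by this changeset
    oldfiles = set(tr._offsetmap.keys())
    oldfiles.update(tr._newfiles)
    for file, troffset in entries:
        if file in oldfiles:
            continue  # the transaction restores these itself
        with repo.svfs(file, b'a', checkambig=True) as fp:
            fp.truncate(troffset)
        if troffset == 0:
            repo.store.markremoved(file)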
@@ -1,737 +1,765 b''
1 1 # transaction.py - simple journaling scheme for mercurial
2 2 #
3 3 # This transaction scheme is intended to gracefully handle program
4 4 # errors and interruptions. More serious failures like system crashes
5 5 # can be recovered with an fsck-like tool. As the whole repository is
6 6 # effectively log-structured, this should amount to simply truncating
7 7 # anything that isn't referenced in the changelog.
8 8 #
9 9 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
10 10 #
11 11 # This software may be used and distributed according to the terms of the
12 12 # GNU General Public License version 2 or any later version.
13 13
14 14 from __future__ import absolute_import
15 15
16 16 import errno
17 17
18 18 from .i18n import _
19 19 from . import (
20 20 error,
21 21 pycompat,
22 22 util,
23 23 )
24 24 from .utils import stringutil
25 25
26 26 version = 2
27 27
28 28 # These are the file generators that should only be executed after the
29 29 # finalizers are done, since they rely on the output of the finalizers (like
30 30 # the changelog having been written).
31 31 postfinalizegenerators = {b'bookmarks', b'dirstate'}
32 32
33 33 GEN_GROUP_ALL = b'all'
34 34 GEN_GROUP_PRE_FINALIZE = b'prefinalize'
35 35 GEN_GROUP_POST_FINALIZE = b'postfinalize'
36 36
37 37
38 38 def active(func):
39 39 def _active(self, *args, **kwds):
40 40 if self._count == 0:
41 41 raise error.ProgrammingError(
42 42 b'cannot use transaction when it is already committed/aborted'
43 43 )
44 44 return func(self, *args, **kwds)
45 45
46 46 return _active
47 47
48 48
49 49 def _playback(
50 50 journal,
51 51 report,
52 52 opener,
53 53 vfsmap,
54 54 entries,
55 55 backupentries,
56 56 unlink=True,
57 57 checkambigfiles=None,
58 58 ):
59 59 for f, o in entries:
60 60 if o or not unlink:
61 61 checkambig = checkambigfiles and (f, b'') in checkambigfiles
62 62 try:
63 63 fp = opener(f, b'a', checkambig=checkambig)
64 64 if fp.tell() < o:
65 65 raise error.Abort(
66 66 _(
67 67 b"attempted to truncate %s to %d bytes, but it was "
68 68 b"already %d bytes\n"
69 69 )
70 70 % (f, o, fp.tell())
71 71 )
72 72 fp.truncate(o)
73 73 fp.close()
74 74 except IOError:
75 75 report(_(b"failed to truncate %s\n") % f)
76 76 raise
77 77 else:
78 78 try:
79 79 opener.unlink(f)
80 80 except (IOError, OSError) as inst:
81 81 if inst.errno != errno.ENOENT:
82 82 raise
83 83
84 84 backupfiles = []
85 85 for l, f, b, c in backupentries:
86 86 if l not in vfsmap and c:
87 87 report(b"couldn't handle %s: unknown cache location %s\n" % (b, l))
88 88 vfs = vfsmap[l]
89 89 try:
90 90 if f and b:
91 91 filepath = vfs.join(f)
92 92 backuppath = vfs.join(b)
93 93 checkambig = checkambigfiles and (f, l) in checkambigfiles
94 94 try:
95 95 util.copyfile(backuppath, filepath, checkambig=checkambig)
96 96 backupfiles.append(b)
97 97 except IOError:
98 98 report(_(b"failed to recover %s\n") % f)
99 99 else:
100 100 target = f or b
101 101 try:
102 102 vfs.unlink(target)
103 103 except (IOError, OSError) as inst:
104 104 if inst.errno != errno.ENOENT:
105 105 raise
106 106 except (IOError, OSError, error.Abort):
107 107 if not c:
108 108 raise
109 109
110 110 backuppath = b"%s.backupfiles" % journal
111 111 if opener.exists(backuppath):
112 112 opener.unlink(backuppath)
113 113 opener.unlink(journal)
114 114 try:
115 115 for f in backupfiles:
116 116 if opener.exists(f):
117 117 opener.unlink(f)
118 118 except (IOError, OSError, error.Abort):
119 119 # only pure backup files remain, it is safe to ignore any error
120 120 pass
121 121
122 122
123 123 class transaction(util.transactional):
124 124 def __init__(
125 125 self,
126 126 report,
127 127 opener,
128 128 vfsmap,
129 129 journalname,
130 130 undoname=None,
131 131 after=None,
132 132 createmode=None,
133 133 validator=None,
134 134 releasefn=None,
135 135 checkambigfiles=None,
136 136 name='<unnamed>',
137 137 ):
138 138 """Begin a new transaction
139 139
140 140 Begins a new transaction that allows rolling back writes in the event of
141 141 an exception.
142 142
143 143 * `after`: called after the transaction has been committed
144 144 * `createmode`: the mode of the journal file that will be created
145 145 * `releasefn`: called after releasing (with transaction and result)
146 146
147 147 `checkambigfiles` is a set of (path, vfs-location) tuples,
148 148 which determine whether file stat ambiguity should be avoided
149 149 for the corresponding files.
150 150 """
151 151 self._count = 1
152 152 self._usages = 1
153 153 self._report = report
154 154 # a vfs to the store content
155 155 self._opener = opener
156 156 # a map to access file in various {location -> vfs}
157 157 vfsmap = vfsmap.copy()
158 158 vfsmap[b''] = opener # set default value
159 159 self._vfsmap = vfsmap
160 160 self._after = after
161 161 self._offsetmap = {}
162 self._newfiles = set()
162 163 self._journal = journalname
163 164 self._undoname = undoname
164 165 self._queue = []
165 166 # A callback to do something just after releasing transaction.
166 167 if releasefn is None:
167 168 releasefn = lambda tr, success: None
168 169 self._releasefn = releasefn
169 170
170 171 self._checkambigfiles = set()
171 172 if checkambigfiles:
172 173 self._checkambigfiles.update(checkambigfiles)
173 174
174 175 self._names = [name]
175 176
176 177 # A dict dedicated to precisely tracking the changes introduced in the
177 178 # transaction.
178 179 self.changes = {}
179 180
180 181 # a dict of arguments to be passed to hooks
181 182 self.hookargs = {}
182 183 self._file = opener.open(self._journal, b"w+")
183 184
184 185 # a list of ('location', 'path', 'backuppath', cache) entries.
185 186 # - if 'backuppath' is empty, no file existed at backup time
186 187 # - if 'path' is empty, this is a temporary transaction file
187 188 # - if 'location' is not empty, the path is outside main opener reach.
188 189 # use 'location' value as a key in a vfsmap to find the right 'vfs'
189 190 # (cache is currently unused)
190 191 self._backupentries = []
191 192 self._backupmap = {}
192 193 self._backupjournal = b"%s.backupfiles" % self._journal
193 194 self._backupsfile = opener.open(self._backupjournal, b'w')
194 195 self._backupsfile.write(b'%d\n' % version)
195 196
196 197 if createmode is not None:
197 198 opener.chmod(self._journal, createmode & 0o666)
198 199 opener.chmod(self._backupjournal, createmode & 0o666)
199 200
200 201 # hold file generations to be performed on commit
201 202 self._filegenerators = {}
202 203 # hold callback to write pending data for hooks
203 204 self._pendingcallback = {}
205 206 # True if any pending data has ever been written
205 206 self._anypending = False
206 207 # holds callback to call when writing the transaction
207 208 self._finalizecallback = {}
208 209 # holds callback to call when validating the transaction
209 210 # should raise exception if anything is wrong
210 211 self._validatecallback = {}
211 212 if validator is not None:
212 213 self._validatecallback[b'001-userhooks'] = validator
213 214 # hold callback for post transaction close
214 215 self._postclosecallback = {}
215 216 # holds callbacks to call during abort
216 217 self._abortcallback = {}
217 218
218 219 def __repr__(self):
219 220 name = '/'.join(self._names)
220 221 return '<transaction name=%s, count=%d, usages=%d>' % (
221 222 name,
222 223 self._count,
223 224 self._usages,
224 225 )
225 226
226 227 def __del__(self):
227 228 if self._journal:
228 229 self._abort()
229 230
230 231 @active
231 232 def startgroup(self):
232 233 """delay registration of file entry
233 234
234 235 This is used by strip to delay vision of strip offset. The transaction
235 236 sees either none or all of the strip actions to be done."""
236 237 self._queue.append([])
237 238
238 239 @active
239 240 def endgroup(self):
240 241 """apply delayed registration of file entry.
241 242
242 243 This is used by strip to delay vision of strip offset. The transaction
243 244 sees either none or all of the strip actions to be done."""
244 245 q = self._queue.pop()
245 246 for f, o in q:
246 247 self._addentry(f, o)
247 248
248 249 @active
249 250 def add(self, file, offset):
250 251 """record the state of an append-only file before update"""
251 if file in self._offsetmap or file in self._backupmap:
252 if (
253 file in self._newfiles
254 or file in self._offsetmap
255 or file in self._backupmap
256 ):
252 257 return
253 258 if self._queue:
254 259 self._queue[-1].append((file, offset))
255 260 return
256 261
257 262 self._addentry(file, offset)
258 263
259 264 def _addentry(self, file, offset):
260 265 """add a append-only entry to memory and on-disk state"""
261 if file in self._offsetmap or file in self._backupmap:
266 if (
267 file in self._newfiles
268 or file in self._offsetmap
269 or file in self._backupmap
270 ):
262 271 return
263 self._offsetmap[file] = offset
272 if offset:
273 self._offsetmap[file] = offset
274 else:
275 self._newfiles.add(file)
264 276 # add enough data to the journal to do the truncate
265 277 self._file.write(b"%s\0%d\n" % (file, offset))
266 278 self._file.flush()
267 279
268 280 @active
269 281 def addbackup(self, file, hardlink=True, location=b''):
270 282 """Adds a backup of the file to the transaction
271 283
272 284 Calling addbackup() creates a hardlink backup of the specified file
273 285 that is used to recover the file in the event of the transaction
274 286 aborting.
275 287
276 288 * `file`: the file path, relative to .hg/store
277 289 * `hardlink`: use a hardlink to quickly create the backup
278 290 """
279 291 if self._queue:
280 292 msg = b'cannot use transaction.addbackup inside "group"'
281 293 raise error.ProgrammingError(msg)
282 294
283 if file in self._offsetmap or file in self._backupmap:
295 if (
296 file in self._newfiles
297 or file in self._offsetmap
298 or file in self._backupmap
299 ):
284 300 return
285 301 vfs = self._vfsmap[location]
286 302 dirname, filename = vfs.split(file)
287 303 backupfilename = b"%s.backup.%s" % (self._journal, filename)
288 304 backupfile = vfs.reljoin(dirname, backupfilename)
289 305 if vfs.exists(file):
290 306 filepath = vfs.join(file)
291 307 backuppath = vfs.join(backupfile)
292 308 util.copyfile(filepath, backuppath, hardlink=hardlink)
293 309 else:
294 310 backupfile = b''
295 311
296 312 self._addbackupentry((location, file, backupfile, False))
297 313
298 314 def _addbackupentry(self, entry):
299 315 """register a new backup entry and write it to disk"""
300 316 self._backupentries.append(entry)
301 317 self._backupmap[entry[1]] = len(self._backupentries) - 1
302 318 self._backupsfile.write(b"%s\0%s\0%s\0%d\n" % entry)
303 319 self._backupsfile.flush()
304 320
305 321 @active
306 322 def registertmp(self, tmpfile, location=b''):
307 323 """register a temporary transaction file
308 324
309 325 Such files will be deleted when the transaction exits (on both
310 326 failure and success).
311 327 """
312 328 self._addbackupentry((location, b'', tmpfile, False))
313 329
314 330 @active
315 331 def addfilegenerator(
316 332 self, genid, filenames, genfunc, order=0, location=b''
317 333 ):
318 334 """add a function to generates some files at transaction commit
319 335
320 336 The `genfunc` argument is a function capable of generating proper
321 337 content of each entry in the `filenames` tuple.
322 338
323 339 At transaction close time, `genfunc` will be called with one file
324 340 object argument per entry in `filenames`.
325 341
326 342 The transaction itself is responsible for the backup, creation and
327 343 final write of such file.
328 344
329 345 The `genid` argument is used to ensure the same set of files is only
330 346 generated once. Call to `addfilegenerator` for a `genid` already
331 347 present will overwrite the old entry.
332 348
333 349 The `order` argument may be used to control the order in which multiple
334 350 generators will be executed.
335 351
336 352 The `location` arguments may be used to indicate the files are located
337 353 outside of the standard directory for transactions. It should match
338 354 one of the keys of the `transaction.vfsmap` dictionary.
339 355 """
340 356 # For now, we are unable to do proper backup and restore of custom vfs
341 357 # except for bookmarks, which are handled outside this mechanism.
342 358 self._filegenerators[genid] = (order, filenames, genfunc, location)
343 359
344 360 @active
345 361 def removefilegenerator(self, genid):
346 362 """reverse of addfilegenerator, remove a file generator function"""
347 363 if genid in self._filegenerators:
348 364 del self._filegenerators[genid]
349 365
350 366 def _generatefiles(self, suffix=b'', group=GEN_GROUP_ALL):
351 367 # write files registered for generation
352 368 any = False
353 369
354 370 if group == GEN_GROUP_ALL:
355 371 skip_post = skip_pre = False
356 372 else:
357 373 skip_pre = group == GEN_GROUP_POST_FINALIZE
358 374 skip_post = group == GEN_GROUP_PRE_FINALIZE
359 375
360 376 for id, entry in sorted(pycompat.iteritems(self._filegenerators)):
361 377 any = True
362 378 order, filenames, genfunc, location = entry
363 379
364 380 # for generation at closing, check if it's before or after finalize
365 381 is_post = id in postfinalizegenerators
366 382 if skip_post and is_post:
367 383 continue
368 384 elif skip_pre and not is_post:
369 385 continue
370 386
371 387 vfs = self._vfsmap[location]
372 388 files = []
373 389 try:
374 390 for name in filenames:
375 391 name += suffix
376 392 if suffix:
377 393 self.registertmp(name, location=location)
378 394 checkambig = False
379 395 else:
380 396 self.addbackup(name, location=location)
381 397 checkambig = (name, location) in self._checkambigfiles
382 398 files.append(
383 399 vfs(name, b'w', atomictemp=True, checkambig=checkambig)
384 400 )
385 401 genfunc(*files)
386 402 for f in files:
387 403 f.close()
388 404 # skip discard() loop since we're sure no open file remains
389 405 del files[:]
390 406 finally:
391 407 for f in files:
392 408 f.discard()
393 409 return any
394 410
395 411 @active
396 412 def findoffset(self, file):
413 if file in self._newfiles:
414 return 0
397 415 return self._offsetmap.get(file)
398 416
399 417 @active
400 418 def readjournal(self):
401 419 self._file.seek(0)
402 420 entries = []
403 421 for l in self._file:
404 422 file, troffset = l.split(b'\0')
405 423 entries.append((file, int(troffset)))
406 424 return entries
407 425
408 426 @active
409 427 def replace(self, file, offset):
410 428 '''
411 429 replace can only replace already committed entries
412 430 that are not pending in the queue
413 431 '''
414
415 if file not in self._offsetmap:
432 if file in self._newfiles:
433 if not offset:
434 return
435 self._newfiles.remove(file)
436 self._offsetmap[file] = offset
437 elif file in self._offsetmap:
438 if not offset:
439 del self._offsetmap[file]
440 self._newfiles.add(file)
441 else:
442 self._offsetmap[file] = offset
443 else:
416 444 raise KeyError(file)
417 self._offsetmap[file] = offset
418 445 self._file.write(b"%s\0%d\n" % (file, offset))
419 446 self._file.flush()
420 447
421 448 @active
422 449 def nest(self, name='<unnamed>'):
423 450 self._count += 1
424 451 self._usages += 1
425 452 self._names.append(name)
426 453 return self
427 454
428 455 def release(self):
429 456 if self._count > 0:
430 457 self._usages -= 1
431 458 if self._names:
432 459 self._names.pop()
433 460 # if the transaction scopes are left without being closed, fail
434 461 if self._count > 0 and self._usages == 0:
435 462 self._abort()
436 463
437 464 def running(self):
438 465 return self._count > 0
439 466
440 467 def addpending(self, category, callback):
441 468 """add a callback to be called when the transaction is pending
442 469
443 470 The transaction will be given as callback's first argument.
444 471
445 472 Category is a unique identifier to allow overwriting an old callback
446 473 with a newer callback.
447 474 """
448 475 self._pendingcallback[category] = callback
449 476
450 477 @active
451 478 def writepending(self):
452 479 '''write pending file to temporary version
453 480
454 481 This is used to allow hooks to view a transaction before commit'''
455 482 categories = sorted(self._pendingcallback)
456 483 for cat in categories:
457 484 # remove callback since the data will have been flushed
458 485 any = self._pendingcallback.pop(cat)(self)
459 486 self._anypending = self._anypending or any
460 487 self._anypending |= self._generatefiles(suffix=b'.pending')
461 488 return self._anypending
462 489
463 490 @active
464 491 def hasfinalize(self, category):
465 492 """check is a callback already exist for a category
466 493 """
467 494 return category in self._finalizecallback
468 495
469 496 @active
470 497 def addfinalize(self, category, callback):
471 498 """add a callback to be called when the transaction is closed
472 499
473 500 The transaction will be given as callback's first argument.
474 501
475 502 Category is a unique identifier to allow overwriting old callbacks with
476 503 newer callbacks.
477 504 """
478 505 self._finalizecallback[category] = callback
479 506
480 507 @active
481 508 def addpostclose(self, category, callback):
482 509 """add or replace a callback to be called after the transaction closed
483 510
484 511 The transaction will be given as callback's first argument.
485 512
486 513 Category is a unique identifier to allow overwriting an old callback
487 514 with a newer callback.
488 515 """
489 516 self._postclosecallback[category] = callback
490 517
491 518 @active
492 519 def getpostclose(self, category):
493 520 """return a postclose callback added before, or None"""
494 521 return self._postclosecallback.get(category, None)
495 522
496 523 @active
497 524 def addabort(self, category, callback):
498 525 """add a callback to be called when the transaction is aborted.
499 526
500 527 The transaction will be given as the first argument to the callback.
501 528
502 529 Category is a unique identifier to allow overwriting an old callback
503 530 with a newer callback.
504 531 """
505 532 self._abortcallback[category] = callback
506 533
507 534 @active
508 535 def addvalidator(self, category, callback):
509 536 """ adds a callback to be called when validating the transaction.
510 537
511 538 The transaction will be given as the first argument to the callback.
512 539
513 540 callback should raise an exception to abort the transaction """
514 541 self._validatecallback[category] = callback
515 542
516 543 @active
517 544 def close(self):
518 545 '''commit the transaction'''
519 546 if self._count == 1:
520 547 for category in sorted(self._validatecallback):
521 548 self._validatecallback[category](self)
522 549 self._validatecallback = None # Help prevent cycles.
523 550 self._generatefiles(group=GEN_GROUP_PRE_FINALIZE)
524 551 while self._finalizecallback:
525 552 callbacks = self._finalizecallback
526 553 self._finalizecallback = {}
527 554 categories = sorted(callbacks)
528 555 for cat in categories:
529 556 callbacks[cat](self)
530 557 # Prevent double usage and help clear cycles.
531 558 self._finalizecallback = None
532 559 self._generatefiles(group=GEN_GROUP_POST_FINALIZE)
533 560
534 561 self._count -= 1
535 562 if self._count != 0:
536 563 return
537 564 self._file.close()
538 565 self._backupsfile.close()
539 566 # cleanup temporary files
540 567 for l, f, b, c in self._backupentries:
541 568 if l not in self._vfsmap and c:
542 569 self._report(
543 570 b"couldn't remove %s: unknown cache location %s\n" % (b, l)
544 571 )
545 572 continue
546 573 vfs = self._vfsmap[l]
547 574 if not f and b and vfs.exists(b):
548 575 try:
549 576 vfs.unlink(b)
550 577 except (IOError, OSError, error.Abort) as inst:
551 578 if not c:
552 579 raise
553 580 # Abort may be raised by a read-only opener
554 581 self._report(
555 582 b"couldn't remove %s: %s\n" % (vfs.join(b), inst)
556 583 )
557 584 self._offsetmap = {}
585 self._newfiles = set()
558 586 self._writeundo()
559 587 if self._after:
560 588 self._after()
561 589 self._after = None # Help prevent cycles.
562 590 if self._opener.isfile(self._backupjournal):
563 591 self._opener.unlink(self._backupjournal)
564 592 if self._opener.isfile(self._journal):
565 593 self._opener.unlink(self._journal)
566 594 for l, _f, b, c in self._backupentries:
567 595 if l not in self._vfsmap and c:
568 596 self._report(
569 597 b"couldn't remove %s: unknown cache location"
570 598 b"%s\n" % (b, l)
571 599 )
572 600 continue
573 601 vfs = self._vfsmap[l]
574 602 if b and vfs.exists(b):
575 603 try:
576 604 vfs.unlink(b)
577 605 except (IOError, OSError, error.Abort) as inst:
578 606 if not c:
579 607 raise
580 608 # Abort may be raised by a read-only opener
581 609 self._report(
582 610 b"couldn't remove %s: %s\n" % (vfs.join(b), inst)
583 611 )
584 612 self._backupentries = []
585 613 self._journal = None
586 614
587 615 self._releasefn(self, True) # notify success of closing transaction
588 616 self._releasefn = None # Help prevent cycles.
589 617
590 618 # run post close action
591 619 categories = sorted(self._postclosecallback)
592 620 for cat in categories:
593 621 self._postclosecallback[cat](self)
594 622 # Prevent double usage and help clear cycles.
595 623 self._postclosecallback = None
596 624
597 625 @active
598 626 def abort(self):
599 627 '''abort the transaction (generally called on error, or when the
600 628 transaction is not explicitly committed before going out of
601 629 scope)'''
602 630 self._abort()
603 631
604 632 def _writeundo(self):
605 633 """write transaction data for possible future undo call"""
606 634 if self._undoname is None:
607 635 return
608 636 undobackupfile = self._opener.open(
609 637 b"%s.backupfiles" % self._undoname, b'w'
610 638 )
611 639 undobackupfile.write(b'%d\n' % version)
612 640 for l, f, b, c in self._backupentries:
613 641 if not f: # temporary file
614 642 continue
615 643 if not b:
616 644 u = b''
617 645 else:
618 646 if l not in self._vfsmap and c:
619 647 self._report(
620 648 b"couldn't remove %s: unknown cache location"
621 649 b"%s\n" % (b, l)
622 650 )
623 651 continue
624 652 vfs = self._vfsmap[l]
625 653 base, name = vfs.split(b)
626 654 assert name.startswith(self._journal), name
627 655 uname = name.replace(self._journal, self._undoname, 1)
628 656 u = vfs.reljoin(base, uname)
629 657 util.copyfile(vfs.join(b), vfs.join(u), hardlink=True)
630 658 undobackupfile.write(b"%s\0%s\0%s\0%d\n" % (l, f, u, c))
631 659 undobackupfile.close()
632 660
633 661 def _abort(self):
634 662 entries = self.readjournal()
635 663 self._count = 0
636 664 self._usages = 0
637 665 self._file.close()
638 666 self._backupsfile.close()
639 667
640 668 try:
641 if not self._offsetmap and not self._backupentries:
669 if not entries and not self._backupentries:
642 670 if self._backupjournal:
643 671 self._opener.unlink(self._backupjournal)
644 672 if self._journal:
645 673 self._opener.unlink(self._journal)
646 674 return
647 675
648 676 self._report(_(b"transaction abort!\n"))
649 677
650 678 try:
651 679 for cat in sorted(self._abortcallback):
652 680 self._abortcallback[cat](self)
653 681 # Prevent double usage and help clear cycles.
654 682 self._abortcallback = None
655 683 _playback(
656 684 self._journal,
657 685 self._report,
658 686 self._opener,
659 687 self._vfsmap,
660 688 entries,
661 689 self._backupentries,
662 690 False,
663 691 checkambigfiles=self._checkambigfiles,
664 692 )
665 693 self._report(_(b"rollback completed\n"))
666 694 except BaseException as exc:
667 695 self._report(_(b"rollback failed - please run hg recover\n"))
668 696 self._report(
669 697 _(b"(failure reason: %s)\n") % stringutil.forcebytestr(exc)
670 698 )
671 699 finally:
672 700 self._journal = None
673 701 self._releasefn(self, False) # notify failure of transaction
674 702 self._releasefn = None # Help prevent cycles.
675 703
676 704
677 705 def rollback(opener, vfsmap, file, report, checkambigfiles=None):
678 706 """Rolls back the transaction contained in the given file
679 707
680 708 Reads the entries in the specified file, and the corresponding
681 709 '*.backupfiles' file, to recover from an incomplete transaction.
682 710
683 711 * `file`: a file containing a list of entries, specifying where
684 712 to truncate each file. The file should contain a list of
685 713 file\0offset pairs, delimited by newlines. The corresponding
686 714 '*.backupfiles' file should contain a list of file\0backupfile
687 715 pairs, delimited by \0.
688 716
689 717 `checkambigfiles` is a set of (path, vfs-location) tuples,
690 718 which determine whether file stat ambiguity should be avoided when
691 719 restoring the corresponding files.
692 720 """
693 721 entries = []
694 722 backupentries = []
695 723
696 724 fp = opener.open(file)
697 725 lines = fp.readlines()
698 726 fp.close()
699 727 for l in lines:
700 728 try:
701 729 f, o = l.split(b'\0')
702 730 entries.append((f, int(o)))
703 731 except ValueError:
704 732 report(
705 733 _(b"couldn't read journal entry %r!\n") % pycompat.bytestr(l)
706 734 )
707 735
708 736 backupjournal = b"%s.backupfiles" % file
709 737 if opener.exists(backupjournal):
710 738 fp = opener.open(backupjournal)
711 739 lines = fp.readlines()
712 740 if lines:
713 741 ver = lines[0][:-1]
714 742 if ver == (b'%d' % version):
715 743 for line in lines[1:]:
716 744 if line:
717 745 # Shave off the trailing newline
718 746 line = line[:-1]
719 747 l, f, b, c = line.split(b'\0')
720 748 backupentries.append((l, f, b, bool(c)))
721 749 else:
722 750 report(
723 751 _(
724 752 b"journal was created by a different version of "
725 753 b"Mercurial\n"
726 754 )
727 755 )
728 756
729 757 _playback(
730 758 file,
731 759 report,
732 760 opener,
733 761 vfsmap,
734 762 entries,
735 763 backupentries,
736 764 checkambigfiles=checkambigfiles,
737 765 )
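The subtlest part of the new transaction code is replace(), which must now migrate entries between the two structures. A standalone sketch of that three-way state machine, taking a plain dict and set instead of the transaction object, for illustration only:

def replacesketch(newfiles, offsetmap, file, offset):
    """Mirror of the transaction.replace() logic above; illustrative only."""
    if file in newfiles:
        if offset:  # the file gained pre-transaction content to preserve
            newfiles.remove(file)
            offsetmap[file] = offset
        # offset == 0: nothing to do, it is already tracked as new
    elif file in offsetmap:
        if offset:
            offsetmap[file] = offset
        else:  # truncating back to zero makes it a "new" file again
            del offsetmap[file]
            newfiles.add(file)
    else:
        raise KeyError(file)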