##// END OF EJS Templates
undo-files: extract the cleanup code from strip into a function...
marmoute -
r51184:80110176 stable
parent child Browse files
Show More
@@ -1,561 +1,565 b''
1 1 # repair.py - functions for repository repair for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 4 # Copyright 2007 Olivia Mackall
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9
10 10 import errno
11 11
12 12 from .i18n import _
13 13 from .node import (
14 14 hex,
15 15 short,
16 16 )
17 17 from . import (
18 18 bundle2,
19 19 changegroup,
20 20 discovery,
21 21 error,
22 22 exchange,
23 23 obsolete,
24 24 obsutil,
25 25 pathutil,
26 26 phases,
27 27 requirements,
28 28 scmutil,
29 29 util,
30 30 )
31 31 from .utils import (
32 32 hashutil,
33 33 stringutil,
34 34 urlutil,
35 35 )
36 36
37 37
def backupbundle(
    repo, bases, heads, node, suffix, compress=True, obsolescence=True
):
    """create a bundle with the specified revisions as a backup"""

    backupdir = b"strip-backup"
    vfs = repo.vfs
    if not vfs.isdir(backupdir):
        vfs.mkdir(backupdir)

    # Fold a hash of every involved node into the file name so that
    # distinct backups never collide.
    nodehexes = sorted(
        ctx.hex() for ctx in repo.set(b'%ln::%ln', bases, heads)
    )
    digest = hashutil.sha1(b''.join(nodehexes)).digest()
    name = b"%s/%s-%s-%s.hg" % (
        backupdir,
        short(node),
        hex(digest[:4]),
        suffix,
    )

    cgversion = changegroup.localversion(repo)
    comp = None
    if cgversion == b'01':
        # legacy changegroup: compression is baked into the bundle type
        bundletype = b"HG10BZ" if compress else b"HG10UN"
    else:
        bundletype = b"HG20"
        if compress:
            comp = b'BZ'

    outgoing = discovery.outgoing(repo, missingroots=bases, ancestorsof=heads)
    contentopts = {
        b'cg.version': cgversion,
        b'obsolescence': obsolescence,
        b'phases': True,
    }
    return bundle2.writenewbundle(
        repo.ui,
        repo,
        b'strip',
        name,
        bundletype,
        outgoing,
        contentopts,
        vfs,
        compression=comp,
    )
87 87
88 88
89 89 def _collectfiles(repo, striprev):
90 90 """find out the filelogs affected by the strip"""
91 91 files = set()
92 92
93 93 for x in range(striprev, len(repo)):
94 94 files.update(repo[x].files())
95 95
96 96 return sorted(files)
97 97
98 98
99 99 def _collectrevlog(revlog, striprev):
100 100 _, brokenset = revlog.getstrippoint(striprev)
101 101 return [revlog.linkrev(r) for r in brokenset]
102 102
103 103
def _collectbrokencsets(repo, files, striprev):
    """return the changesets which will be broken by the truncation"""
    broken = set()

    # manifests first (root and any tree manifests), then each filelog
    for mrevlog in manifestrevlogs(repo):
        broken.update(_collectrevlog(mrevlog, striprev))
    for fname in files:
        broken.update(_collectrevlog(repo.file(fname), striprev))

    return broken
114 114
115 115
def cleanup_undo_files(repo):
    """remove "undo" files used by the rollback logic

    This is useful to prevent rollback running in situations where it does
    not make sense, for example after a strip.
    """
    for undovfs, undofile in repo.undofiles():
        try:
            undovfs.unlink(undofile)
        except OSError as err:
            # a file that is already gone is exactly what we want
            if err.errno == errno.ENOENT:
                continue
            msg = _(b'error removing %s: %s\n')
            msg %= (undovfs.join(undofile), stringutil.forcebytestr(err))
            repo.ui.warn(msg)
130
131
def strip(ui, repo, nodelist, backup=True, topic=b'backup'):
    """strip changesets (and the revisions descending from them) from the repo

    ``nodelist`` may be a single node or a list of nodes.  A backup bundle
    is written to the ``strip-backup`` directory unless ``backup`` is falsy
    (the legacy values b'none' and b'strip' also disable it).  Returns the
    backup file path, or None when no backup was made.
    """
    # This function requires the caller to lock the repo, but it operates
    # within a transaction of its own, and thus requires there to be no current
    # transaction when it is called.
    if repo.currenttransaction() is not None:
        raise error.ProgrammingError(b'cannot strip from inside a transaction')

    # Simple way to maintain backwards compatibility for this
    # argument.
    if backup in [b'none', b'strip']:
        backup = False

    repo = repo.unfiltered()
    repo.destroying()
    vfs = repo.vfs
    # load bookmark before changelog to avoid side effect from outdated
    # changelog (see repo._refreshchangelog)
    repo._bookmarks
    cl = repo.changelog

    # TODO handle undo of merge sets
    if isinstance(nodelist, bytes):
        nodelist = [nodelist]
    striplist = [cl.rev(node) for node in nodelist]
    striprev = min(striplist)

    files = _collectfiles(repo, striprev)
    saverevs = _collectbrokencsets(repo, files, striprev)

    # Some revisions with rev > striprev may not be descendants of striprev.
    # We have to find these revisions and put them in a bundle, so that
    # we can restore them after the truncations.
    # To create the bundle we use repo.changegroupsubset which requires
    # the list of heads and bases of the set of interesting revisions.
    # (head = revision in the set that has no descendant in the set;
    #  base = revision in the set that has no ancestor in the set)
    tostrip = set(striplist)
    saveheads = set(saverevs)
    for r in cl.revs(start=striprev + 1):
        if any(p in tostrip for p in cl.parentrevs(r)):
            tostrip.add(r)

        if r not in tostrip:
            saverevs.add(r)
            saveheads.difference_update(cl.parentrevs(r))
            saveheads.add(r)
    saveheads = [cl.node(r) for r in saveheads]

    # compute base nodes
    if saverevs:
        descendants = set(cl.descendants(saverevs))
        saverevs.difference_update(descendants)
    savebases = [cl.node(r) for r in saverevs]
    stripbases = [cl.node(r) for r in tostrip]

    stripobsidx = obsmarkers = ()
    # NOTE(review): obsmarker stripping is gated on a devel config knob
    if repo.ui.configbool(b'devel', b'strip-obsmarkers'):
        obsmarkers = obsutil.exclusivemarkers(repo, stripbases)
    if obsmarkers:
        stripobsidx = [
            i for i, m in enumerate(repo.obsstore) if m in obsmarkers
        ]

    newbmtarget, updatebm = _bookmarkmovements(repo, tostrip)

    backupfile = None
    node = nodelist[-1]
    if backup:
        backupfile = _createstripbackup(repo, stripbases, node, topic)
    # create a changegroup for all the branches we need to keep
    tmpbundlefile = None
    if saveheads:
        # do not compress temporary bundle if we remove it from disk later
        #
        # We do not include obsolescence, it might re-introduce prune markers
        # we are trying to strip.  This is harmless since the stripped markers
        # are already backed up and we did not touched the markers for the
        # saved changesets.
        tmpbundlefile = backupbundle(
            repo,
            savebases,
            saveheads,
            node,
            b'temp',
            compress=False,
            obsolescence=False,
        )

    with ui.uninterruptible():
        try:
            with repo.transaction(b"strip") as tr:
                # TODO this code violates the interface abstraction of the
                # transaction and makes assumptions that file storage is
                # using append-only files. We'll need some kind of storage
                # API to handle stripping for us.
                oldfiles = set(tr._offsetmap.keys())
                oldfiles.update(tr._newfiles)

                tr.startgroup()
                cl.strip(striprev, tr)
                stripmanifest(repo, striprev, tr, files)

                for fn in files:
                    repo.file(fn).strip(striprev, tr)
                tr.endgroup()

                entries = tr.readjournal()

                # truncate every file the transaction touched that did not
                # exist before the strip started
                for file, troffset in entries:
                    if file in oldfiles:
                        continue
                    with repo.svfs(file, b'a', checkambig=True) as fp:
                        fp.truncate(troffset)
                    if troffset == 0:
                        repo.store.markremoved(file)

                deleteobsmarkers(repo.obsstore, stripobsidx)
                del repo.obsstore
                repo.invalidatevolatilesets()
                repo._phasecache.filterunknown(repo)

            if tmpbundlefile:
                # re-apply the changesets we saved before the truncation
                ui.note(_(b"adding branch\n"))
                f = vfs.open(tmpbundlefile, b"rb")
                gen = exchange.readbundle(ui, f, tmpbundlefile, vfs)
                # silence internal shuffling chatter
                maybe_silent = (
                    repo.ui.silent()
                    if not repo.ui.verbose
                    else util.nullcontextmanager()
                )
                with maybe_silent:
                    tmpbundleurl = b'bundle:' + vfs.join(tmpbundlefile)
                    txnname = b'strip'
                    if not isinstance(gen, bundle2.unbundle20):
                        txnname = b"strip\n%s" % urlutil.hidepassword(
                            tmpbundleurl
                        )
                    with repo.transaction(txnname) as tr:
                        bundle2.applybundle(
                            repo, gen, tr, source=b'strip', url=tmpbundleurl
                        )
                f.close()

            with repo.transaction(b'repair') as tr:
                bmchanges = [(m, repo[newbmtarget].node()) for m in updatebm]
                repo._bookmarks.applychanges(repo, tr, bmchanges)

            # rollback after a strip would make no sense; drop the undo files
            cleanup_undo_files(repo)

        except:  # re-raises
            if backupfile:
                ui.warn(
                    _(b"strip failed, backup bundle stored in '%s'\n")
                    % vfs.join(backupfile)
                )
            if tmpbundlefile:
                ui.warn(
                    _(b"strip failed, unrecovered changes stored in '%s'\n")
                    % vfs.join(tmpbundlefile)
                )
                ui.warn(
                    _(
                        b"(fix the problem, then recover the changesets with "
                        b"\"hg unbundle '%s'\")\n"
                    )
                    % vfs.join(tmpbundlefile)
                )
            raise
        else:
            if tmpbundlefile:
                # Remove temporary bundle only if there were no exceptions
                vfs.unlink(tmpbundlefile)

    repo.destroyed()
    # return the backup file path (or None if 'backup' was False) so
    # extensions can use it
    return backupfile
306 310
307 311
def softstrip(ui, repo, nodelist, backup=True, topic=b'backup'):
    """perform a "soft" strip using the archived phase"""
    tostrip = [ctx.node() for ctx in repo.set(b'sort(%ln::)', nodelist)]
    if not tostrip:
        return None

    backupfile = None
    if backup:
        # back up everything we are about to hide, rooted at the first node
        backupfile = _createstripbackup(repo, tostrip, tostrip[0], topic)

    newbmtarget, updatebm = _bookmarkmovements(repo, tostrip)
    with repo.transaction(b'strip') as tr:
        phases.retractboundary(repo, tr, phases.archived, tostrip)
        bmchanges = [(mark, repo[newbmtarget].node()) for mark in updatebm]
        repo._bookmarks.applychanges(repo, tr, bmchanges)
    return backupfile
325 329
326 330
327 331 def _bookmarkmovements(repo, tostrip):
328 332 # compute necessary bookmark movement
329 333 bm = repo._bookmarks
330 334 updatebm = []
331 335 for m in bm:
332 336 rev = repo[bm[m]].rev()
333 337 if rev in tostrip:
334 338 updatebm.append(m)
335 339 newbmtarget = None
336 340 # If we need to move bookmarks, compute bookmark
337 341 # targets. Otherwise we can skip doing this logic.
338 342 if updatebm:
339 343 # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)),
340 344 # but is much faster
341 345 newbmtarget = repo.revs(b'max(parents(%ld) - (%ld))', tostrip, tostrip)
342 346 if newbmtarget:
343 347 newbmtarget = repo[newbmtarget.first()].node()
344 348 else:
345 349 newbmtarget = b'.'
346 350 return newbmtarget, updatebm
347 351
348 352
def _createstripbackup(repo, stripbases, node, topic):
    # backup the changeset we are about to strip
    path = backupbundle(repo, stripbases, repo.changelog.heads(), node, topic)
    joined = repo.vfs.join(path)
    repo.ui.status(_(b"saved backup bundle to %s\n") % joined)
    repo.ui.log(b"backupbundle", b"saved backup bundle to %s\n", joined)
    return path
359 363
360 364
def safestriproots(ui, repo, nodes):
    """return list of roots of nodes where descendants are covered by nodes"""
    torev = repo.unfiltered().changelog.rev
    wanted = {torev(n) for n in nodes}
    # tostrip = wanted - unsafe = wanted - ancestors(orphaned)
    # orphaned = affected - wanted
    # affected = descendants(roots(wanted))
    # wanted = revs
    revset = b'%ld - ( ::( (roots(%ld):: and not _phase(%s)) -%ld) )'
    tostrip = set(repo.revs(revset, wanted, wanted, phases.internal, wanted))
    skipped = wanted - tostrip
    if skipped:
        # warn about requested revisions we refuse to strip
        nodestr = b', '.join(sorted(short(repo[r].node()) for r in skipped))
        ui.warn(
            _(b'warning: orphaned descendants detected, not stripping %s\n')
            % nodestr
        )
    return [ctx.node() for ctx in repo.set(b'roots(%ld)', tostrip)]
379 383
380 384
class stripcallback:
    """used as a transaction postclose callback"""

    def __init__(self, ui, repo, backup, topic):
        self.ui = ui
        self.repo = repo
        self.backup = backup
        # fall back to the default topic when none was provided
        self.topic = topic if topic else b'backup'
        self.nodelist = []

    def addnodes(self, nodes):
        self.nodelist.extend(nodes)

    def __call__(self, tr):
        roots = safestriproots(self.ui, self.repo, self.nodelist)
        if roots:
            strip(self.ui, self.repo, roots, self.backup, self.topic)
398 402
399 403
def delayedstrip(ui, repo, nodelist, topic=None, backup=True):
    """like strip, but works inside transaction and won't strip irrelevant revs

    nodelist must explicitly contain all descendants. Otherwise a warning will
    be printed that some nodes are not stripped.

    Will do a backup if `backup` is True. The last non-None "topic" will be
    used as the backup topic name. The default backup topic name is "backup".
    """
    tr = repo.currenttransaction()
    if not tr:
        # no transaction in flight: strip right away
        nodes = safestriproots(ui, repo, nodelist)
        return strip(ui, repo, nodes, backup=backup, topic=topic)
    # transaction postclose callbacks are called in alphabet order.
    # use '\xff' as prefix so we are likely to be called last.
    callback = tr.getpostclose(b'\xffstrip')
    if callback is None:
        callback = stripcallback(ui, repo, backup=backup, topic=topic)
        tr.addpostclose(b'\xffstrip', callback)
    if topic:
        callback.topic = topic
    callback.addnodes(nodelist)
422 426
423 427
def stripmanifest(repo, striprev, tr, files):
    # strip every manifest revlog (the root one and any tree manifests)
    for mrevlog in manifestrevlogs(repo):
        mrevlog.strip(striprev, tr)
427 431
428 432
def manifestrevlogs(repo):
    """yield the root manifest revlog, then every tree manifest revlog"""
    yield repo.manifestlog.getstorage(b'')
    if scmutil.istreemanifest(repo):
        # This logic is safe if treemanifest isn't enabled, but also
        # pointless, so we skip it if treemanifest isn't enabled.
        for _type, unencoded, _size in repo.store.datafiles():
            if unencoded.startswith(b'meta/') and unencoded.endswith(
                b'00manifest.i'
            ):
                # strip the b'meta/' prefix and b'00manifest.i' suffix
                yield repo.manifestlog.getstorage(unencoded[5:-12])
440 444
441 445
def rebuildfncache(ui, repo, only_data=False):
    """Rebuilds the fncache file from repo history.

    Missing entries will be added. Extra entries will be removed.

    With ``only_data=True``, the ``.i`` listing already in the fncache is
    trusted and only the matching ``.d`` files are re-checked, which avoids
    walking the whole changelog.
    """
    repo = repo.unfiltered()

    if requirements.FNCACHE_REQUIREMENT not in repo.requirements:
        ui.warn(
            _(
                b'(not rebuilding fncache because repository does not '
                b'support fncache)\n'
            )
        )
        return

    with repo.lock():
        fnc = repo.store.fncache
        fnc.ensureloaded(warn=ui.warn)

        oldentries = set(fnc.entries)
        newentries = set()
        seenfiles = set()

        if only_data:
            # Trust the listing of .i from the fncache, but not the .d. This is
            # much faster, because we only need to stat every possible .d files,
            # instead of reading the full changelog
            for f in fnc:
                if f[:5] == b'data/' and f[-2:] == b'.i':
                    seenfiles.add(f[5:-2])
                    newentries.add(f)
                    dataf = f[:-2] + b'.d'
                    if repo.store._exists(dataf):
                        newentries.add(dataf)
        else:
            # full rebuild: walk every changeset and collect its files
            progress = ui.makeprogress(
                _(b'rebuilding'), unit=_(b'changesets'), total=len(repo)
            )
            for rev in repo:
                progress.update(rev)

                ctx = repo[rev]
                for f in ctx.files():
                    # This is to minimize I/O.
                    if f in seenfiles:
                        continue
                    seenfiles.add(f)

                    i = b'data/%s.i' % f
                    d = b'data/%s.d' % f

                    if repo.store._exists(i):
                        newentries.add(i)
                    if repo.store._exists(d):
                        newentries.add(d)

            progress.complete()

        if requirements.TREEMANIFEST_REQUIREMENT in repo.requirements:
            # This logic is safe if treemanifest isn't enabled, but also
            # pointless, so we skip it if treemanifest isn't enabled.
            for dir in pathutil.dirs(seenfiles):
                i = b'meta/%s/00manifest.i' % dir
                d = b'meta/%s/00manifest.d' % dir

                if repo.store._exists(i):
                    newentries.add(i)
                if repo.store._exists(d):
                    newentries.add(d)

        addcount = len(newentries - oldentries)
        removecount = len(oldentries - newentries)
        for p in sorted(oldentries - newentries):
            ui.write(_(b'removing %s\n') % p)
        for p in sorted(newentries - oldentries):
            ui.write(_(b'adding %s\n') % p)

        if addcount or removecount:
            ui.write(
                _(b'%d items added, %d removed from fncache\n')
                % (addcount, removecount)
            )
            fnc.entries = newentries
            fnc._dirty = True

            # persist the rebuilt cache inside its own transaction
            with repo.transaction(b'fncache') as tr:
                fnc.write(tr)
        else:
            ui.write(_(b'fncache already up to date\n'))
532 536
533 537
def deleteobsmarkers(obsstore, indices):
    """Delete some obsmarkers from obsstore and return how many were deleted

    'indices' is a list of ints which are the indices
    of the markers to be deleted.

    Every invocation of this function completely rewrites the obsstore file,
    skipping the markers we want to be removed. The new temporary file is
    created, remaining markers are written there and on .close() this file
    gets atomically renamed to obsstore, thus guaranteeing consistency."""
    if not indices:
        # we don't want to rewrite the obsstore with the same content
        return

    doomed = set(indices)
    kept = [
        marker
        for pos, marker in enumerate(obsstore._all)
        if pos not in doomed
    ]
    dropped = len(obsstore._all) - len(kept)

    # atomictemp guarantees the old obsstore stays intact until close()
    newobsstorefile = obsstore.svfs(b'obsstore', b'w', atomictemp=True)
    for data in obsolete.encodemarkers(kept, True, obsstore._version):
        newobsstorefile.write(data)
    newobsstorefile.close()
    return dropped
General Comments 0
You need to be logged in to leave comments. Login now