##// END OF EJS Templates
undo-files: also remove the undo.backupfiles...
marmoute -
r51186:cd680b45 stable
parent child Browse files
Show More
@@ -1,565 +1,568 b''
1 1 # repair.py - functions for repository repair for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 4 # Copyright 2007 Olivia Mackall
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9
10 10 import errno
11 11
12 12 from .i18n import _
13 13 from .node import (
14 14 hex,
15 15 short,
16 16 )
17 17 from . import (
18 18 bundle2,
19 19 changegroup,
20 20 discovery,
21 21 error,
22 22 exchange,
23 23 obsolete,
24 24 obsutil,
25 25 pathutil,
26 26 phases,
27 27 requirements,
28 28 scmutil,
29 29 util,
30 30 )
31 31 from .utils import (
32 32 hashutil,
33 33 stringutil,
34 34 urlutil,
35 35 )
36 36
37 37
def backupbundle(
    repo, bases, heads, node, suffix, compress=True, obsolescence=True
):
    """create a bundle with the specified revisions as a backup

    The bundle contains ``bases::heads`` and is written into the repo vfs
    under ``strip-backup/``, named after ``node``, a short hash of all
    bundled changesets (for uniqueness), and ``suffix``.

    ``compress`` selects a compressed bundle; ``obsolescence`` controls
    whether obsolescence markers are included.  Returns the path of the new
    bundle, relative to the repo vfs.
    """

    backupdir = b"strip-backup"
    vfs = repo.vfs
    if not vfs.isdir(backupdir):
        vfs.mkdir(backupdir)

    # Include a hash of all the nodes in the filename for uniqueness
    allcommits = repo.set(b'%ln::%ln', bases, heads)
    allhashes = sorted(c.hex() for c in allcommits)
    totalhash = hashutil.sha1(b''.join(allhashes)).digest()
    name = b"%s/%s-%s-%s.hg" % (
        backupdir,
        short(node),
        hex(totalhash[:4]),
        suffix,
    )

    cgversion = changegroup.localversion(repo)
    comp = None
    if cgversion != b'01':
        # bundle2 container: compression is a separate parameter ('BZ')
        bundletype = b"HG20"
        if compress:
            comp = b'BZ'
    elif compress:
        # legacy bundle formats encode compression in the bundle type
        bundletype = b"HG10BZ"
    else:
        bundletype = b"HG10UN"

    outgoing = discovery.outgoing(repo, missingroots=bases, ancestorsof=heads)
    contentopts = {
        b'cg.version': cgversion,
        b'obsolescence': obsolescence,
        b'phases': True,
    }
    return bundle2.writenewbundle(
        repo.ui,
        repo,
        b'strip',
        name,
        bundletype,
        outgoing,
        contentopts,
        vfs,
        compression=comp,
    )
87 87
88 88
89 89 def _collectfiles(repo, striprev):
90 90 """find out the filelogs affected by the strip"""
91 91 files = set()
92 92
93 93 for x in range(striprev, len(repo)):
94 94 files.update(repo[x].files())
95 95
96 96 return sorted(files)
97 97
98 98
99 99 def _collectrevlog(revlog, striprev):
100 100 _, brokenset = revlog.getstrippoint(striprev)
101 101 return [revlog.linkrev(r) for r in brokenset]
102 102
103 103
def _collectbrokencsets(repo, files, striprev):
    """return the changesets which will be broken by the truncation"""
    broken = set()
    # First the manifest log(s), then every affected filelog.
    for mlog in manifestrevlogs(repo):
        broken |= set(_collectrevlog(mlog, striprev))
    for fname in files:
        broken |= set(_collectrevlog(repo.file(fname), striprev))
    return broken
114 114
115 115
def cleanup_undo_files(repo):
    """remove "undo" files used by the rollback logic

    This is useful to prevent rollback running in situations where it does
    not make sense. For example after a strip.
    """
    # XXX need to remove the backups themselves too
    undo_files = [(repo.svfs, b'undo.backupfiles')]
    undo_files.extend(repo.undofiles())
    for undovfs, undofile in undo_files:
        try:
            undovfs.unlink(undofile)
        except OSError as e:
            # A missing undo file is fine; any other failure is reported
            # but must not abort the cleanup of the remaining files.
            if e.errno != errno.ENOENT:
                msg = _(b'error removing %s: %s\n')
                msg %= (undovfs.join(undofile), stringutil.forcebytestr(e))
                repo.ui.warn(msg)
130 133
131 134
def strip(ui, repo, nodelist, backup=True, topic=b'backup'):
    """remove the changesets in ``nodelist`` (and their descendants)

    Writes a backup bundle first (unless ``backup`` is falsy), truncates
    the changelog, manifest(s) and affected filelogs, then re-applies a
    temporary bundle holding the revisions above the strip point that must
    survive.  Returns the backup bundle path, or None when no backup was
    requested.
    """
    # This function requires the caller to lock the repo, but it operates
    # within a transaction of its own, and thus requires there to be no current
    # transaction when it is called.
    if repo.currenttransaction() is not None:
        raise error.ProgrammingError(b'cannot strip from inside a transaction')

    # Simple way to maintain backwards compatibility for this
    # argument.
    if backup in [b'none', b'strip']:
        backup = False

    repo = repo.unfiltered()
    repo.destroying()
    vfs = repo.vfs
    # load bookmark before changelog to avoid side effect from outdated
    # changelog (see repo._refreshchangelog)
    repo._bookmarks
    cl = repo.changelog

    # TODO handle undo of merge sets
    if isinstance(nodelist, bytes):
        nodelist = [nodelist]
    striplist = [cl.rev(node) for node in nodelist]
    striprev = min(striplist)

    files = _collectfiles(repo, striprev)
    saverevs = _collectbrokencsets(repo, files, striprev)

    # Some revisions with rev > striprev may not be descendants of striprev.
    # We have to find these revisions and put them in a bundle, so that
    # we can restore them after the truncations.
    # To create the bundle we use repo.changegroupsubset which requires
    # the list of heads and bases of the set of interesting revisions.
    # (head = revision in the set that has no descendant in the set;
    #  base = revision in the set that has no ancestor in the set)
    tostrip = set(striplist)
    saveheads = set(saverevs)
    for r in cl.revs(start=striprev + 1):
        if any(p in tostrip for p in cl.parentrevs(r)):
            tostrip.add(r)

        if r not in tostrip:
            saverevs.add(r)
            saveheads.difference_update(cl.parentrevs(r))
            saveheads.add(r)
    saveheads = [cl.node(r) for r in saveheads]

    # compute base nodes
    if saverevs:
        descendants = set(cl.descendants(saverevs))
        saverevs.difference_update(descendants)
    savebases = [cl.node(r) for r in saverevs]
    stripbases = [cl.node(r) for r in tostrip]

    stripobsidx = obsmarkers = ()
    if repo.ui.configbool(b'devel', b'strip-obsmarkers'):
        obsmarkers = obsutil.exclusivemarkers(repo, stripbases)
    if obsmarkers:
        stripobsidx = [
            i for i, m in enumerate(repo.obsstore) if m in obsmarkers
        ]

    newbmtarget, updatebm = _bookmarkmovements(repo, tostrip)

    backupfile = None
    node = nodelist[-1]
    if backup:
        backupfile = _createstripbackup(repo, stripbases, node, topic)
    # create a changegroup for all the branches we need to keep
    tmpbundlefile = None
    if saveheads:
        # do not compress temporary bundle if we remove it from disk later
        #
        # We do not include obsolescence, it might re-introduce prune markers
        # we are trying to strip. This is harmless since the stripped markers
        # are already backed up and we did not touch the markers for the
        # saved changesets.
        tmpbundlefile = backupbundle(
            repo,
            savebases,
            saveheads,
            node,
            b'temp',
            compress=False,
            obsolescence=False,
        )

    with ui.uninterruptible():
        try:
            with repo.transaction(b"strip") as tr:
                # TODO this code violates the interface abstraction of the
                # transaction and makes assumptions that file storage is
                # using append-only files. We'll need some kind of storage
                # API to handle stripping for us.
                oldfiles = set(tr._offsetmap.keys())
                oldfiles.update(tr._newfiles)

                tr.startgroup()
                cl.strip(striprev, tr)
                stripmanifest(repo, striprev, tr, files)

                for fn in files:
                    repo.file(fn).strip(striprev, tr)
                tr.endgroup()

                entries = tr.readjournal()

                # Truncate every file the transaction touched that existed
                # before it started; files truncated to zero are dropped
                # from the store entirely.
                for file, troffset in entries:
                    if file in oldfiles:
                        continue
                    with repo.svfs(file, b'a', checkambig=True) as fp:
                        fp.truncate(troffset)
                    if troffset == 0:
                        repo.store.markremoved(file)

                deleteobsmarkers(repo.obsstore, stripobsidx)
                del repo.obsstore
                repo.invalidatevolatilesets()
                repo._phasecache.filterunknown(repo)

            if tmpbundlefile:
                ui.note(_(b"adding branch\n"))
                f = vfs.open(tmpbundlefile, b"rb")
                gen = exchange.readbundle(ui, f, tmpbundlefile, vfs)
                # silence internal shuffling chatter
                maybe_silent = (
                    repo.ui.silent()
                    if not repo.ui.verbose
                    else util.nullcontextmanager()
                )
                with maybe_silent:
                    tmpbundleurl = b'bundle:' + vfs.join(tmpbundlefile)
                    txnname = b'strip'
                    if not isinstance(gen, bundle2.unbundle20):
                        txnname = b"strip\n%s" % urlutil.hidepassword(
                            tmpbundleurl
                        )
                    with repo.transaction(txnname) as tr:
                        bundle2.applybundle(
                            repo, gen, tr, source=b'strip', url=tmpbundleurl
                        )
                f.close()

            with repo.transaction(b'repair') as tr:
                bmchanges = [(m, repo[newbmtarget].node()) for m in updatebm]
                repo._bookmarks.applychanges(repo, tr, bmchanges)

            cleanup_undo_files(repo)

        except:  # re-raises
            if backupfile:
                ui.warn(
                    _(b"strip failed, backup bundle stored in '%s'\n")
                    % vfs.join(backupfile)
                )
            if tmpbundlefile:
                ui.warn(
                    _(b"strip failed, unrecovered changes stored in '%s'\n")
                    % vfs.join(tmpbundlefile)
                )
                ui.warn(
                    _(
                        b"(fix the problem, then recover the changesets with "
                        b"\"hg unbundle '%s'\")\n"
                    )
                    % vfs.join(tmpbundlefile)
                )
            raise
        else:
            if tmpbundlefile:
                # Remove temporary bundle only if there were no exceptions
                vfs.unlink(tmpbundlefile)

    repo.destroyed()
    # return the backup file path (or None if 'backup' was False) so
    # extensions can use it
    return backupfile
310 313
311 314
def softstrip(ui, repo, nodelist, backup=True, topic=b'backup'):
    """perform a "soft" strip using the archived phase

    The changesets in ``nodelist`` and all their descendants are retracted
    to the ``archived`` phase instead of being removed from storage.  A
    backup bundle is written first when ``backup`` is true.

    Returns the backup bundle path, or None when nothing matched or no
    backup was requested.
    """
    tostrip = [c.node() for c in repo.set(b'sort(%ln::)', nodelist)]
    if not tostrip:
        return None

    backupfile = None
    if backup:
        # sort() above guarantees tostrip[0] is a lowest-revision node
        node = tostrip[0]
        backupfile = _createstripbackup(repo, tostrip, node, topic)

    newbmtarget, updatebm = _bookmarkmovements(repo, tostrip)
    with repo.transaction(b'strip') as tr:
        phases.retractboundary(repo, tr, phases.archived, tostrip)
        bmchanges = [(m, repo[newbmtarget].node()) for m in updatebm]
        repo._bookmarks.applychanges(repo, tr, bmchanges)
    return backupfile
329 332
330 333
331 334 def _bookmarkmovements(repo, tostrip):
332 335 # compute necessary bookmark movement
333 336 bm = repo._bookmarks
334 337 updatebm = []
335 338 for m in bm:
336 339 rev = repo[bm[m]].rev()
337 340 if rev in tostrip:
338 341 updatebm.append(m)
339 342 newbmtarget = None
340 343 # If we need to move bookmarks, compute bookmark
341 344 # targets. Otherwise we can skip doing this logic.
342 345 if updatebm:
343 346 # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)),
344 347 # but is much faster
345 348 newbmtarget = repo.revs(b'max(parents(%ld) - (%ld))', tostrip, tostrip)
346 349 if newbmtarget:
347 350 newbmtarget = repo[newbmtarget.first()].node()
348 351 else:
349 352 newbmtarget = b'.'
350 353 return newbmtarget, updatebm
351 354
352 355
def _createstripbackup(repo, stripbases, node, topic):
    """backup the changesets we are about to strip

    Writes a bundle covering ``stripbases`` up to the current changelog
    heads, announces it to the user and the log, and returns its path.
    """
    changelog = repo.changelog
    backupfile = backupbundle(
        repo, stripbases, changelog.heads(), node, topic
    )
    location = repo.vfs.join(backupfile)
    repo.ui.status(_(b"saved backup bundle to %s\n") % location)
    repo.ui.log(b"backupbundle", b"saved backup bundle to %s\n", location)
    return backupfile
363 366
364 367
def safestriproots(ui, repo, nodes):
    """return list of roots of nodes where descendants are covered by nodes

    Nodes whose descendants are not all contained in ``nodes`` (stripping
    them would orphan other revisions) are dropped with a warning.
    """
    torev = repo.unfiltered().changelog.rev
    revs = {torev(n) for n in nodes}
    # tostrip = wanted - unsafe = wanted - ancestors(orphaned)
    # orphaned = affected - wanted
    # affected = descendants(roots(wanted))
    # wanted = revs
    # Internal-phase revisions are excluded when looking for orphans
    # (the `not _phase(...)` clause below).
    revset = b'%ld - ( ::( (roots(%ld):: and not _phase(%s)) -%ld) )'
    tostrip = set(repo.revs(revset, revs, revs, phases.internal, revs))
    notstrip = revs - tostrip
    if notstrip:
        nodestr = b', '.join(sorted(short(repo[n].node()) for n in notstrip))
        ui.warn(
            _(b'warning: orphaned descendants detected, not stripping %s\n')
            % nodestr
        )
    return [c.node() for c in repo.set(b'roots(%ld)', tostrip)]
383 386
384 387
class stripcallback:
    """used as a transaction postclose callback

    Accumulates nodes via ``addnodes`` while the transaction is open;
    when called at postclose time, strips whatever roots are safe.
    """

    def __init__(self, ui, repo, backup, topic):
        self.ui = ui
        self.repo = repo
        self.backup = backup
        # fall back to the default topic when none was given
        self.topic = topic or b'backup'
        self.nodelist = []

    def addnodes(self, nodes):
        """queue more nodes to be stripped when the transaction closes"""
        self.nodelist.extend(nodes)

    def __call__(self, tr):
        # only strip roots whose descendants are fully covered by the queue
        strippable = safestriproots(self.ui, self.repo, self.nodelist)
        if strippable:
            strip(self.ui, self.repo, strippable, self.backup, self.topic)
402 405
403 406
def delayedstrip(ui, repo, nodelist, topic=None, backup=True):
    """like strip, but works inside transaction and won't strip irrelevant revs

    nodelist must explicitly contain all descendants. Otherwise a warning will
    be printed that some nodes are not stripped.

    Will do a backup if `backup` is True. The last non-None "topic" will be
    used as the backup topic name. The default backup topic name is "backup".
    """
    tr = repo.currenttransaction()
    if not tr:
        # No transaction in flight: strip synchronously right away.
        safenodes = safestriproots(ui, repo, nodelist)
        return strip(ui, repo, safenodes, backup=backup, topic=topic)
    # transaction postclose callbacks are called in alphabet order.
    # use '\xff' as prefix so we are likely to be called last.
    key = b'\xffstrip'
    callback = tr.getpostclose(key)
    if callback is None:
        callback = stripcallback(ui, repo, backup=backup, topic=topic)
        tr.addpostclose(key, callback)
    if topic:
        callback.topic = topic
    callback.addnodes(nodelist)
426 429
427 430
def stripmanifest(repo, striprev, tr, files):
    """strip every manifest revlog at ``striprev`` inside transaction ``tr``

    NOTE(review): ``files`` is accepted for interface compatibility but is
    not used by this implementation.
    """
    for mlog in manifestrevlogs(repo):
        mlog.strip(striprev, tr)
431 434
432 435
def manifestrevlogs(repo):
    """Yield every manifest revlog: the root manifest first, then (for
    tree manifests) one storage object per sub-directory manifest."""
    yield repo.manifestlog.getstorage(b'')
    if scmutil.istreemanifest(repo):
        # This logic is safe if treemanifest isn't enabled, but also
        # pointless, so we skip it if treemanifest isn't enabled.
        for t, unencoded, size in repo.store.datafiles():
            if unencoded.startswith(b'meta/') and unencoded.endswith(
                b'00manifest.i'
            ):
                # strip the b'meta/' prefix and b'/00manifest.i' suffix to
                # recover the tree directory name
                dir = unencoded[5:-12]
                yield repo.manifestlog.getstorage(dir)
444 447
445 448
def rebuildfncache(ui, repo, only_data=False):
    """Rebuilds the fncache file from repo history.

    Missing entries will be added. Extra entries will be removed.

    With ``only_data``, the ``.i`` entries already listed in the fncache
    are trusted and only the matching ``.d`` files are re-checked, which
    avoids walking the full changelog.
    """
    repo = repo.unfiltered()

    if requirements.FNCACHE_REQUIREMENT not in repo.requirements:
        ui.warn(
            _(
                b'(not rebuilding fncache because repository does not '
                b'support fncache)\n'
            )
        )
        return

    with repo.lock():
        fnc = repo.store.fncache
        fnc.ensureloaded(warn=ui.warn)

        oldentries = set(fnc.entries)
        newentries = set()
        seenfiles = set()

        if only_data:
            # Trust the listing of .i from the fncache, but not the .d. This is
            # much faster, because we only need to stat every possible .d files,
            # instead of reading the full changelog
            for f in fnc:
                if f[:5] == b'data/' and f[-2:] == b'.i':
                    seenfiles.add(f[5:-2])
                    newentries.add(f)
                    dataf = f[:-2] + b'.d'
                    if repo.store._exists(dataf):
                        newentries.add(dataf)
        else:
            # Full rebuild: walk every changeset and stat the revlog files
            # of every file it touches.
            progress = ui.makeprogress(
                _(b'rebuilding'), unit=_(b'changesets'), total=len(repo)
            )
            for rev in repo:
                progress.update(rev)

                ctx = repo[rev]
                for f in ctx.files():
                    # This is to minimize I/O.
                    if f in seenfiles:
                        continue
                    seenfiles.add(f)

                    i = b'data/%s.i' % f
                    d = b'data/%s.d' % f

                    if repo.store._exists(i):
                        newentries.add(i)
                    if repo.store._exists(d):
                        newentries.add(d)

            progress.complete()

        if requirements.TREEMANIFEST_REQUIREMENT in repo.requirements:
            # This logic is safe if treemanifest isn't enabled, but also
            # pointless, so we skip it if treemanifest isn't enabled.
            for dir in pathutil.dirs(seenfiles):
                i = b'meta/%s/00manifest.i' % dir
                d = b'meta/%s/00manifest.d' % dir

                if repo.store._exists(i):
                    newentries.add(i)
                if repo.store._exists(d):
                    newentries.add(d)

        addcount = len(newentries - oldentries)
        removecount = len(oldentries - newentries)
        for p in sorted(oldentries - newentries):
            ui.write(_(b'removing %s\n') % p)
        for p in sorted(newentries - oldentries):
            ui.write(_(b'adding %s\n') % p)

        if addcount or removecount:
            ui.write(
                _(b'%d items added, %d removed from fncache\n')
                % (addcount, removecount)
            )
            fnc.entries = newentries
            fnc._dirty = True

            with repo.transaction(b'fncache') as tr:
                fnc.write(tr)
        else:
            ui.write(_(b'fncache already up to date\n'))
536 539
537 540
def deleteobsmarkers(obsstore, indices):
    """Delete some obsmarkers from obsstore and return how many were deleted

    'indices' is a list of ints which are the indices
    of the markers to be deleted.

    Every invocation of this function completely rewrites the obsstore file,
    skipping the markers we want to be removed. The new temporary file is
    created, remaining markers are written there and on .close() this file
    gets atomically renamed to obsstore, thus guaranteeing consistency."""
    if not indices:
        # we don't want to rewrite the obsstore with the same content
        return

    markers = obsstore._all
    # keep every marker whose index was not requested for deletion
    kept = [m for i, m in enumerate(markers) if i not in indices]
    removed = len(markers) - len(kept)

    newobsstorefile = obsstore.svfs(b'obsstore', b'w', atomictemp=True)
    for data in obsolete.encodemarkers(kept, True, obsstore._version):
        newobsstorefile.write(data)
    newobsstorefile.close()
    return removed
General Comments 0
You need to be logged in to leave comments. Login now