repair.py
437 lines
| 15.2 KiB
| text/x-python
|
PythonLexer
/ mercurial / repair.py
Matt Mackall
|
# repair.py - functions for repository repair for mercurial
#
# Copyright 2005, 2006 Chris Mason <mason@suse.com>
# Copyright 2007 Matt Mackall
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
r4702 | |||
Gregory Szorc
|
r25970 | from __future__ import absolute_import | ||
Alain Leufroy
|
r16440 | import errno | ||
Augie Fackler
|
r29341 | import hashlib | ||
Matt Mackall
|
r4702 | |||
Gregory Szorc
|
r25970 | from .i18n import _ | ||
Augie Fackler
|
r34219 | from .node import ( | ||
hex, | ||||
short, | ||||
) | ||||
Gregory Szorc
|
r25970 | from . import ( | ||
bundle2, | ||||
changegroup, | ||||
r32468 | discovery, | |||
Pierre-Yves David
|
r26587 | error, | ||
Gregory Szorc
|
r25970 | exchange, | ||
Kostia Balytskyi
|
r28868 | obsolete, | ||
r33144 | obsutil, | |||
Gregory Szorc
|
r38806 | pycompat, | ||
Gregory Szorc
|
r25970 | util, | ||
) | ||||
Yuya Nishihara
|
r37102 | from .utils import ( | ||
stringutil, | ||||
) | ||||
Gregory Szorc
|
r25970 | |||
Gregory Szorc
|
def backupbundle(repo, bases, heads, node, suffix, compress=True,
                 obsolescence=True):
    """create a bundle with the specified revisions as a backup

    The bundle is written under .hg/strip-backup/ and its name embeds the
    short hash of ``node`` plus a digest of all bundled changesets, so
    successive backups do not clobber each other.  Returns the vfs-relative
    path of the written bundle (callers join it with repo.vfs).
    """

    # Backups always live in the strip-backup directory; create it lazily.
    backupdir = "strip-backup"
    vfs = repo.vfs
    if not vfs.isdir(backupdir):
        vfs.mkdir(backupdir)

    # Include a hash of all the nodes in the filename for uniqueness
    allcommits = repo.set('%ln::%ln', bases, heads)
    allhashes = sorted(c.hex() for c in allcommits)
    totalhash = hashlib.sha1(''.join(allhashes)).digest()
    name = "%s/%s-%s-%s.hg" % (backupdir, short(node),
                               hex(totalhash[:4]), suffix)

    cgversion = changegroup.localversion(repo)
    comp = None
    if cgversion != '01':
        # changegroup versions other than '01' require the bundle2
        # container ('HG20'); compression is passed separately to the
        # bundle2 writer below.
        bundletype = "HG20"
        if compress:
            comp = 'BZ'
    elif compress:
        bundletype = "HG10BZ"
    else:
        bundletype = "HG10UN"

    outgoing = discovery.outgoing(repo, missingroots=bases, missingheads=heads)
    contentopts = {
        'cg.version': cgversion,
        'obsolescence': obsolescence,
        'phases': True,
    }
    return bundle2.writenewbundle(repo.ui, repo, 'strip', name, bundletype,
                                  outgoing, contentopts, vfs, compression=comp)
Matt Mackall
|
r4702 | |||
Alexis S. L. Carvalho
|
def _collectfiles(repo, striprev):
    """find out the filelogs affected by the strip"""
    # Union the file lists of every changeset from striprev to tip; return
    # a sorted list so callers iterate in a deterministic order.
    touched = set()
    for rev in pycompat.xrange(striprev, len(repo)):
        touched.update(repo[rev].files())
    return sorted(touched)
Alexis S. L. Carvalho
|
r5902 | |||
Durham Goode
|
r33690 | def _collectrevlog(revlog, striprev): | ||
_, brokenset = revlog.getstrippoint(striprev) | ||||
return [revlog.linkrev(r) for r in brokenset] | ||||
def _collectmanifest(repo, striprev):
    """return linkrevs of manifest entries that stripping would break"""
    mfrevlog = repo.manifestlog._revlog
    return _collectrevlog(mfrevlog, striprev)
Benoit Boissinot
|
def _collectbrokencsets(repo, files, striprev):
    """return the changesets which will be broken by the truncation"""
    # Start with changesets whose manifest entry would be damaged, then add
    # those reachable only through a damaged filelog entry.
    broken = set(_collectmanifest(repo, striprev))
    for fname in files:
        broken.update(_collectrevlog(repo.file(fname), striprev))
    return broken
Alexis S. L. Carvalho
|
r5909 | |||
Jordi Gutiérrez Hermoso
|
def strip(ui, repo, nodelist, backup=True, topic='backup'):
    """remove the changesets in nodelist, and all their descendants

    'nodelist' may be a single node or a list of nodes.  When 'backup' is
    true, the stripped changesets are written to a bundle under
    .hg/strip-backup ('topic' becomes part of the file name) and the
    bundle's vfs-relative path is returned; otherwise None is returned.
    """
    # This function requires the caller to lock the repo, but it operates
    # within a transaction of its own, and thus requires there to be no current
    # transaction when it is called.
    if repo.currenttransaction() is not None:
        raise error.ProgrammingError('cannot strip from inside a transaction')

    # Simple way to maintain backwards compatibility for this
    # argument.
    if backup in ['none', 'strip']:
        backup = False

    repo = repo.unfiltered()
    repo.destroying()

    cl = repo.changelog
    # TODO handle undo of merge sets
    if isinstance(nodelist, str):
        # accept a bare node for convenience
        nodelist = [nodelist]
    striplist = [cl.rev(node) for node in nodelist]
    striprev = min(striplist)

    files = _collectfiles(repo, striprev)
    saverevs = _collectbrokencsets(repo, files, striprev)

    # Some revisions with rev > striprev may not be descendants of striprev.
    # We have to find these revisions and put them in a bundle, so that
    # we can restore them after the truncations.
    # To create the bundle we use repo.changegroupsubset which requires
    # the list of heads and bases of the set of interesting revisions.
    # (head = revision in the set that has no descendant in the set;
    #  base = revision in the set that has no ancestor in the set)
    tostrip = set(striplist)
    saveheads = set(saverevs)
    for r in cl.revs(start=striprev + 1):
        if any(p in tostrip for p in cl.parentrevs(r)):
            # descendant of a stripped revision: strip it as well
            tostrip.add(r)

        if r not in tostrip:
            # revision above striprev that we must preserve
            saverevs.add(r)
            saveheads.difference_update(cl.parentrevs(r))
            saveheads.add(r)
    saveheads = [cl.node(r) for r in saveheads]

    # compute base nodes
    if saverevs:
        descendants = set(cl.descendants(saverevs))
        saverevs.difference_update(descendants)
    savebases = [cl.node(r) for r in saverevs]
    stripbases = [cl.node(r) for r in tostrip]

    # Collect the obsolescence markers exclusive to the stripped
    # changesets so they can be deleted along with them (devel option).
    stripobsidx = obsmarkers = ()
    if repo.ui.configbool('devel', 'strip-obsmarkers'):
        obsmarkers = obsutil.exclusivemarkers(repo, stripbases)
    if obsmarkers:
        stripobsidx = [i for i, m in enumerate(repo.obsstore)
                       if m in obsmarkers]

    # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)), but
    # is much faster
    newbmtarget = repo.revs('max(parents(%ld) - (%ld))', tostrip, tostrip)
    if newbmtarget:
        newbmtarget = repo[newbmtarget.first()].node()
    else:
        newbmtarget = '.'

    # bookmarks pointing into the stripped set get moved onto newbmtarget
    bm = repo._bookmarks
    updatebm = []
    for m in bm:
        rev = repo[bm[m]].rev()
        if rev in tostrip:
            updatebm.append(m)

    # create a changegroup for all the branches we need to keep
    backupfile = None
    vfs = repo.vfs
    node = nodelist[-1]
    if backup:
        backupfile = backupbundle(repo, stripbases, cl.heads(), node, topic)
        repo.ui.status(_("saved backup bundle to %s\n") %
                       vfs.join(backupfile))
        repo.ui.log("backupbundle", "saved backup bundle to %s\n",
                    vfs.join(backupfile))
    tmpbundlefile = None
    if saveheads:
        # do not compress temporary bundle if we remove it from disk later
        #
        # We do not include obsolescence, it might re-introduce prune markers
        # we are trying to strip. This is harmless since the stripped markers
        # are already backed up and we did not touched the markers for the
        # saved changesets.
        tmpbundlefile = backupbundle(repo, savebases, saveheads, node, 'temp',
                                     compress=False, obsolescence=False)

    with ui.uninterruptable():
        try:
            with repo.transaction("strip") as tr:
                offset = len(tr.entries)

                tr.startgroup()
                cl.strip(striprev, tr)
                stripmanifest(repo, striprev, tr, files)
                for fn in files:
                    repo.file(fn).strip(striprev, tr)
                tr.endgroup()

                # Truncate the files this transaction touched back to the
                # offsets it recorded; a file truncated to zero bytes no
                # longer exists in the store.
                for i in pycompat.xrange(offset, len(tr.entries)):
                    file, troffset, ignore = tr.entries[i]
                    with repo.svfs(file, 'a', checkambig=True) as fp:
                        fp.truncate(troffset)
                    if troffset == 0:
                        repo.store.markremoved(file)

                deleteobsmarkers(repo.obsstore, stripobsidx)
                # drop the cached obsstore (presumably a propertycache, so
                # it is rebuilt on next access -- TODO confirm)
                del repo.obsstore
                repo.invalidatevolatilesets()
                repo._phasecache.filterunknown(repo)

            # re-apply the temporary bundle holding the non-descendant
            # revisions we had to preserve
            if tmpbundlefile:
                ui.note(_("adding branch\n"))
                f = vfs.open(tmpbundlefile, "rb")
                gen = exchange.readbundle(ui, f, tmpbundlefile, vfs)
                if not repo.ui.verbose:
                    # silence internal shuffling chatter
                    repo.ui.pushbuffer()
                tmpbundleurl = 'bundle:' + vfs.join(tmpbundlefile)
                txnname = 'strip'
                if not isinstance(gen, bundle2.unbundle20):
                    txnname = "strip\n%s" % util.hidepassword(tmpbundleurl)
                with repo.transaction(txnname) as tr:
                    bundle2.applybundle(repo, gen, tr, source='strip',
                                        url=tmpbundleurl)
                if not repo.ui.verbose:
                    repo.ui.popbuffer()
                f.close()

            # move bookmarks that pointed at stripped changesets
            with repo.transaction('repair') as tr:
                bmchanges = [(m, repo[newbmtarget].node()) for m in updatebm]
                bm.applychanges(repo, tr, bmchanges)

            # remove undo files
            for undovfs, undofile in repo.undofiles():
                try:
                    undovfs.unlink(undofile)
                except OSError as e:
                    if e.errno != errno.ENOENT:
                        ui.warn(_('error removing %s: %s\n') %
                                (undovfs.join(undofile),
                                 stringutil.forcebytestr(e)))

        except: # re-raises
            if backupfile:
                ui.warn(_("strip failed, backup bundle stored in '%s'\n")
                        % vfs.join(backupfile))
            if tmpbundlefile:
                ui.warn(_("strip failed, unrecovered changes stored in '%s'\n")
                        % vfs.join(tmpbundlefile))
                ui.warn(_("(fix the problem, then recover the changesets with "
                          "\"hg unbundle '%s'\")\n") % vfs.join(tmpbundlefile))
            raise
        else:
            if tmpbundlefile:
                # Remove temporary bundle only if there were no exceptions
                vfs.unlink(tmpbundlefile)

    repo.destroyed()
    # return the backup file path (or None if 'backup' was False) so
    # extensions can use it
    return backupfile
Gregory Szorc
|
r25652 | |||
Jun Wu
|
def safestriproots(ui, repo, nodes):
    """return list of roots of nodes where descendants are covered by nodes"""
    torev = repo.unfiltered().changelog.rev
    wanted = {torev(n) for n in nodes}
    # tostrip = wanted - unsafe = wanted - ancestors(orphaned)
    # orphaned = affected - wanted
    # affected = descendants(roots(wanted))
    tostrip = set(repo.revs('%ld-(::((roots(%ld)::)-%ld))',
                            wanted, wanted, wanted))
    skipped = wanted - tostrip
    if skipped:
        # warn about revisions we were asked to strip but cannot, because
        # some of their descendants were not part of the request
        nodestr = ', '.join(sorted(short(repo[r].node()) for r in skipped))
        ui.warn(_('warning: orphaned descendants detected, '
                  'not stripping %s\n') % nodestr)
    return [c.node() for c in repo.set('roots(%ld)', tostrip)]
class stripcallback(object):
    """used as a transaction postclose callback"""

    def __init__(self, ui, repo, backup, topic):
        self.ui = ui
        self.repo = repo
        self.backup = backup
        # fall back to the default backup topic when none was given
        self.topic = topic or 'backup'
        self.nodelist = []

    def addnodes(self, nodes):
        # accumulate nodes; the strip happens once, at transaction close
        self.nodelist.extend(nodes)

    def __call__(self, tr):
        # compute safe roots lazily so late-added nodes are included
        roots = safestriproots(self.ui, self.repo, self.nodelist)
        if not roots:
            return
        strip(self.ui, self.repo, roots, self.backup, self.topic)
Jun Wu
|
r33087 | |||
def delayedstrip(ui, repo, nodelist, topic=None):
    """like strip, but works inside transaction and won't strip irreverent revs

    nodelist must explicitly contain all descendants. Otherwise a warning will
    be printed that some nodes are not stripped.

    Always do a backup. The last non-None "topic" will be used as the backup
    topic name. The default backup topic name is "backup".
    """
    tr = repo.currenttransaction()
    if not tr:
        # no transaction in flight: compute safe roots and strip right away
        roots = safestriproots(ui, repo, nodelist)
        return strip(ui, repo, roots, True, topic)
    # transaction postclose callbacks are called in alphabet order.
    # use '\xff' as prefix so we are likely to be called last.
    key = '\xffstrip'
    callback = tr.getpostclose(key)
    if callback is None:
        callback = stripcallback(ui, repo, True, topic)
        tr.addpostclose(key, callback)
    if topic:
        callback.topic = topic
    callback.addnodes(nodelist)
Durham Goode
|
def stripmanifest(repo, striprev, tr, files):
    """strip the root manifest revlog, then any tree manifest revlogs"""
    repo.manifestlog._revlog.strip(striprev, tr)
    striptrees(repo, tr, striprev, files)
Durham Goode
|
def striptrees(repo, tr, striprev, files):
    """strip the per-directory manifest revlogs of a treemanifest repo"""
    if 'treemanifest' not in repo.requirements:
        # flat-manifest repo: nothing beyond 00manifest to strip
        return
    # safe but unnecessary otherwise
    for unencoded, encoded, size in repo.store.datafiles():
        if (unencoded.startswith('meta/')
                and unencoded.endswith('00manifest.i')):
            # drop the 'meta/' prefix and '00manifest.i' suffix; the
            # directory name keeps its trailing '/'
            subdir = unencoded[5:-12]
            repo.manifestlog._revlog.dirlog(subdir).strip(striprev, tr)
Durham Goode
|
r32196 | |||
Gregory Szorc
|
def rebuildfncache(ui, repo):
    """Rebuilds the fncache file from repo history.

    Missing entries will be added. Extra entries will be removed.
    """
    repo = repo.unfiltered()

    if 'fncache' not in repo.requirements:
        ui.warn(_('(not rebuilding fncache because repository does not '
                  'support fncache)\n'))
        return

    with repo.lock():
        fnc = repo.store.fncache
        # Trigger load of fncache.
        if 'irrelevant' in fnc:
            pass

        oldentries = set(fnc.entries)
        newentries = set()
        seenfiles = set()

        # Walk every changeset and record the store paths of every file it
        # touched; this is the ground truth the fncache should reflect.
        progress = ui.makeprogress(_('rebuilding'), unit=_('changesets'),
                                   total=len(repo))
        for rev in repo:
            progress.update(rev)

            ctx = repo[rev]
            for f in ctx.files():
                # This is to minimize I/O.
                if f in seenfiles:
                    continue
                seenfiles.add(f)

                i = 'data/%s.i' % f
                d = 'data/%s.d' % f

                if repo.store._exists(i):
                    newentries.add(i)
                if repo.store._exists(d):
                    newentries.add(d)

        progress.complete()

        if 'treemanifest' in repo.requirements: # safe but unnecessary otherwise
            # also account for the per-directory manifest revlogs of a
            # treemanifest repository
            for dir in util.dirs(seenfiles):
                i = 'meta/%s/00manifest.i' % dir
                d = 'meta/%s/00manifest.d' % dir

                if repo.store._exists(i):
                    newentries.add(i)
                if repo.store._exists(d):
                    newentries.add(d)

        addcount = len(newentries - oldentries)
        removecount = len(oldentries - newentries)
        for p in sorted(oldentries - newentries):
            ui.write(_('removing %s\n') % p)
        for p in sorted(newentries - oldentries):
            ui.write(_('adding %s\n') % p)

        # Only rewrite the fncache when something actually changed.
        if addcount or removecount:
            ui.write(_('%d items added, %d removed from fncache\n') %
                     (addcount, removecount))
            fnc.entries = newentries
            fnc._dirty = True

            with repo.transaction('fncache') as tr:
                fnc.write(tr)
        else:
            ui.write(_('fncache already up to date\n'))
Kostia Balytskyi
|
def deleteobsmarkers(obsstore, indices):
    """Delete some obsmarkers from obsstore and return how many were deleted

    'indices' is a list of ints which are the indices
    of the markers to be deleted.

    Every invocation of this function completely rewrites the obsstore file,
    skipping the markers we want to be removed. The new temporary file is
    created, remaining markers are written there and on .close() this file
    gets atomically renamed to obsstore, thus guaranteeing consistency."""
    if not indices:
        # we don't want to rewrite the obsstore with the same content
        return

    kept = []
    deleted = 0
    for idx, marker in enumerate(obsstore._all):
        if idx in indices:
            deleted += 1
        else:
            kept.append(marker)

    # atomictemp: the rewritten store replaces the old one only on close()
    newobsstorefile = obsstore.svfs('obsstore', 'w', atomictemp=True)
    for data in obsolete.encodemarkers(kept, True, obsstore._version):
        newobsstorefile.write(data)
    newobsstorefile.close()
    return deleted