repair.py
297 lines
| 9.8 KiB
| text/x-python
|
PythonLexer
/ mercurial / repair.py
Matt Mackall
|
r4702 | # repair.py - functions for repository repair for mercurial | ||
# | ||||
# Copyright 2005, 2006 Chris Mason <mason@suse.com> | ||||
# Copyright 2007 Matt Mackall | ||||
# | ||||
Martin Geisler
|
r8225 | # This software may be used and distributed according to the terms of the | ||
Matt Mackall
|
r10263 | # GNU General Public License version 2 or any later version. | ||
Matt Mackall
|
r4702 | |||
Eric Sumner
|
r23898 | from mercurial import changegroup, exchange, util, bundle2 | ||
Yuya Nishihara
|
r25340 | from mercurial.node import short | ||
Alexander Solovyov
|
r14064 | from mercurial.i18n import _ | ||
Alain Leufroy
|
r16440 | import errno | ||
Matt Mackall
|
r4702 | |||
Matt Mackall
|
def _bundle(repo, bases, heads, node, suffix, compress=True):
    """create a bundle with the specified revisions as a backup

    Writes the changegroup for bases::heads into
    .hg/strip-backup/<short node>-<hash>-<suffix>.hg and returns the
    backup file name (relative to repo.vfs).
    """
    # bundle2 output is experimental here: it is only used when both the
    # bundle2-exp knob and an explicit strip-bundle2-version are set.
    usebundle2 = (repo.ui.configbool('experimental', 'bundle2-exp', True) and
                  repo.ui.config('experimental', 'strip-bundle2-version'))
    if usebundle2:
        cgversion = repo.ui.config('experimental', 'strip-bundle2-version')
        if cgversion not in changegroup.packermap:
            # unknown version: warn and fall back to plain bundle1 output
            repo.ui.warn(_('unknown strip-bundle2-version value %r; '
                           'should be one of %r\n') %
                         (cgversion, sorted(changegroup.packermap.keys()),))
            cgversion = '01'
            usebundle2 = False
    else:
        cgversion = '01'

    cg = changegroup.changegroupsubset(repo, bases, heads, 'strip',
                                       version=cgversion)
    backupdir = "strip-backup"
    vfs = repo.vfs
    if not vfs.isdir(backupdir):
        vfs.mkdir(backupdir)

    # Include a hash of all the nodes in the filename for uniqueness
    allcommits = repo.set('%ln::%ln', bases, heads)
    allhashes = sorted(c.hex() for c in allcommits)
    totalhash = util.sha1(''.join(allhashes)).hexdigest()
    name = "%s/%s-%s-%s.hg" % (backupdir, short(node), totalhash[:8], suffix)

    if usebundle2:
        bundletype = "HG20"
    elif compress:
        bundletype = "HG10BZ"
    else:
        # uncompressed bundle1; used for the temporary partial backup that
        # is deleted right after being re-applied (see strip())
        bundletype = "HG10UN"
    return changegroup.writebundle(repo.ui, cg, name, bundletype, vfs)
Matt Mackall
|
r4702 | |||
Alexis S. L. Carvalho
|
def _collectfiles(repo, striprev):
    """find out the filelogs affected by the strip"""
    touched = set()
    # every file mentioned by any changeset at or above striprev has a
    # filelog that the strip will truncate
    for rev in xrange(striprev, len(repo)):
        for fname in repo[rev].files():
            touched.add(fname)
    return sorted(touched)
Alexis S. L. Carvalho
|
r5902 | |||
Benoit Boissinot
|
def _collectbrokencsets(repo, files, striprev):
    """return the changesets which will be broken by the truncation"""
    broken = set()

    def collectone(revlog):
        # getstrippoint's second result is the set of revs in this revlog
        # that truncating at striprev leaves dangling; record the
        # changesets they link to
        _, brokenset = revlog.getstrippoint(striprev)
        broken.update(revlog.linkrev(r) for r in brokenset)

    collectone(repo.manifest)
    for fname in files:
        collectone(repo.file(fname))

    return broken
Alexis S. L. Carvalho
|
r5909 | |||
Jordi Gutiérrez Hermoso
|
def strip(ui, repo, nodelist, backup=True, topic='backup'):
    """strip the given node(s) and all their descendants from the repo

    Revisions above the lowest stripped rev that are NOT descendants of
    any stripped node are saved to a temporary bundle and re-applied
    after truncation.  If ``backup`` is true, a full backup bundle of
    everything stripped is written to .hg/strip-backup first.
    """
    # Simple way to maintain backwards compatibility for this
    # argument.
    if backup in ['none', 'strip']:
        backup = False

    repo = repo.unfiltered()
    repo.destroying()

    cl = repo.changelog
    # TODO handle undo of merge sets
    if isinstance(nodelist, str):
        nodelist = [nodelist]
    striplist = [cl.rev(node) for node in nodelist]
    striprev = min(striplist)

    # Some revisions with rev > striprev may not be descendants of striprev.
    # We have to find these revisions and put them in a bundle, so that
    # we can restore them after the truncations.
    # To create the bundle we use repo.changegroupsubset which requires
    # the list of heads and bases of the set of interesting revisions.
    # (head = revision in the set that has no descendant in the set;
    #  base = revision in the set that has no ancestor in the set)
    tostrip = set(striplist)
    for rev in striplist:
        for desc in cl.descendants([rev]):
            tostrip.add(desc)

    files = _collectfiles(repo, striprev)
    saverevs = _collectbrokencsets(repo, files, striprev)

    # compute heads
    saveheads = set(saverevs)
    for r in xrange(striprev + 1, len(cl)):
        if r not in tostrip:
            # rev survives the strip: it must be saved and re-applied
            saverevs.add(r)
            saveheads.difference_update(cl.parentrevs(r))
            saveheads.add(r)
    saveheads = [cl.node(r) for r in saveheads]

    # compute base nodes
    if saverevs:
        descendants = set(cl.descendants(saverevs))
        saverevs.difference_update(descendants)
    savebases = [cl.node(r) for r in saverevs]
    stripbases = [cl.node(r) for r in tostrip]

    # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)), but
    # is much faster
    newbmtarget = repo.revs('max(parents(%ld) - (%ld))', tostrip, tostrip)
    if newbmtarget:
        newbmtarget = repo[newbmtarget.first()].node()
    else:
        # nothing survives below the strip point; park bookmarks on '.'
        newbmtarget = '.'

    # collect bookmarks that point into the stripped region so they can
    # be moved to newbmtarget afterwards
    bm = repo._bookmarks
    updatebm = []
    for m in bm:
        rev = repo[bm[m]].rev()
        if rev in tostrip:
            updatebm.append(m)

    # create a changegroup for all the branches we need to keep
    backupfile = None
    vfs = repo.vfs
    node = nodelist[-1]
    if backup:
        backupfile = _bundle(repo, stripbases, cl.heads(), node, topic)
        repo.ui.status(_("saved backup bundle to %s\n") %
                       vfs.join(backupfile))
        repo.ui.log("backupbundle", "saved backup bundle to %s\n",
                    vfs.join(backupfile))
    if saveheads or savebases:
        # do not compress partial bundle if we remove it from disk later
        chgrpfile = _bundle(repo, savebases, saveheads, node, 'temp',
                            compress=False)

    mfst = repo.manifest

    # stripping inside an open transaction would corrupt the journal
    curtr = repo.currenttransaction()
    if curtr is not None:
        del curtr # avoid carrying reference to transaction for nothing
        msg = _('programming error: cannot strip from inside a transaction')
        raise util.Abort(msg, hint=_('contact your extension maintainer'))

    tr = repo.transaction("strip")
    # entries appended after this offset are the ones our strip created
    offset = len(tr.entries)

    try:
        tr.startgroup()
        cl.strip(striprev, tr)
        mfst.strip(striprev, tr)
        for fn in files:
            repo.file(fn).strip(striprev, tr)
        tr.endgroup()

        try:
            # physically truncate the revlog files to the offsets the
            # transaction recorded for this strip
            for i in xrange(offset, len(tr.entries)):
                file, troffset, ignore = tr.entries[i]
                repo.svfs(file, 'a').truncate(troffset)
                if troffset == 0:
                    # file became empty: drop it from the store/fncache
                    repo.store.markremoved(file)
            tr.close()
        finally:
            tr.release()

        if saveheads or savebases:
            # re-apply the temporary bundle of surviving revisions
            ui.note(_("adding branch\n"))
            f = vfs.open(chgrpfile, "rb")
            gen = exchange.readbundle(ui, f, chgrpfile, vfs)
            if not repo.ui.verbose:
                # silence internal shuffling chatter
                repo.ui.pushbuffer()
            if isinstance(gen, bundle2.unbundle20):
                tr = repo.transaction('strip')
                tr.hookargs = {'source': 'strip',
                               'url': 'bundle:' + vfs.join(chgrpfile)}
                try:
                    bundle2.processbundle(repo, gen, lambda: tr)
                    tr.close()
                finally:
                    tr.release()
            else:
                changegroup.addchangegroup(repo, gen, 'strip',
                                           'bundle:' + vfs.join(chgrpfile),
                                           True)
            if not repo.ui.verbose:
                repo.ui.popbuffer()
            f.close()

        # remove undo files
        for undovfs, undofile in repo.undofiles():
            try:
                undovfs.unlink(undofile)
            except OSError as e:
                if e.errno != errno.ENOENT:
                    ui.warn(_('error removing %s: %s\n') %
                            (undovfs.join(undofile), str(e)))

        # retarget bookmarks that pointed at stripped revisions
        for m in updatebm:
            bm[m] = repo[newbmtarget].node()
        bm.write()
    except: # re-raises
        if backupfile:
            ui.warn(_("strip failed, full bundle stored in '%s'\n")
                    % vfs.join(backupfile))
        elif saveheads:
            ui.warn(_("strip failed, partial bundle stored in '%s'\n")
                    % vfs.join(chgrpfile))
        raise
    else:
        if saveheads or savebases:
            # Remove partial backup only if there were no exceptions
            vfs.unlink(chgrpfile)

    repo.destroyed()
Gregory Szorc
|
r25652 | |||
def rebuildfncache(ui, repo): | ||||
"""Rebuilds the fncache file from repo history. | ||||
Missing entries will be added. Extra entries will be removed. | ||||
""" | ||||
repo = repo.unfiltered() | ||||
if 'fncache' not in repo.requirements: | ||||
ui.warn(_('(not rebuilding fncache because repository does not ' | ||||
Wagner Bruna
|
r25874 | 'support fncache)\n')) | ||
Gregory Szorc
|
r25652 | return | ||
lock = repo.lock() | ||||
try: | ||||
fnc = repo.store.fncache | ||||
# Trigger load of fncache. | ||||
if 'irrelevant' in fnc: | ||||
pass | ||||
oldentries = set(fnc.entries) | ||||
newentries = set() | ||||
seenfiles = set() | ||||
repolen = len(repo) | ||||
for rev in repo: | ||||
ui.progress(_('changeset'), rev, total=repolen) | ||||
ctx = repo[rev] | ||||
for f in ctx.files(): | ||||
# This is to minimize I/O. | ||||
if f in seenfiles: | ||||
continue | ||||
seenfiles.add(f) | ||||
i = 'data/%s.i' % f | ||||
d = 'data/%s.d' % f | ||||
if repo.store._exists(i): | ||||
newentries.add(i) | ||||
if repo.store._exists(d): | ||||
newentries.add(d) | ||||
ui.progress(_('changeset'), None) | ||||
addcount = len(newentries - oldentries) | ||||
removecount = len(oldentries - newentries) | ||||
for p in sorted(oldentries - newentries): | ||||
ui.write(_('removing %s\n') % p) | ||||
for p in sorted(newentries - oldentries): | ||||
ui.write(_('adding %s\n') % p) | ||||
if addcount or removecount: | ||||
ui.write(_('%d items added, %d removed from fncache\n') % | ||||
(addcount, removecount)) | ||||
fnc.entries = newentries | ||||
fnc._dirty = True | ||||
tr = repo.transaction('fncache') | ||||
try: | ||||
fnc.write(tr) | ||||
tr.close() | ||||
finally: | ||||
tr.release() | ||||
else: | ||||
ui.write(_('fncache already up to date\n')) | ||||
finally: | ||||
lock.release() | ||||