repair.py
495 lines
| 17.0 KiB
| text/x-python
|
PythonLexer
/ mercurial / repair.py
Matt Mackall
|
r4702 | # repair.py - functions for repository repair for mercurial | ||
# | ||||
# Copyright 2005, 2006 Chris Mason <mason@suse.com> | ||||
# Copyright 2007 Matt Mackall | ||||
# | ||||
Martin Geisler
|
r8225 | # This software may be used and distributed according to the terms of the | ||
Matt Mackall
|
r10263 | # GNU General Public License version 2 or any later version. | ||
Matt Mackall
|
r4702 | |||
Gregory Szorc
|
r25970 | from __future__ import absolute_import | ||
Alain Leufroy
|
r16440 | import errno | ||
Augie Fackler
|
r29341 | import hashlib | ||
Matt Mackall
|
r4702 | |||
Gregory Szorc
|
r25970 | from .i18n import _ | ||
from .node import short | ||||
from . import ( | ||||
bundle2, | ||||
changegroup, | ||||
Pierre-Yves David
|
r26587 | error, | ||
Gregory Szorc
|
r25970 | exchange, | ||
Kostia Balytskyi
|
r28868 | obsolete, | ||
Gregory Szorc
|
r25970 | util, | ||
) | ||||
Matt Mackall
|
def _bundle(repo, bases, heads, node, suffix, compress=True):
    """create a bundle with the specified revisions as a backup"""
    # Pick the newest changegroup version this repo can safely emit.
    version = changegroup.safeversion(repo)
    cg = changegroup.changegroupsubset(repo, bases, heads, 'strip',
                                       version=version)

    # Backup bundles live in .hg/strip-backup; create it on first use.
    vfs = repo.vfs
    backupdir = "strip-backup"
    if not vfs.isdir(backupdir):
        vfs.mkdir(backupdir)

    # Include a hash of all the nodes in the filename for uniqueness
    hexnodes = sorted(c.hex() for c in repo.set('%ln::%ln', bases, heads))
    digest = hashlib.sha1(''.join(hexnodes)).hexdigest()
    name = "%s/%s-%s-%s.hg" % (backupdir, short(node), digest[:8], suffix)

    # Choose the on-disk container. A '01' changegroup goes into a legacy
    # HG10* bundle where compression is baked into the type; anything newer
    # is wrapped in a bundle2 (HG20) container with compression as a
    # separate knob.
    compression = None
    if version == '01':
        bundletype = "HG10BZ" if compress else "HG10UN"
    else:
        bundletype = "HG20"
        if compress:
            compression = 'BZ'
    return bundle2.writebundle(repo.ui, cg, name, bundletype, vfs,
                               compression=compression)
Matt Mackall
|
r4702 | |||
Alexis S. L. Carvalho
|
def _collectfiles(repo, striprev):
    """find out the filelogs affected by the strip

    Returns a sorted list of every file path touched by any changeset
    from 'striprev' through the repository tip, i.e. the filelogs that
    will need truncating when those changesets are stripped.
    """
    files = set()

    # Walk every changeset at or above the strip point and gather the
    # paths each one modifies.
    for x in xrange(striprev, len(repo)):
        files.update(repo[x].files())

    return sorted(files)
Alexis S. L. Carvalho
|
r5902 | |||
Benoit Boissinot
|
r13702 | def _collectbrokencsets(repo, files, striprev): | ||
"""return the changesets which will be broken by the truncation""" | ||||
Matt Mackall
|
r13705 | s = set() | ||
Benoit Boissinot
|
r13702 | def collectone(revlog): | ||
Durham Goode
|
r20074 | _, brokenset = revlog.getstrippoint(striprev) | ||
s.update([revlog.linkrev(r) for r in brokenset]) | ||||
Alexis S. L. Carvalho
|
r5909 | |||
Durham Goode
|
r30375 | collectone(repo.manifestlog._revlog) | ||
Matt Mackall
|
r13705 | for fname in files: | ||
collectone(repo.file(fname)) | ||||
Alexis S. L. Carvalho
|
r5909 | |||
Matt Mackall
|
r13705 | return s | ||
Alexis S. L. Carvalho
|
r5909 | |||
Jordi Gutiérrez Hermoso
|
def strip(ui, repo, nodelist, backup=True, topic='backup'):
    """strip the changesets in 'nodelist' and all their descendants

    Unless 'backup' is falsy, a bundle of every stripped changeset is
    written to .hg/strip-backup, with 'topic' used as the backup file
    name suffix.  Revisions above the lowest stripped revision that are
    *not* descendants of it are saved to a temporary bundle and
    re-applied after the revlogs are truncated, so only the requested
    changesets are lost.

    Raises error.Abort when called from inside an open transaction.
    Returns the backup bundle path (or None when no backup was written)
    so extensions can use it.
    """
    # This function operates within a transaction of its own, but does
    # not take any lock on the repo.

    # Simple way to maintain backwards compatibility for this
    # argument.
    if backup in ['none', 'strip']:
        backup = False

    repo = repo.unfiltered()
    repo.destroying()

    cl = repo.changelog
    # TODO handle undo of merge sets
    if isinstance(nodelist, str):
        nodelist = [nodelist]
    striplist = [cl.rev(node) for node in nodelist]
    # striprev is the lowest revision being removed; everything below it
    # in the revlogs is untouched.
    striprev = min(striplist)

    files = _collectfiles(repo, striprev)
    saverevs = _collectbrokencsets(repo, files, striprev)

    # Some revisions with rev > striprev may not be descendants of striprev.
    # We have to find these revisions and put them in a bundle, so that
    # we can restore them after the truncations.
    # To create the bundle we use repo.changegroupsubset which requires
    # the list of heads and bases of the set of interesting revisions.
    # (head = revision in the set that has no descendant in the set;
    #  base = revision in the set that has no ancestor in the set)
    tostrip = set(striplist)
    saveheads = set(saverevs)
    for r in cl.revs(start=striprev + 1):
        if any(p in tostrip for p in cl.parentrevs(r)):
            tostrip.add(r)

        if r not in tostrip:
            saverevs.add(r)
            saveheads.difference_update(cl.parentrevs(r))
            saveheads.add(r)
    # convert head revisions to nodes before the revlogs are truncated
    saveheads = [cl.node(r) for r in saveheads]

    # compute base nodes
    if saverevs:
        descendants = set(cl.descendants(saverevs))
        saverevs.difference_update(descendants)
    savebases = [cl.node(r) for r in saverevs]
    stripbases = [cl.node(r) for r in tostrip]

    # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)), but
    # is much faster
    newbmtarget = repo.revs('max(parents(%ld) - (%ld))', tostrip, tostrip)
    if newbmtarget:
        newbmtarget = repo[newbmtarget.first()].node()
    else:
        newbmtarget = '.'

    # Bookmarks pointing at stripped changesets get rewritten to
    # newbmtarget after the strip succeeds.
    bm = repo._bookmarks
    updatebm = []
    for m in bm:
        rev = repo[bm[m]].rev()
        if rev in tostrip:
            updatebm.append(m)

    # create a changegroup for all the branches we need to keep
    backupfile = None
    vfs = repo.vfs
    node = nodelist[-1]
    if backup:
        backupfile = _bundle(repo, stripbases, cl.heads(), node, topic)
        repo.ui.status(_("saved backup bundle to %s\n") %
                       vfs.join(backupfile))
        repo.ui.log("backupbundle", "saved backup bundle to %s\n",
                    vfs.join(backupfile))
    tmpbundlefile = None
    if saveheads:
        # do not compress temporary bundle if we remove it from disk later
        tmpbundlefile = _bundle(repo, savebases, saveheads, node, 'temp',
                                compress=False)

    mfst = repo.manifestlog._revlog

    curtr = repo.currenttransaction()
    if curtr is not None:
        del curtr # avoid carrying reference to transaction for nothing
        msg = _('programming error: cannot strip from inside a transaction')
        raise error.Abort(msg, hint=_('contact your extension maintainer'))

    try:
        with repo.transaction("strip") as tr:
            # remember where this transaction's entries start so only
            # the files we truncate below are rolled back on failure
            offset = len(tr.entries)

            tr.startgroup()
            cl.strip(striprev, tr)
            mfst.strip(striprev, tr)
            if 'treemanifest' in repo.requirements: # safe but unnecessary
                                                    # otherwise
                for unencoded, encoded, size in repo.store.datafiles():
                    if (unencoded.startswith('meta/') and
                        unencoded.endswith('00manifest.i')):
                        dir = unencoded[5:-12]
                        repo.manifestlog._revlog.dirlog(dir).strip(striprev, tr)
            for fn in files:
                repo.file(fn).strip(striprev, tr)
            tr.endgroup()

            # truncate the revlog files in place to the offsets recorded
            # by the transaction ('file' here is a path, not the builtin)
            for i in xrange(offset, len(tr.entries)):
                file, troffset, ignore = tr.entries[i]
                with repo.svfs(file, 'a', checkambig=True) as fp:
                    fp.truncate(troffset)
                if troffset == 0:
                    repo.store.markremoved(file)

        if tmpbundlefile:
            # re-apply the revisions we saved before truncating
            ui.note(_("adding branch\n"))
            f = vfs.open(tmpbundlefile, "rb")
            gen = exchange.readbundle(ui, f, tmpbundlefile, vfs)
            if not repo.ui.verbose:
                # silence internal shuffling chatter
                repo.ui.pushbuffer()
            if isinstance(gen, bundle2.unbundle20):
                with repo.transaction('strip') as tr:
                    tr.hookargs = {'source': 'strip',
                                   'url': 'bundle:' + vfs.join(tmpbundlefile)}
                    bundle2.applybundle(repo, gen, tr, source='strip',
                                        url='bundle:' + vfs.join(tmpbundlefile))
            else:
                gen.apply(repo, 'strip', 'bundle:' + vfs.join(tmpbundlefile),
                          True)
            if not repo.ui.verbose:
                repo.ui.popbuffer()
            f.close()
        repo._phasecache.invalidate()

        # move bookmarks that pointed at stripped changesets
        for m in updatebm:
            bm[m] = repo[newbmtarget].node()
        lock = tr = None
        try:
            lock = repo.lock()
            tr = repo.transaction('repair')
            bm.recordchange(tr)
            tr.close()
        finally:
            tr.release()
            lock.release()

        # remove undo files
        for undovfs, undofile in repo.undofiles():
            try:
                undovfs.unlink(undofile)
            except OSError as e:
                if e.errno != errno.ENOENT:
                    ui.warn(_('error removing %s: %s\n') %
                            (undovfs.join(undofile), str(e)))

    except: # re-raises
        if backupfile:
            ui.warn(_("strip failed, backup bundle stored in '%s'\n")
                    % vfs.join(backupfile))
        if tmpbundlefile:
            ui.warn(_("strip failed, unrecovered changes stored in '%s'\n")
                    % vfs.join(tmpbundlefile))
            ui.warn(_("(fix the problem, then recover the changesets with "
                      "\"hg unbundle '%s'\")\n") % vfs.join(tmpbundlefile))
        raise
    else:
        if tmpbundlefile:
            # Remove temporary bundle only if there were no exceptions
            vfs.unlink(tmpbundlefile)

    repo.destroyed()
    # return the backup file path (or None if 'backup' was False) so
    # extensions can use it
    return backupfile
Gregory Szorc
|
r25652 | |||
def rebuildfncache(ui, repo):
    """Rebuilds the fncache file from repo history.

    Missing entries will be added. Extra entries will be removed.

    Walks every changeset, checks which data/*.i and data/*.d store files
    actually exist for the touched paths, and rewrites the fncache inside
    a transaction when its contents differ from what was found.
    """
    repo = repo.unfiltered()

    if 'fncache' not in repo.requirements:
        ui.warn(_('(not rebuilding fncache because repository does not '
                  'support fncache)\n'))
        return

    with repo.lock():
        fnc = repo.store.fncache
        # Trigger load of fncache.
        if 'irrelevant' in fnc:
            pass

        oldentries = set(fnc.entries)
        newentries = set()
        seenfiles = set()

        repolen = len(repo)
        for rev in repo:
            ui.progress(_('rebuilding'), rev, total=repolen,
                        unit=_('changesets'))

            ctx = repo[rev]
            for f in ctx.files():
                # This is to minimize I/O.
                if f in seenfiles:
                    continue
                seenfiles.add(f)

                i = 'data/%s.i' % f
                d = 'data/%s.d' % f

                # only record store files that actually exist on disk
                if repo.store._exists(i):
                    newentries.add(i)
                if repo.store._exists(d):
                    newentries.add(d)

        ui.progress(_('rebuilding'), None)

        if 'treemanifest' in repo.requirements: # safe but unnecessary otherwise
            # tree manifests keep per-directory revlogs under meta/
            for dir in util.dirs(seenfiles):
                i = 'meta/%s/00manifest.i' % dir
                d = 'meta/%s/00manifest.d' % dir

                if repo.store._exists(i):
                    newentries.add(i)
                if repo.store._exists(d):
                    newentries.add(d)

        addcount = len(newentries - oldentries)
        removecount = len(oldentries - newentries)
        for p in sorted(oldentries - newentries):
            ui.write(_('removing %s\n') % p)
        for p in sorted(newentries - oldentries):
            ui.write(_('adding %s\n') % p)

        if addcount or removecount:
            ui.write(_('%d items added, %d removed from fncache\n') %
                     (addcount, removecount))
            fnc.entries = newentries
            fnc._dirty = True

            with repo.transaction('fncache') as tr:
                fnc.write(tr)
        else:
            ui.write(_('fncache already up to date\n'))
Ryan McElroy
|
def stripbmrevset(repo, mark):
    """
    The revset to strip when strip is called with -B mark

    Needs to live here so extensions can use it and wrap it even when strip is
    not enabled or not present on a box.
    """
    # ancestors of the bookmark, minus anything reachable from another
    # head or another bookmark, i.e. only what this bookmark "owns"
    spec = ("ancestors(bookmark(%s)) - "
            "ancestors(head() and not bookmark(%s)) - "
            "ancestors(bookmark() and not bookmark(%s))")
    return repo.revs(spec, mark, mark, mark)
Kostia Balytskyi
|
r28868 | |||
def deleteobsmarkers(obsstore, indices):
    """Delete some obsmarkers from obsstore and return how many were deleted

    'indices' is a list of ints which are the indices
    of the markers to be deleted.

    Every invocation of this function completely rewrites the obsstore file,
    skipping the markers we want to be removed. The new temporary file is
    created, remaining markers are written there and on .close() this file
    gets atomically renamed to obsstore, thus guaranteeing consistency."""
    if not indices:
        # we don't want to rewrite the obsstore with the same content
        return

    # Partition the existing markers into kept and dropped.
    kept = []
    dropped = 0
    for pos, marker in enumerate(obsstore._all):
        if pos in indices:
            dropped += 1
        else:
            kept.append(marker)

    # atomictemp makes the replacement of the obsstore file all-or-nothing.
    newobsstorefile = obsstore.svfs('obsstore', 'w', atomictemp=True)
    for data in obsolete.encodemarkers(kept, True, obsstore._version):
        newobsstorefile.write(data)
    newobsstorefile.close()
    return dropped
Gregory Szorc
|
r30775 | |||
def upgraderequiredsourcerequirements(repo):
    """Obtain requirements required to be present to upgrade a repo.

    An upgrade will not be allowed if the repository doesn't have the
    requirements returned by this function.
    """
    required = set()
    # Both were introduced in Mercurial 0.9.2 and are assumed present
    # by the upgrade machinery.
    required.add('revlogv1')
    required.add('store')
    return required
def upgradeblocksourcerequirements(repo):
    """Obtain requirements that will prevent an upgrade from occurring.

    An upgrade cannot be performed if the source repository contains a
    requirements in the returned set.
    """
    blockers = set()
    # The upgrade code does not yet support these experimental features.
    # This is an artificial limitation.
    blockers.add('manifestv2')
    blockers.add('treemanifest')
    # This was a precursor to generaldelta and was never enabled by default.
    # It should (hopefully) not exist in the wild.
    blockers.add('parentdelta')
    # Upgrade should operate on the actual store, not the shared link.
    blockers.add('shared')
    return blockers
def upgradesupportremovedrequirements(repo):
    """Obtain requirements that can be removed during an upgrade.

    If an upgrade were to create a repository that dropped a requirement,
    the dropped requirement must appear in the returned set for the upgrade
    to be allowed.
    """
    # Nothing may be dropped today; extensions can wrap this to allow more.
    removable = set()
    return removable
def upgradesupporteddestrequirements(repo):
    """Obtain requirements that upgrade supports in the destination.

    If the result of the upgrade would create requirements not in this set,
    the upgrade is disallowed.

    Extensions should monkeypatch this to add their custom requirements.
    """
    # Only well-understood, stable requirements may appear in the
    # upgraded repository.
    return set(('dotencode', 'fncache', 'generaldelta', 'revlogv1',
                'store'))
def upgradeallowednewrequirements(repo):
    """Obtain requirements that can be added to a repository during upgrade.

    This is used to disallow proposed requirements from being added when
    they weren't present before.

    We use a list of allowed requirement additions instead of a list of known
    bad additions because the whitelist approach is safer and will prevent
    future, unknown requirements from accidentally being added.
    """
    allowed = set()
    allowed.add('dotencode')
    allowed.add('fncache')
    allowed.add('generaldelta')
    return allowed
def upgraderepo(ui, repo, run=False, optimize=None): | ||||
"""Upgrade a repository in place.""" | ||||
# Avoid cycle: cmdutil -> repair -> localrepo -> cmdutil | ||||
from . import localrepo | ||||
repo = repo.unfiltered() | ||||
# Ensure the repository can be upgraded. | ||||
missingreqs = upgraderequiredsourcerequirements(repo) - repo.requirements | ||||
if missingreqs: | ||||
raise error.Abort(_('cannot upgrade repository; requirement ' | ||||
'missing: %s') % _(', ').join(sorted(missingreqs))) | ||||
blockedreqs = upgradeblocksourcerequirements(repo) & repo.requirements | ||||
if blockedreqs: | ||||
raise error.Abort(_('cannot upgrade repository; unsupported source ' | ||||
'requirement: %s') % | ||||
_(', ').join(sorted(blockedreqs))) | ||||
# FUTURE there is potentially a need to control the wanted requirements via | ||||
# command arguments or via an extension hook point. | ||||
newreqs = localrepo.newreporequirements(repo) | ||||
noremovereqs = (repo.requirements - newreqs - | ||||
upgradesupportremovedrequirements(repo)) | ||||
if noremovereqs: | ||||
raise error.Abort(_('cannot upgrade repository; requirement would be ' | ||||
'removed: %s') % _(', ').join(sorted(noremovereqs))) | ||||
noaddreqs = (newreqs - repo.requirements - | ||||
upgradeallowednewrequirements(repo)) | ||||
if noaddreqs: | ||||
raise error.Abort(_('cannot upgrade repository; do not support adding ' | ||||
'requirement: %s') % | ||||
_(', ').join(sorted(noaddreqs))) | ||||
unsupportedreqs = newreqs - upgradesupporteddestrequirements(repo) | ||||
if unsupportedreqs: | ||||
raise error.Abort(_('cannot upgrade repository; do not support ' | ||||
'destination requirement: %s') % | ||||
_(', ').join(sorted(unsupportedreqs))) | ||||
def printrequirements(): | ||||
ui.write(_('requirements\n')) | ||||
ui.write(_(' preserved: %s\n') % | ||||
_(', ').join(sorted(newreqs & repo.requirements))) | ||||
if repo.requirements - newreqs: | ||||
ui.write(_(' removed: %s\n') % | ||||
_(', ').join(sorted(repo.requirements - newreqs))) | ||||
if newreqs - repo.requirements: | ||||
ui.write(_(' added: %s\n') % | ||||
_(', ').join(sorted(newreqs - repo.requirements))) | ||||
ui.write('\n') | ||||
if not run: | ||||
ui.write(_('performing an upgrade with "--run" will make the following ' | ||||
'changes:\n\n')) | ||||
printrequirements() | ||||