scmutil.py
1986 lines
| 69.8 KiB
| text/x-python
|
PythonLexer
/ mercurial / scmutil.py
Adrian Buehlmann
|
# scmutil.py - Mercurial core utility functions
#
# Copyright Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
Gregory Szorc
|
r27482 | from __future__ import absolute_import | ||
import errno | ||||
import glob | ||||
Augie Fackler
|
r29341 | import hashlib | ||
Gregory Szorc
|
r27482 | import os | ||
Martin von Zweigbergk
|
r41799 | import posixpath | ||
Gregory Szorc
|
r27482 | import re | ||
Yuya Nishihara
|
r34462 | import subprocess | ||
r33249 | import weakref | |||
Gregory Szorc
|
r27482 | |||
from .i18n import _ | ||||
Yuya Nishihara
|
r32656 | from .node import ( | ||
Martin von Zweigbergk
|
r37546 | bin, | ||
Jun Wu
|
r33088 | hex, | ||
nullid, | ||||
Martin von Zweigbergk
|
r39930 | nullrev, | ||
Yuya Nishihara
|
r34328 | short, | ||
Yuya Nishihara
|
r32656 | wdirid, | ||
wdirrev, | ||||
) | ||||
Gregory Szorc
|
r27482 | from . import ( | ||
Martin von Zweigbergk
|
r42103 | copies as copiesmod, | ||
Gregory Szorc
|
r27482 | encoding, | ||
error, | ||||
match as matchmod, | ||||
Jun Wu
|
r33088 | obsolete, | ||
r33249 | obsutil, | |||
Gregory Szorc
|
r27482 | pathutil, | ||
phases, | ||||
Martin von Zweigbergk
|
r39262 | policy, | ||
Pulkit Goyal
|
r30305 | pycompat, | ||
Yuya Nishihara
|
r31024 | revsetlang, | ||
Gregory Szorc
|
r27482 | similar, | ||
Boris Feld
|
r39933 | smartset, | ||
Matt Mackall
|
r34457 | url, | ||
Gregory Szorc
|
r27482 | util, | ||
Mark Thomas
|
r34544 | vfs, | ||
Gregory Szorc
|
r27482 | ) | ||
Kevin Bullock
|
r18690 | |||
Yuya Nishihara
|
r37102 | from .utils import ( | ||
Yuya Nishihara
|
r37138 | procutil, | ||
Yuya Nishihara
|
r37102 | stringutil, | ||
) | ||||
Jun Wu
|
r34646 | if pycompat.iswindows: | ||
Gregory Szorc
|
r27482 | from . import scmwindows as scmplatform | ||
Kevin Bullock
|
r18690 | else: | ||
Gregory Szorc
|
r27482 | from . import scmposix as scmplatform | ||
Kevin Bullock
|
r18690 | |||
Martin von Zweigbergk
|
r39262 | parsers = policy.importmod(r'parsers') | ||
Yuya Nishihara
|
r30314 | termsize = scmplatform.termsize | ||
Adrian Buehlmann
|
r13962 | |||
Martin von Zweigbergk
|
class status(tuple):
    '''Named tuple with a list of files per status.

    The 'deleted', 'unknown' and 'ignored' properties are only relevant to
    the working copy.
    '''

    __slots__ = ()

    def __new__(cls, modified, added, removed, deleted, unknown, ignored,
                clean):
        fields = (modified, added, removed, deleted, unknown, ignored, clean)
        return tuple.__new__(cls, fields)

    # Each category is a fixed slot of the underlying tuple; expose them as
    # read-only named properties.
    modified = property(lambda self: self[0],
                        doc='files that have been modified')

    added = property(lambda self: self[1],
                     doc='files that have been added')

    removed = property(lambda self: self[2],
                       doc='files that have been removed')

    deleted = property(lambda self: self[3],
                       doc='files that are in the dirstate, but have been '
                           'deleted from the working copy (aka "missing")')

    unknown = property(lambda self: self[4],
                       doc='files not in the dirstate that are not ignored')

    ignored = property(lambda self: self[5],
                       doc='files not in the dirstate that are ignored '
                           '(by _dirignore())')

    clean = property(lambda self: self[6],
                     doc='files that have not been modified')

    def __repr__(self, *args, **kwargs):
        template = (r'<status modified=%s, added=%s, removed=%s, deleted=%s, '
                    r'unknown=%s, ignored=%s, clean=%s>')
        return template % tuple(
            pycompat.sysstr(stringutil.pprint(v)) for v in self)
Augie Fackler
|
def itersubrepos(ctx1, ctx2):
    """find subrepos in ctx1 or ctx2

    Yields (subpath, subrepo) pairs, sorted by subpath, preferring the
    subrepo state from ctx1 when a path exists in both contexts.
    """
    # Create a (subpath, ctx) mapping where we prefer subpaths from
    # ctx1. The subpaths from ctx2 are important when the .hgsub file
    # has been modified (in ctx2) but not yet committed (in ctx1).
    subpaths = dict.fromkeys(ctx2.substate, ctx2)
    subpaths.update(dict.fromkeys(ctx1.substate, ctx1))

    missing = set()

    for subpath in ctx2.substate:
        if subpath not in ctx1.substate:
            del subpaths[subpath]
            missing.add(subpath)

    # items() instead of Python-2-only iteritems(): identical iteration
    # contents, and sorted() materializes either way, so this is also
    # Python 3 compatible.
    for subpath, ctx in sorted(subpaths.items()):
        yield subpath, ctx.sub(subpath)

    # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
    # status and diff will have an accurate result when it does
    # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
    # against itself.
    for subpath in missing:
        yield subpath, ctx2.nullsub(subpath, ctx1)
Patrick Mezard
|
def nochangesfound(ui, repo, excluded=None):
    '''Report no changes for push/pull, excluded is None or a list of
    nodes excluded from the push/pull.
    '''
    # Count the excluded nodes that were held back purely because they are
    # secret (and not obsolete), so the user knows why nothing moved.
    secretlist = []
    for n in excluded or []:
        ctx = repo[n]
        if ctx.phase() >= phases.secret and not ctx.extinct():
            secretlist.append(n)

    if secretlist:
        ui.status(_("no changes found (ignored %d secret changesets)\n")
                  % len(secretlist))
    else:
        ui.status(_("no changes found\n"))
Jun Wu
|
def callcatch(ui, func):
    """call func() with global exception handling

    return func() if no exception happens. otherwise do some error handling
    and return an exit code accordingly. does not handle all exceptions.

    Handled exceptions are reported via ui.error(). The exit code is 1 for
    InterventionRequired, the original exit code for SystemExit, and -1 for
    every other exception handled here.
    """
    try:
        try:
            return func()
        except: # re-raises
            # log the traceback (shown when ui.traceback is enabled) before
            # dispatching to the per-type handlers below
            ui.traceback()
            raise
    # Global exception handling, alphabetically
    # Mercurial-specific first, followed by built-in and library exceptions
    except error.LockHeld as inst:
        if inst.errno == errno.ETIMEDOUT:
            reason = _('timed out waiting for lock held by %r') % (
                pycompat.bytestr(inst.locker))
        else:
            reason = _('lock held by %r') % inst.locker
        ui.error(_("abort: %s: %s\n") % (
            inst.desc or stringutil.forcebytestr(inst.filename), reason))
        if not inst.locker:
            ui.error(_("(lock might be very busy)\n"))
    except error.LockUnavailable as inst:
        ui.error(_("abort: could not lock %s: %s\n") %
                 (inst.desc or stringutil.forcebytestr(inst.filename),
                  encoding.strtolocal(inst.strerror)))
    except error.OutOfBandError as inst:
        if inst.args:
            msg = _("abort: remote error:\n")
        else:
            msg = _("abort: remote error\n")
        ui.error(msg)
        if inst.args:
            # the remote's message is emitted verbatim, untranslated
            ui.error(''.join(inst.args))
        if inst.hint:
            ui.error('(%s)\n' % inst.hint)
    except error.RepoError as inst:
        ui.error(_("abort: %s!\n") % inst)
        if inst.hint:
            ui.error(_("(%s)\n") % inst.hint)
    except error.ResponseError as inst:
        ui.error(_("abort: %s") % inst.args[0])
        msg = inst.args[1]
        # normalize a unicode payload to bytes before deciding how to
        # render it
        if isinstance(msg, type(u'')):
            msg = pycompat.sysbytes(msg)
        if not isinstance(msg, bytes):
            # non-string payload: show its repr for debugging
            ui.error(" %r\n" % (msg,))
        elif not msg:
            ui.error(_(" empty string\n"))
        else:
            ui.error("\n%r\n" % pycompat.bytestr(stringutil.ellipsis(msg)))
    except error.CensoredNodeError as inst:
        ui.error(_("abort: file censored %s!\n") % inst)
    except error.StorageError as inst:
        ui.error(_("abort: %s!\n") % inst)
        if inst.hint:
            ui.error(_("(%s)\n") % inst.hint)
    except error.InterventionRequired as inst:
        ui.error("%s\n" % inst)
        if inst.hint:
            ui.error(_("(%s)\n") % inst.hint)
        # user action is required, not an internal failure: exit code 1
        return 1
    except error.WdirUnsupported:
        ui.error(_("abort: working directory revision cannot be specified\n"))
    except error.Abort as inst:
        ui.error(_("abort: %s\n") % inst)
        if inst.hint:
            ui.error(_("(%s)\n") % inst.hint)
    except ImportError as inst:
        ui.error(_("abort: %s!\n") % stringutil.forcebytestr(inst))
        # the last word of the message is the missing module name
        m = stringutil.forcebytestr(inst).split()[-1]
        if m in "mpatch bdiff".split():
            ui.error(_("(did you forget to compile extensions?)\n"))
        elif m in "zlib".split():
            ui.error(_("(is your Python install correct?)\n"))
    except (IOError, OSError) as inst:
        # duck-type the urllib error hierarchy rather than importing it
        if util.safehasattr(inst, "code"): # HTTPError
            ui.error(_("abort: %s\n") % stringutil.forcebytestr(inst))
        elif util.safehasattr(inst, "reason"): # URLError or SSLError
            try: # usually it is in the form (errno, strerror)
                reason = inst.reason.args[1]
            except (AttributeError, IndexError):
                # it might be anything, for example a string
                reason = inst.reason
            if isinstance(reason, pycompat.unicode):
                # SSLError of Python 2.7.9 contains a unicode
                reason = encoding.unitolocal(reason)
            ui.error(_("abort: error: %s\n") % reason)
        elif (util.safehasattr(inst, "args")
              and inst.args and inst.args[0] == errno.EPIPE):
            # broken pipe (e.g. pager closed): silently ignore
            pass
        elif getattr(inst, "strerror", None): # common IOError or OSError
            if getattr(inst, "filename", None) is not None:
                ui.error(_("abort: %s: '%s'\n") % (
                    encoding.strtolocal(inst.strerror),
                    stringutil.forcebytestr(inst.filename)))
            else:
                ui.error(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
        else: # suspicious IOError
            raise
    except MemoryError:
        ui.error(_("abort: out of memory\n"))
    except SystemExit as inst:
        # Commands shouldn't sys.exit directly, but give a return code.
        # Just in case catch this and and pass exit code to caller.
        return inst.code
    return -1
Kevin Bullock
|
def checknewlabel(repo, lbl, kind):
    '''Abort if lbl is not usable as a new label (bookmark/branch/tag) name.'''
    # Do not use the "kind" parameter in ui output.
    # It makes strings difficult to translate.
    if lbl in ('tip', '.', 'null'):
        raise error.Abort(_("the name '%s' is reserved") % lbl)
    for ch in (':', '\0', '\n', '\r'):
        if ch in lbl:
            raise error.Abort(
                _("%r cannot be used in a name") % pycompat.bytestr(ch))
    try:
        int(lbl)
    except ValueError:
        pass
    else:
        # a purely numeric name would shadow revision numbers
        raise error.Abort(_("cannot use an integer as a name"))
    if lbl.strip() != lbl:
        raise error.Abort(_("leading or trailing whitespace in name %r") % lbl)
Kevin Bullock
|
r17817 | |||
Adrian Buehlmann
|
def checkfilename(f):
    '''Check that the filename f is an acceptable filename for a tracked file'''
    # newline characters would corrupt the dirstate and manifest formats
    for banned in ('\r', '\n'):
        if banned in f:
            raise error.Abort(_("'\\n' and '\\r' disallowed in filenames: %r")
                              % pycompat.bytestr(f))
Adrian Buehlmann
|
r13974 | |||
Adrian Buehlmann
|
def checkportable(ui, f):
    '''Check if filename f is portable and warn or abort depending on config'''
    checkfilename(f)
    abort, warn = checkportabilityalert(ui)
    # guard clauses: nothing to do when neither action is configured or the
    # name is Windows-safe
    if not (abort or warn):
        return
    msg = util.checkwinfilename(f)
    if not msg:
        return
    msg = "%s: %s" % (msg, procutil.shellquote(f))
    if abort:
        raise error.Abort(msg)
    ui.warn(_("warning: %s\n") % msg)
Kevin Gessner
|
r14068 | |||
Kevin Gessner
|
def checkportabilityalert(ui):
    '''check if the user's config requests nothing, a warning, or abort for
    non-portable filenames'''
    val = ui.config('ui', 'portablefilenames')
    lowered = val.lower()
    parsed = stringutil.parsebool(val)
    # Windows always aborts, since the filesystem itself rejects such names
    abort = pycompat.iswindows or lowered == 'abort'
    warn = bool(parsed) or lowered == 'warn'
    if parsed is None and not (warn or abort or lowered == 'ignore'):
        raise error.ConfigError(
            _("ui.portablefilenames value is invalid ('%s')") % val)
    return abort, warn
Adrian Buehlmann
|
class casecollisionauditor(object):
    '''Detect case-folding collisions among filenames being added.

    Warns (or aborts, per the "abort" flag) when a new filename differs
    from a tracked or previously-seen file only by case.
    '''

    def __init__(self, ui, abort, dirstate):
        self._ui = ui
        self._abort = abort
        # lower-case every tracked filename in one pass by joining on NUL
        joined = '\0'.join(dirstate._map)
        self._loweredfiles = set(encoding.lower(joined).split('\0'))
        self._dirstate = dirstate
        # The purpose of _newfiles is so that we don't complain about
        # case collisions if someone were to call this object with the
        # same filename twice.
        self._newfiles = set()

    def __call__(self, f):
        if f in self._newfiles:
            return
        folded = encoding.lower(f)
        if folded in self._loweredfiles and f not in self._dirstate:
            msg = _('possible case-folding collision for %s') % f
            if self._abort:
                raise error.Abort(msg)
            self._ui.warn(_("warning: %s\n") % msg)
        self._loweredfiles.add(folded)
        self._newfiles.add(f)
Adrian Buehlmann
|
r13970 | |||
Gregory Szorc
|
def filteredhash(repo, maxrev):
    """build hash of filtered revisions in the current repoview.

    Multiple caches perform up-to-date validation by checking that the
    tiprev and tipnode stored in the cache file match the current repository.
    However, this is not sufficient for validating repoviews because the set
    of revisions in the view may change without the repository tiprev and
    tipnode changing.

    This function hashes all the revs filtered from the view and returns
    that SHA-1 digest.
    """
    cl = repo.changelog
    if not cl.filteredrevs:
        return None
    relevant = [r for r in sorted(cl.filteredrevs) if r <= maxrev]
    if not relevant:
        return None
    sha = hashlib.sha1()
    for rev in relevant:
        sha.update('%d;' % rev)
    return sha.digest()
Adrian Buehlmann
|
def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    '''yield every hg repository under path, always recursively.
    The recurse flag will only control recursion into repo working dirs'''
    def onwalkerror(err):
        # a failure on the walk root is fatal; deeper errors are skipped
        if err.filename == path:
            raise err

    samestat = getattr(os.path, 'samestat', None)
    if followsym and samestat is not None:
        def adddir(dirlst, dirname):
            # remember dirname's stat; return False when a directory with
            # the same identity was already recorded (symlink cycle guard)
            dirstat = os.stat(dirname)
            if any(samestat(dirstat, prior) for prior in dirlst):
                return False
            dirlst.append(dirstat)
            return True
    else:
        # without samestat we cannot detect cycles, so do not follow links
        followsym = False

    if (seen_dirs is None) and followsym:
        seen_dirs = []
        adddir(seen_dirs, path)

    for root, dirs, files in os.walk(path, topdown=True,
                                     onerror=onwalkerror):
        dirs.sort()
        if '.hg' in dirs:
            yield root # found a repository
            qroot = os.path.join(root, '.hg', 'patches')
            if os.path.isdir(os.path.join(qroot, '.hg')):
                yield qroot # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove('.hg')
            else:
                dirs[:] = [] # don't descend further
        elif followsym:
            newdirs = []
            for name in dirs:
                fname = os.path.join(root, name)
                if adddir(seen_dirs, fname):
                    if os.path.islink(fname):
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        newdirs.append(name)
            dirs[:] = newdirs
Adrian Buehlmann
|
r13984 | |||
Yuya Nishihara
|
def binnode(ctx):
    """Return binary node id for a given basectx"""
    # a working-directory context has no real node; use the sentinel
    node = ctx.node()
    return wdirid if node is None else node
Yuya Nishihara
|
def intrev(ctx):
    """Return integer for a given basectx that can be used in comparison or
    arithmetic operation"""
    # the working directory has rev None; map it to the wdir sentinel rev
    rev = ctx.rev()
    return wdirrev if rev is None else rev
Yuya Nishihara
|
def formatchangeid(ctx):
    """Format changectx as '{rev}:{node|formatnode}', which is the default
    template provided by logcmdutil.changesettemplater"""
    ui = ctx.repo().ui
    return formatrevnode(ui, intrev(ctx), binnode(ctx))
def formatrevnode(ui, rev, node):
    """Format given revision and node depending on the current verbosity"""
    # full hash in debug mode, short form otherwise
    hexfunc = hex if ui.debugflag else short
    return '%d:%s' % (rev, hexfunc(node))
Martin von Zweigbergk
|
def resolvehexnodeidprefix(repo, prefix):
    """Resolve a (possibly abbreviated) hex nodeid prefix to a binary node.

    Returns None when nothing matches; re-raises
    error.AmbiguousPrefixLookupError when the prefix is ambiguous and the
    disambiguation revset (if configured) does not narrow it to one node.
    """
    # an 'x' prefix marks an explicit hex nodeid when revnum/hash
    # disambiguation is enabled; strip it before matching
    if (prefix.startswith('x') and
        repo.ui.configbool('experimental', 'revisions.prefixhexnode')):
        prefix = prefix[1:]
    try:
        # Uses unfiltered repo because it's faster when prefix is ambiguous/
        # This matches the shortesthexnodeidprefix() function below.
        node = repo.unfiltered().changelog._partialmatch(prefix)
    except error.AmbiguousPrefixLookupError:
        revset = repo.ui.config('experimental', 'revisions.disambiguatewithin')
        if revset:
            # Clear config to avoid infinite recursion
            configoverrides = {('experimental',
                                'revisions.disambiguatewithin'): None}
            with repo.ui.configoverride(configoverrides):
                revs = repo.anyrevs([revset], user=True)
                matches = []
                for rev in revs:
                    node = repo.changelog.node(rev)
                    if hex(node).startswith(prefix):
                        matches.append(node)
                # only a unique match within the revset resolves the
                # ambiguity; otherwise propagate the original error
                if len(matches) == 1:
                    return matches[0]
        raise
    if node is None:
        return
    repo.changelog.rev(node)  # make sure node isn't filtered
    return node
Martin von Zweigbergk
|
def mayberevnum(repo, prefix):
    """Checks if the given prefix may be mistaken for a revision number"""
    try:
        i = int(prefix)
    except ValueError:
        return False
    # a pure int with a leading zero (other than '0' itself) is never read
    # as a revnum, nor is anything at or past the tip rev; but '0' alone
    # *is* a valid revnum and needs disambiguation.
    if prefix != b'0' and prefix[0:1] == b'0':
        return False
    return i < len(repo)
Martin von Zweigbergk
|
def shortesthexnodeidprefix(repo, node, minlength=1, cache=None):
    """Find the shortest unambiguous prefix that matches hexnode.

    If "cache" is not None, it must be a dictionary that can be used for
    caching between calls to this method.

    Raises error.RepoLookupError when the node is not found.
    """
    # _partialmatch() of filtered changelog could take O(len(repo)) time,
    # which would be unacceptably slow. so we look for hash collision in
    # unfiltered space, which means some hashes may be slightly longer.

    minlength=max(minlength, 1)

    def disambiguate(prefix):
        """Disambiguate against revnums."""
        if repo.ui.configbool('experimental', 'revisions.prefixhexnode'):
            # with prefixhexnode, a revnum-looking prefix gets an 'x' marker
            # instead of being lengthened
            if mayberevnum(repo, prefix):
                return 'x' + prefix
            else:
                return prefix

        # otherwise extend the prefix until it can no longer be mistaken
        # for a revision number
        hexnode = hex(node)
        for length in range(len(prefix), len(hexnode) + 1):
            prefix = hexnode[:length]
            if not mayberevnum(repo, prefix):
                return prefix

    cl = repo.unfiltered().changelog
    revset = repo.ui.config('experimental', 'revisions.disambiguatewithin')
    if revset:
        revs = None
        if cache is not None:
            revs = cache.get('disambiguationrevset')
        if revs is None:
            revs = repo.anyrevs([revset], user=True)
            if cache is not None:
                cache['disambiguationrevset'] = revs
        if cl.rev(node) in revs:
            hexnode = hex(node)
            nodetree = None
            if cache is not None:
                nodetree = cache.get('disambiguationnodetree')
            if not nodetree:
                try:
                    nodetree = parsers.nodetree(cl.index, len(revs))
                except AttributeError:
                    # no native nodetree
                    pass
                else:
                    for r in revs:
                        nodetree.insert(r)
                    if cache is not None:
                        cache['disambiguationnodetree'] = nodetree
            if nodetree is not None:
                # the native nodetree knows the shortest unique length
                # within the inserted revs
                length = max(nodetree.shortest(node), minlength)
                prefix = hexnode[:length]
                return disambiguate(prefix)
            # pure-Python fallback: grow the prefix until it matches only
            # one node within the disambiguation revset
            for length in range(minlength, len(hexnode) + 1):
                matches = []
                prefix = hexnode[:length]
                for rev in revs:
                    otherhexnode = repo[rev].hex()
                    if prefix == otherhexnode[:length]:
                        matches.append(otherhexnode)
                if len(matches) == 1:
                    return disambiguate(prefix)

    try:
        return disambiguate(cl.shortest(node, minlength))
    except error.LookupError:
        raise error.RepoLookupError()
Martin von Zweigbergk
|
r37698 | |||
Martin von Zweigbergk
|
def isrevsymbol(repo, symbol):
    """Checks if a symbol exists in the repo.

    See revsymbol() for details. Raises error.AmbiguousPrefixLookupError if the
    symbol is an ambiguous nodeid prefix.
    """
    try:
        revsymbol(repo, symbol)
    except error.RepoLookupError:
        return False
    else:
        return True
Martin von Zweigbergk
|
def revsymbol(repo, symbol):
    """Returns a context given a single revision symbol (as string).

    This is similar to revsingle(), but accepts only a single revision symbol,
    i.e. things like ".", "tip", "1234", "deadbeef", "my-bookmark" work, but
    not "max(public())".

    Lookup order: special names, revision number, full 40-char hex nodeid,
    registered names (bookmarks/tags/branches), then hex nodeid prefix.
    """
    if not isinstance(symbol, bytes):
        msg = ("symbol (%s of type %s) was not a string, did you mean "
               "repo[symbol]?" % (symbol, type(symbol)))
        raise error.ProgrammingError(msg)
    try:
        if symbol in ('.', 'tip', 'null'):
            return repo[symbol]

        try:
            # reject things like '01' or '+5' that int() would accept but
            # are not canonical revision numbers
            r = int(symbol)
            if '%d' % r != symbol:
                raise ValueError
            l = len(repo.changelog)
            if r < 0:
                # negative revs count back from tip
                r += l
            if r < 0 or r >= l and r != wdirrev:
                raise ValueError
            return repo[r]
        except error.FilteredIndexError:
            # a filtered rev must surface as such, not fall through
            raise
        except (ValueError, OverflowError, IndexError):
            pass

        if len(symbol) == 40:
            try:
                # full binary nodeid lookup
                node = bin(symbol)
                rev = repo.changelog.rev(node)
                return repo[rev]
            except error.FilteredLookupError:
                raise
            except (TypeError, LookupError):
                pass

        # look up bookmarks through the name interface
        try:
            node = repo.names.singlenode(repo, symbol)
            rev = repo.changelog.rev(node)
            return repo[rev]
        except KeyError:
            pass

        node = resolvehexnodeidprefix(repo, symbol)
        if node is not None:
            rev = repo.changelog.rev(node)
            return repo[rev]

        raise error.RepoLookupError(_("unknown revision '%s'") % symbol)

    except error.WdirUnsupported:
        return repo[None]
    except (error.FilteredIndexError, error.FilteredLookupError,
            error.FilteredRepoLookupError):
        raise _filterederror(repo, symbol)
def _filterederror(repo, changeid):
    """Return an exception describing why ``changeid`` is filtered out.

    Kept as a standalone function so that extensions (eg: evolve) can
    experiment with various message variants."""
    if not repo.filtername.startswith('visible'):
        # Not the "visible" filter: report which subset excluded the rev.
        msg = _("filtered revision '%s' (not in '%s' subset)")
        msg %= (changeid, repo.filtername)
        return error.FilteredRepoLookupError(msg)
    # The changeset is hidden; resolve it on the unfiltered repo to see
    # whether obsolescence explains why it is not visible.
    ctx = revsymbol(repo.unfiltered(), changeid)
    if ctx.obsolete():
        msg = obsutil._getfilteredreason(repo, changeid, ctx)
    else:
        msg = _("hidden revision '%s'") % changeid
    hint = _('use --hidden to access hidden revisions')
    return error.FilteredRepoLookupError(msg, hint=hint)
Martin von Zweigbergk
|
r37289 | |||
Jun Wu
|
def revsingle(repo, revspec, default='.', localalias=None):
    """Resolve ``revspec`` to a single changectx, or ``default`` if empty."""
    # Revision number 0 is falsy but is still a valid, explicit spec.
    if not revspec and revspec != 0:
        return repo[default]

    matched = revrange(repo, [revspec], localalias=localalias)
    if not matched:
        raise error.Abort(_('empty revision set'))
    return repo[matched.last()]
Matt Mackall
|
r14319 | |||
Yuya Nishihara
|
def _pairspec(revspec):
    """Report whether ``revspec`` parses to a top-level range expression."""
    parsed = revsetlang.parse(revspec)
    rangekinds = ('range', 'rangepre', 'rangepost', 'rangeall')
    return parsed and parsed[0] in rangekinds
Matt Mackall
|
def revpair(repo, revs):
    """Resolve user revision specs to a (first, second) pair of contexts.

    With no specs, the pair is ('.', working directory).  A single
    non-range spec is paired with the working directory context."""
    if not revs:
        return repo['.'], repo[None]

    resolved = revrange(repo, revs)
    if not resolved:
        raise error.Abort(_('empty revision range'))

    first = resolved.first()
    second = resolved.last()

    if first == second:
        # Multiple specs collapsing onto one rev is fine unless one of
        # them matched nothing at all.
        if (len(revs) >= 2
            and not all(revrange(repo, [r]) for r in revs)):
            raise error.Abort(_('empty revision on one side of range'))

        # if top-level is range expression, the result must always be a pair
        if len(revs) == 1 and not _pairspec(revs[0]):
            return repo[first], repo[None]

    return repo[first], repo[second]
Matt Mackall
|
r14319 | |||
Jun Wu
|
def revrange(repo, specs, localalias=None):
    """Execute 1 to many revsets and return the union.

    This is the preferred mechanism for executing revsets using user-specified
    config options, such as revset aliases.

    The revsets specified by ``specs`` will be executed via a chained ``OR``
    expression. If ``specs`` is empty, an empty result is returned.

    ``specs`` can contain integers, in which case they are assumed to be
    revision numbers.

    It is assumed the revsets are already formatted. If you have arguments
    that need to be expanded in the revset, call ``revsetlang.formatspec()``
    and pass the result as an element of ``specs``.

    Specifying a single revset is allowed.

    Returns a ``revset.abstractsmartset`` which is a list-like interface over
    integer revisions.
    """
    # Bare integers are turned into literal revision-number specs.
    allspecs = [revsetlang.formatspec('%d', spec)
                if isinstance(spec, int) else spec
                for spec in specs]
    return repo.anyrevs(allspecs, user=True, localalias=localalias)
Matt Mackall
|
r14320 | |||
Yuya Nishihara
|
def meaningfulparents(repo, ctx):
    """Return list of meaningful (or all if debug) parentrevs for rev.

    For merges (two non-nullrev revisions) both parents are meaningful.
    Otherwise the first parent revision is considered meaningful if it
    is not the preceding revision.
    """
    parents = ctx.parents()
    if len(parents) > 1:
        return parents
    if repo.ui.debugflag:
        # In debug mode the null parent is reported explicitly too.
        return [parents[0], repo[nullrev]]
    if parents[0].rev() < intrev(ctx) - 1:
        return parents
    # Parent is simply the preceding revision: not worth reporting.
    return []
Martin von Zweigbergk
|
def getuipathfn(repo, legacyrelativevalue=False, forcerelativevalue=None):
    """Return a function mapping repo-relative paths to UI-ready paths.

    Depending on the value of ui.relative-paths, either a repo-relative or
    cwd-relative path will be produced.

    legacyrelativevalue is the value to use if ui.relative-paths=legacy

    If forcerelativevalue is not None, then that value will be used regardless
    of what ui.relative-paths is set to.
    """
    if forcerelativevalue is not None:
        relative = forcerelativevalue
    else:
        config = repo.ui.config('ui', 'relative-paths')
        if config == 'legacy':
            relative = legacyrelativevalue
        else:
            relative = stringutil.parsebool(config)
            if relative is None:
                raise error.ConfigError(
                    _("ui.relative-paths is not a boolean ('%s')") % config)

    if relative:
        cwd = repo.getcwd()
        pathto = repo.pathto
        return lambda f: pathto(f, cwd)
    if repo.ui.configbool('ui', 'slash'):
        # Keep '/' separators; repo-relative paths already use them.
        return lambda f: f
    return util.localpath
Martin von Zweigbergk
|
r41632 | |||
Martin von Zweigbergk
|
def subdiruipathfn(subpath, uipathfn):
    '''Create a new uipathfn that treats the file as relative to subpath.'''
    def prefixed(f):
        return uipathfn(posixpath.join(subpath, f))
    return prefixed
Martin von Zweigbergk
|
def anypats(pats, opts):
    '''Checks if any patterns, including --include and --exclude were given.

    Some commands (e.g. addremove) use this condition for deciding whether to
    print absolute or relative paths.
    '''
    if pats:
        return True
    return bool(opts.get('include') or opts.get('exclude'))
Matt Mackall
|
def expandpats(pats):
    '''Expand bare globs when running on windows.
    On posix we assume it already has already been done by sh.'''
    if not util.expandglobs:
        return list(pats)
    expanded = []
    for kindpat in pats:
        kind, pat = matchmod._patsplit(kindpat, None)
        if kind is not None:
            # Explicitly-kinded patterns are never globbed.
            expanded.append(kindpat)
            continue
        try:
            matches = glob.glob(pat)
        except re.error:
            matches = [pat]
        if matches:
            expanded.extend(matches)
        else:
            # Nothing on disk matched: keep the original pattern as-is.
            expanded.append(kindpat)
    return expanded
Pierre-Yves David
|
def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath',
                 badfn=None):
    '''Return a matcher and the patterns that were used.

    Bad matches are reported through ``badfn`` when provided; otherwise
    they are warned about on the repo's ui.'''
    if opts is None:
        opts = {}
    if not globbed and default == 'relpath':
        pats = expandpats(pats or [])

    uipathfn = getuipathfn(ctx.repo(), legacyrelativevalue=True)
    if badfn is None:
        def badfn(f, msg):
            ctx.repo().ui.warn("%s: %s\n" % (uipathfn(f), msg))

    m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
                  default, listsubrepos=opts.get('subrepos'), badfn=badfn)

    if m.always():
        # A match-everything matcher means no effective patterns remain.
        pats = []
    return m, pats
Pierre-Yves David
|
def match(ctx, pats=(), opts=None, globbed=False, default='relpath',
          badfn=None):
    '''Return a matcher that will warn about bad matches.'''
    m, _pats = matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)
    return m
Matt Mackall
|
r14320 | |||
def matchall(repo):
    '''Return a matcher that will efficiently match everything.'''
    # The repo argument is unused; it is kept for interface symmetry
    # with the other matcher factories here.
    return matchmod.always()
Matt Mackall
|
r14320 | |||
Matt Harbison
|
def matchfiles(repo, files, badfn=None):
    '''Return a matcher that will efficiently match exactly these files.'''
    # The repo argument is unused; it is kept for interface symmetry
    # with the other matcher factories here.
    return matchmod.exact(files, badfn=badfn)
Matt Mackall
|
r14320 | |||
Denis Laxalde
|
def parsefollowlinespattern(repo, rev, pat, msg):
    """Return a file name from `pat` pattern suitable for usage in followlines
    logic.

    A bare path is canonicalized directly; a kinded pattern must match
    exactly one file in `rev`, otherwise a ParseError carrying `msg` is
    raised.
    """
    if not matchmod.patkind(pat):
        # Plain path: no matching needed, just normalize it.
        return pathutil.canonpath(repo.root, repo.getcwd(), pat)
    ctx = repo[rev]
    m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx)
    matched = [f for f in ctx if m(f)]
    if len(matched) != 1:
        raise error.ParseError(msg)
    return matched[0]
Boris Feld
|
def getorigvfs(ui, repo):
    """return a vfs suitable to save 'orig' file

    return None if no special directory is configured"""
    configured = ui.config('ui', 'origbackuppath')
    if configured:
        return vfs.vfs(repo.wvfs.join(configured))
    return None
Martin von Zweigbergk
|
def backuppath(ui, repo, filepath):
    '''customize where working copy backup files (.orig files) are created

    Fetch user defined path from config file: [ui] origbackuppath = <path>
    Fall back to default (filepath with .orig suffix) if not specified

    filepath is repo-relative

    Returns an absolute path
    '''
    origvfs = getorigvfs(ui, repo)
    if origvfs is None:
        # No ui.origbackuppath configured: default to "<filepath>.orig"
        # next to the file itself.
        return repo.wjoin(filepath + ".orig")

    origbackupdir = origvfs.dirname(filepath)
    if not origvfs.isdir(origbackupdir) or origvfs.islink(origbackupdir):
        ui.note(_('creating directory: %s\n') % origvfs.join(origbackupdir))

        # Remove any files that conflict with the backup file's path
        # (walk ancestor directories outermost-first; unlinking the first
        # conflicting entry is enough for makedirs below to succeed).
        for f in reversed(list(util.finddirs(filepath))):
            if origvfs.isfileorlink(f):
                ui.note(_('removing conflicting file: %s\n')
                        % origvfs.join(f))
                origvfs.unlink(f)
                break

        origvfs.makedirs(origbackupdir)

    if origvfs.isdir(filepath) and not origvfs.islink(filepath):
        # A directory sits where the backup file must go; clear it out.
        ui.note(_('removing conflicting directory: %s\n')
                % origvfs.join(filepath))
        origvfs.rmtree(filepath, forcibly=True)

    return origvfs.join(filepath)
Jun Wu
|
r33331 | class _containsnode(object): | ||
"""proxy __contains__(node) to container.__contains__ which accepts revs""" | ||||
def __init__(self, repo, revcontainer): | ||||
self._torev = repo.changelog.rev | ||||
self._revcontains = revcontainer.__contains__ | ||||
def __contains__(self, node): | ||||
return self._revcontains(self._torev(node)) | ||||
Martin von Zweigbergk
|
def cleanupnodes(repo, replacements, operation, moves=None, metadata=None,
                 fixphase=False, targetphase=None, backup=True):
    """do common cleanups when old nodes are replaced by new nodes

    That includes writing obsmarkers or stripping nodes, and moving bookmarks.
    (we might also want to move working directory parent in the future)

    By default, bookmark moves are calculated automatically from 'replacements',
    but 'moves' can be used to override that. Also, 'moves' may include
    additional bookmark moves that should not have associated obsmarkers.

    replacements is {oldnode: [newnode]} or a iterable of nodes if they do not
    have replacements. operation is a string, like "rebase".

    metadata is dictionary containing metadata to be stored in obsmarker if
    obsolescence is enabled.

    If fixphase is true, the phases of the new nodes are adjusted (see the
    phase-calculation loop below); targetphase may only be given together
    with fixphase.  backup controls whether a bundle is saved before
    stripping / archiving.
    """
    assert fixphase or targetphase is None
    if not replacements and not moves:
        return

    # translate mapping's other forms
    if not util.safehasattr(replacements, 'items'):
        # Bare iterable of nodes: each old node has no successors.
        replacements = {(n,): () for n in replacements}
    else:
        # upgrading non tuple "source" to tuple ones for BC
        repls = {}
        for key, value in replacements.items():
            if not isinstance(key, tuple):
                key = (key,)
            repls[key] = value
        replacements = repls

    # Unfiltered repo is needed since nodes in replacements might be hidden.
    unfi = repo.unfiltered()

    # Calculate bookmark movements
    if moves is None:
        moves = {}
    for oldnodes, newnodes in replacements.items():
        for oldnode in oldnodes:
            if oldnode in moves:
                # Caller-supplied moves take precedence.
                continue
            if len(newnodes) > 1:
                # usually a split, take the one with biggest rev number
                newnode = next(unfi.set('max(%ln)', newnodes)).node()
            elif len(newnodes) == 0:
                # move bookmark backwards
                allreplaced = []
                for rep in replacements:
                    allreplaced.extend(rep)
                roots = list(unfi.set('max((::%n) - %ln)', oldnode,
                                      allreplaced))
                if roots:
                    newnode = roots[0].node()
                else:
                    newnode = nullid
            else:
                newnode = newnodes[0]
            moves[oldnode] = newnode

    allnewnodes = [n for ns in replacements.values() for n in ns]
    toretract = {}
    toadvance = {}
    if fixphase:
        # Map each new node back to the old node(s) it replaces.
        precursors = {}
        for oldnodes, newnodes in replacements.items():
            for oldnode in oldnodes:
                for newnode in newnodes:
                    precursors.setdefault(newnode, []).append(oldnode)

        # Process new nodes in rev order so a parent's phase is computed
        # before its children consult it via phase() below.
        allnewnodes.sort(key=lambda n: unfi[n].rev())
        newphases = {}
        def phase(ctx):
            return newphases.get(ctx.node(), ctx.phase())
        for newnode in allnewnodes:
            ctx = unfi[newnode]
            parentphase = max(phase(p) for p in ctx.parents())
            if targetphase is None:
                oldphase = max(unfi[oldnode].phase()
                               for oldnode in precursors[newnode])
                newphase = max(oldphase, parentphase)
            else:
                newphase = max(targetphase, parentphase)
            newphases[newnode] = newphase
            if newphase > ctx.phase():
                toretract.setdefault(newphase, []).append(newnode)
            elif newphase < ctx.phase():
                toadvance.setdefault(newphase, []).append(newnode)

    with repo.transaction('cleanup') as tr:
        # Move bookmarks
        bmarks = repo._bookmarks
        bmarkchanges = []
        for oldnode, newnode in moves.items():
            oldbmarks = repo.nodebookmarks(oldnode)
            if not oldbmarks:
                continue
            from . import bookmarks # avoid import cycle
            repo.ui.debug('moving bookmarks %r from %s to %s\n' %
                          (pycompat.rapply(pycompat.maybebytestr, oldbmarks),
                           hex(oldnode), hex(newnode)))
            # Delete divergent bookmarks being parents of related newnodes
            deleterevs = repo.revs('parents(roots(%ln & (::%n))) - parents(%n)',
                                   allnewnodes, newnode, oldnode)
            deletenodes = _containsnode(repo, deleterevs)
            for name in oldbmarks:
                bmarkchanges.append((name, newnode))
                for b in bookmarks.divergent2delete(repo, deletenodes, name):
                    bmarkchanges.append((b, None))

        if bmarkchanges:
            bmarks.applychanges(repo, tr, bmarkchanges)

        # Apply the phase adjustments computed above, inside the transaction.
        for phase, nodes in toretract.items():
            phases.retractboundary(repo, tr, phase, nodes)
        for phase, nodes in toadvance.items():
            phases.advanceboundary(repo, tr, phase, nodes)

        mayusearchived = repo.ui.config('experimental', 'cleanup-as-archived')
        # Obsolete or strip nodes
        if obsolete.isenabled(repo, obsolete.createmarkersopt):
            # If a node is already obsoleted, and we want to obsolete it
            # without a successor, skip that obssolete request since it's
            # unnecessary. That's the "if s or not isobs(n)" check below.
            # Also sort the node in topology order, that might be useful for
            # some obsstore logic.
            # NOTE: the sorting might belong to createmarkers.
            torev = unfi.changelog.rev
            sortfunc = lambda ns: torev(ns[0][0])
            rels = []
            for ns, s in sorted(replacements.items(), key=sortfunc):
                rel = (tuple(unfi[n] for n in ns), tuple(unfi[m] for m in s))
                rels.append(rel)
            if rels:
                obsolete.createmarkers(repo, rels, operation=operation,
                                       metadata=metadata)
        elif phases.supportinternal(repo) and mayusearchived:
            # this assume we do not have "unstable" nodes above the cleaned ones
            allreplaced = set()
            for ns in replacements.keys():
                allreplaced.update(ns)
            if backup:
                from . import repair # avoid import cycle
                node = min(allreplaced, key=repo.changelog.rev)
                repair.backupbundle(repo, allreplaced, allreplaced, node,
                                    operation)
            phases.retractboundary(repo, tr, phases.archived, allreplaced)
        else:
            from . import repair # avoid import cycle
            tostrip = list(n for ns in replacements for n in ns)
            if tostrip:
                repair.delayedstrip(repo.ui, repo, tostrip, operation,
                                    backup=backup)
Jun Wu
|
r33088 | |||
Martin von Zweigbergk
|
def addremove(repo, matcher, prefix, uipathfn, opts=None):
    """Add new files and forget missing ones, recursing into subrepos.

    When opts['similarity'] is given (0-100), renames between removed and
    added files are detected and recorded.  opts['dry_run'] reports what
    would happen without changing the dirstate.  Returns 1 if any file
    matched explicitly could not be processed (or a subrepo reported a
    failure), 0 otherwise.
    """
    if opts is None:
        opts = {}
    m = matcher
    dry_run = opts.get('dry_run')
    try:
        similarity = float(opts.get('similarity') or 0)
    except ValueError:
        raise error.Abort(_('similarity must be a number'))
    if similarity < 0 or similarity > 100:
        raise error.Abort(_('similarity must be between 0 and 100'))
    # Internally a 0.0-1.0 ratio is used.
    similarity /= 100.0

    ret = 0
    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        submatch = matchmod.subdirmatcher(subpath, m)
        if opts.get('subrepos') or m.exact(subpath) or any(submatch.files()):
            sub = wctx.sub(subpath)
            subprefix = repo.wvfs.reljoin(prefix, subpath)
            subuipathfn = subdiruipathfn(subpath, uipathfn)
            try:
                if sub.addremove(submatch, subprefix, subuipathfn, opts):
                    ret = 1
            except error.LookupError:
                repo.ui.status(_("skipping missing subrepository: %s\n")
                               % uipathfn(subpath))

    rejected = []
    def badfn(f, msg):
        # Only report files the user named explicitly; record all of them
        # so the return value reflects the failure.
        if f in m.files():
            m.bad(f, msg)
        rejected.append(f)

    badmatch = matchmod.badmatch(m, badfn)
    added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
                                                                   badmatch)

    unknownset = set(unknown + forgotten)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        if repo.ui.verbose or not m.exact(abs):
            if abs in unknownset:
                status = _('adding %s\n') % uipathfn(abs)
                label = 'ui.addremove.added'
            else:
                status = _('removing %s\n') % uipathfn(abs)
                label = 'ui.addremove.removed'
            repo.ui.status(status, label=label)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity, uipathfn)

    if not dry_run:
        _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return ret
Matt Mackall
|
r16167 | |||
Siddharth Agarwal
|
def marktouched(repo, files, similarity=0.0):
    '''Assert that files have somehow been operated upon. files are relative to
    the repo root.'''
    rejected = []
    m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)

    if repo.ui.verbose:
        unknownset = set(unknown + forgotten)
        for abs in sorted(unknownset | set(deleted)):
            if abs in unknownset:
                repo.ui.status(_('adding %s\n') % abs)
            else:
                repo.ui.status(_('removing %s\n') % abs)

    # TODO: We should probably have the caller pass in uipathfn and apply it to
    # the messages above too. legacyrelativevalue=True is consistent with how
    # it used to work.
    uipathfn = getuipathfn(repo, legacyrelativevalue=True)
    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity, uipathfn)

    _markchanges(repo, unknown + forgotten, deleted, renames)

    if any(f in m.files() for f in rejected):
        return 1
    return 0
Siddharth Agarwal
|
def _interestingfiles(repo, matcher):
    '''Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean.

    Returns a 5-tuple of lists of repo-relative paths:
    (added, unknown, deleted, removed, forgotten).'''
    added, unknown, deleted, removed, forgotten = [], [], [], [], []
    audit_path = pathutil.pathauditor(repo.root, cached=True)

    ctx = repo[None]
    dirstate = repo.dirstate
    matcher = repo.narrowmatch(matcher, includeexact=True)
    walkresults = dirstate.walk(matcher, subrepos=sorted(ctx.substate),
                                unknown=True, ignored=False, full=False)
    # Classify each walked file by its dirstate char ('?' untracked,
    # 'r' removed, 'a' added) combined with whether it exists on disk
    # (st is the stat result, or falsy when missing).
    for abs, st in walkresults.iteritems():
        dstate = dirstate[abs]
        if dstate == '?' and audit_path.check(abs):
            unknown.append(abs)
        elif dstate != 'r' and not st:
            deleted.append(abs)
        elif dstate == 'r' and st:
            forgotten.append(abs)
        # for finding renames
        elif dstate == 'r' and not st:
            removed.append(abs)
        elif dstate == 'a':
            added.append(abs)

    return added, unknown, deleted, removed, forgotten
Siddharth Agarwal
|
r19150 | |||
Martin von Zweigbergk
|
def _findrenames(repo, matcher, added, removed, similarity, uipathfn):
    '''Find renames from removed files to added ones.'''
    if similarity <= 0:
        return {}
    renames = {}
    for old, new, score in similar.findrenames(repo, added, removed,
                                               similarity):
        if (repo.ui.verbose or not matcher.exact(old)
            or not matcher.exact(new)):
            repo.ui.status(_('recording removal of %s as rename to %s '
                             '(%d%% similar)\n') %
                           (uipathfn(old), uipathfn(new),
                            score * 100))
        renames[new] = old
    return renames
Siddharth Agarwal
|
def _markchanges(repo, unknown, deleted, renames):
    '''Marks the files in unknown as added, the files in deleted as removed,
    and the files in renames as copied.'''
    wctx = repo[None]
    with repo.wlock():
        wctx.forget(deleted)
        wctx.add(unknown)
        # renames maps destination -> source.
        for dst, src in renames.iteritems():
            wctx.copy(src, dst)
Martin von Zweigbergk
|
def getrenamedfn(repo, endrev=None):
    # Return a getrenamed(fn, rev) callable reporting the copy source of
    # file fn at revision rev, or None.  Two strategies: a direct
    # changeset-centric lookup when the repo uses that algorithm, or a
    # filelog-scanning one with a per-file cache otherwise.
    if copiesmod.usechangesetcentricalgo(repo):
        def getrenamed(fn, rev):
            ctx = repo[rev]
            p1copies = ctx.p1copies()
            if fn in p1copies:
                return p1copies[fn]
            p2copies = ctx.p2copies()
            if fn in p2copies:
                return p2copies[fn]
            return None
        return getrenamed

    rcache = {}
    if endrev is None:
        endrev = len(repo)

    def getrenamed(fn, rev):
        '''looks up all renames for a file (up to endrev) the first
        time the file is given. It indexes on the changerev and only
        parses the manifest if linkrev != changerev.

        Returns rename info for fn at changerev rev.'''
        if fn not in rcache:
            # First request for this file: scan its filelog once and
            # record the rename info keyed by linkrev.
            rcache[fn] = {}
            fl = repo.file(fn)
            for i in fl:
                lr = fl.linkrev(i)
                renamed = fl.renamed(fl.node(i))
                rcache[fn][lr] = renamed and renamed[0]
                if lr >= endrev:
                    break
        if rev in rcache[fn]:
            return rcache[fn][rev]

        # If linkrev != rev (i.e. rev not found in rcache) fallback to
        # filectx logic.
        try:
            return repo[rev][fn].copysource()
        except error.LookupError:
            return None

    return getrenamed
Martin von Zweigbergk
|
def getcopiesfn(repo, endrev=None):
    """Return a function reporting the copies recorded in a context as a
    sorted list of (dest, source) pairs."""
    if copiesmod.usechangesetcentricalgo(repo):
        def copiesfn(ctx):
            p2copies = ctx.p2copies()
            allcopies = dict(ctx.p1copies())
            if p2copies:
                # There should be no overlap
                allcopies.update(p2copies)
            return sorted(allcopies.items())
    else:
        getrenamed = getrenamedfn(repo, endrev)
        def copiesfn(ctx):
            rev = ctx.rev()
            pairs = ((fn, getrenamed(fn, rev)) for fn in ctx.files())
            return [(fn, src) for fn, src in pairs if src]

    return copiesfn
Matt Mackall
|
def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.
    """
    ds = repo.dirstate
    origsrc = ds.copied(src) or src
    if dst == origsrc: # copying back a copy?
        if ds[dst] not in 'mn' and not dryrun:
            ds.normallookup(dst)
        return
    if ds[origsrc] == 'a' and origsrc == src:
        # Source is an uncommitted add: no copy data can be recorded.
        if not ui.quiet:
            ui.warn(_("%s has not been committed yet, so no copy "
                      "data will be stored for %s.\n")
                    % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
        if ds[dst] in '?r' and not dryrun:
            wctx.add([dst])
    elif not dryrun:
        wctx.copy(origsrc, dst)
Adrian Buehlmann
|
r14482 | |||
Martin von Zweigbergk
|
def movedirstate(repo, newctx, match=None):
    """Move the dirstate to newctx and adjust it as necessary.

    A matcher can be provided as an optimization. It is probably a bug to pass
    a matcher that doesn't match all the differences between the parent of the
    working copy and newctx.
    """
    oldctx = repo['.']
    ds = repo.dirstate
    ds.setparents(newctx.node(), nullid)
    # Snapshot the current copy records before we start mutating states.
    copies = dict(ds.copies())
    s = newctx.status(oldctx, match=match)
    # For each status bucket, combine the change relative to newctx with
    # the file's current dirstate char to pick the new state.
    for f in s.modified:
        if ds[f] == 'r':
            # modified + removed -> removed
            continue
        ds.normallookup(f)
    for f in s.added:
        if ds[f] == 'r':
            # added + removed -> unknown
            ds.drop(f)
        elif ds[f] != 'a':
            ds.add(f)
    for f in s.removed:
        if ds[f] == 'a':
            # removed + added -> normal
            ds.normallookup(f)
        elif ds[f] != 'r':
            ds.remove(f)

    # Merge old parent and old working dir copies
    oldcopies = copiesmod.pathcopies(newctx, oldctx, match)
    oldcopies.update(copies)
    # Chain copies through the old parent: dst's source becomes the
    # source's own source when one is recorded.
    copies = dict((dst, oldcopies.get(src, src))
                  for dst, src in oldcopies.iteritems())
    # Adjust the dirstate copies
    for dst, src in copies.iteritems():
        if (src not in newctx or dst in newctx or ds[dst] != 'a'):
            # Copy record no longer applies against the new parent.
            src = None
        ds.copy(src, dst)
Drew Gottlieb
|
def writerequires(opener, requirements):
    """Write the sorted requirements, one per line, via an atomic temp file."""
    with opener('requires', 'w', atomictemp=True) as fp:
        for requirement in sorted(requirements):
            fp.write("%s\n" % requirement)
Drew Gottlieb
|
r24934 | |||
Siddharth Agarwal
|
class filecachesubentry(object):
    # Tracks the cached stat of a single file path so callers can ask
    # whether the file changed since the last refresh.

    def __init__(self, path, stat):
        self.path = path
        # cachestat holds the last observed util.cachestat, or None.
        self.cachestat = None
        # _cacheable is True/False once known, None while undetermined.
        self._cacheable = None

        if stat:
            self.cachestat = filecachesubentry.stat(self.path)

            if self.cachestat:
                self._cacheable = self.cachestat.cacheable()
            else:
                # None means we don't know yet
                self._cacheable = None

    def refresh(self):
        # Re-stat the path; pointless if the file is known uncacheable.
        if self.cacheable():
            self.cachestat = filecachesubentry.stat(self.path)

    def cacheable(self):
        if self._cacheable is not None:
            return self._cacheable

        # we don't know yet, assume it is for now
        return True

    def changed(self):
        # no point in going further if we can't cache it
        if not self.cacheable():
            return True

        newstat = filecachesubentry.stat(self.path)

        # we may not know if it's cacheable yet, check again now
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

            # check again
            if not self._cacheable:
                return True

        if self.cachestat != newstat:
            self.cachestat = newstat
            return True
        else:
            return False

    @staticmethod
    def stat(path):
        # Returns a util.cachestat for path, or None (implicitly) when the
        # file does not exist; other OS errors propagate.
        try:
            return util.cachestat(path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
Siddharth Agarwal
|
class filecacheentry(object):
    """Aggregates filecachesubentry objects for a set of paths."""

    def __init__(self, paths, stat=True):
        self._entries = [filecachesubentry(path, stat) for path in paths]

    def changed(self):
        '''true if any entry has changed'''
        return any(subentry.changed() for subentry in self._entries)

    def refresh(self):
        """Refresh the stat data of every tracked path."""
        for subentry in self._entries:
            subentry.refresh()
Idan Kamara
|
class filecache(object):
    """A property like decorator that tracks files under .hg/ for updates.

    On first access, the files defined as arguments are stat()ed and the
    results cached. The decorated function is called. The results are stashed
    away in a ``_filecache`` dict on the object whose method is decorated.

    On subsequent access, the cached result is used as it is set to the
    instance dictionary.

    On external property set/delete operations, the caller must update the
    corresponding _filecache entry appropriately. Use __class__.<attr>.set()
    instead of directly setting <attr>.

    When using the property API, the cached data is always used if available.
    No stat() is performed to check if the file has changed.

    Others can muck about with the state of the ``_filecache`` dict. e.g. they
    can populate an entry before the property's getter is called. In this case,
    entries in ``_filecache`` will be used during property operations,
    if available. If the underlying file changes, it is up to external callers
    to reflect this by e.g. calling ``delattr(obj, attr)`` to remove the cached
    method result as well as possibly calling ``del obj._filecache[attr]`` to
    remove the ``filecacheentry``.
    """

    def __init__(self, *paths):
        # relative paths of the files backing the cached value; resolved to
        # runtime paths through join() at access time
        self.paths = paths

    def join(self, obj, fname):
        """Used to compute the runtime path of a cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class that its member function was decorated).
        """
        raise NotImplementedError

    def __call__(self, func):
        # decorator entry point: remember the wrapped function and derive
        # both the native-str attribute name (sname) and the bytes key
        # (name) used in the _filecache dict
        self.func = func
        self.sname = func.__name__
        self.name = pycompat.sysbytes(self.sname)
        return self

    def __get__(self, obj, type=None):
        # if accessed on the class, return the descriptor itself.
        if obj is None:
            return self

        # this is a non-data descriptor (no __set__), so an instance
        # __dict__ entry takes precedence over it; if the value were
        # already cached on the instance this getter could not run
        assert self.sname not in obj.__dict__

        entry = obj._filecache.get(self.name)

        if entry:
            if entry.changed():
                # backing file changed on disk: recompute the value
                entry.obj = self.func(obj)
        else:
            paths = [self.join(obj, path) for path in self.paths]

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified between the time we read and stat
            entry = filecacheentry(paths, True)
            entry.obj = self.func(obj)

            obj._filecache[self.name] = entry

        # cache on the instance so future reads bypass this descriptor
        obj.__dict__[self.sname] = entry.obj
        return entry.obj

    # don't implement __set__(), which would make __dict__ lookup as slow as
    # function call.
    def set(self, obj, value):
        if self.name not in obj._filecache:
            # we add an entry for the missing value because X in __dict__
            # implies X in _filecache
            paths = [self.join(obj, path) for path in self.paths]
            # stat=False: the caller supplied the value, so no stat data yet
            ce = filecacheentry(paths, False)
            obj._filecache[self.name] = ce
        else:
            ce = obj._filecache[self.name]

        ce.obj = value # update cached copy
        obj.__dict__[self.sname] = value # update copy returned by obj.x
Idan Kamara
|
r16115 | |||
Matt Mackall
|
def extdatasource(repo, source):
    """Gather a map of rev -> value dict from the specified source

    A source spec is treated as a URL, with a special case shell: type
    for parsing the output from a shell command.

    The data is parsed as a series of newline-separated records where
    each record is a revision specifier optionally followed by a space
    and a freeform string value. If the revision is known locally, it
    is converted to a rev, otherwise the record is skipped.

    Note that both key and value are treated as UTF-8 and converted to
    the local encoding. This allows uniformity between local and
    remote data sources.
    """
    spec = repo.ui.config("extdata", source)
    if not spec:
        raise error.Abort(_("unknown extdata source '%s'") % source)

    data = {}
    src = proc = None
    try:
        if spec.startswith("shell:"):
            # external commands should be run relative to the repo root
            cmd = spec[6:]
            proc = subprocess.Popen(procutil.tonativestr(cmd),
                                    shell=True, bufsize=-1,
                                    close_fds=procutil.closefds,
                                    stdout=subprocess.PIPE,
                                    cwd=procutil.tonativestr(repo.root))
            src = proc.stdout
        else:
            # treat as a URL or file
            src = url.open(repo.ui, spec)
        for l in src:
            # each record is "<revspec>[ <value>]"; a missing value maps
            # to the empty string
            if " " in l:
                k, v = l.strip().split(" ", 1)
            else:
                k, v = l.strip(), ""

            k = encoding.tolocal(k)
            try:
                data[revsingle(repo, k).rev()] = encoding.tolocal(v)
            except (error.LookupError, error.RepoLookupError):
                pass # we ignore data for nodes that don't exist locally
    finally:
        # drain and reap the child process before checking its exit status
        if proc:
            try:
                proc.communicate()
            except ValueError:
                # This happens if we started iterating src and then
                # get a parse error on a line. It should be safe to ignore.
                pass
        if src:
            src.close()
    # 'cmd' is only bound in the shell: case, but so is 'proc', so the
    # message below can safely reference it
    if proc and proc.returncode != 0:
        raise error.Abort(_("extdata command '%s' failed: %s")
                          % (cmd, procutil.explainexit(proc.returncode)))

    return data
Siddharth Agarwal
|
r26490 | def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs): | ||
if lock is None: | ||||
raise error.LockInheritanceContractViolation( | ||||
'lock can only be inherited while held') | ||||
if environ is None: | ||||
environ = {} | ||||
with lock.inherit() as locker: | ||||
environ[envvar] = locker | ||||
return repo.ui.system(cmd, environ=environ, *args, **kwargs) | ||||
Siddharth Agarwal
|
r26491 | |||
def wlocksub(repo, cmd, *args, **kwargs):
    """run cmd as a subprocess that allows inheriting repo's wlock

    This can only be called while the wlock is held. This takes all the
    arguments that ui.system does, and returns the exit code of the
    subprocess."""
    wlock = repo.currentwlock()
    return _locksub(repo, wlock, 'HG_WLOCK_LOCKER', cmd,
                    *args, **kwargs)
Pierre-Yves David
|
r26906 | |||
Martin von Zweigbergk
|
class progress(object):
    """Helper to report progress to the user through ``updatebar``.

    Instances are usable as context managers; leaving the context marks the
    progress bar complete. When the 'progress.debug' config knob is set,
    every update is additionally written to the ui debug output.
    """

    def __init__(self, ui, updatebar, topic, unit="", total=None):
        self.ui = ui
        self.pos = 0
        self.topic = topic
        self.unit = unit
        self.total = total
        self.debug = ui.configbool('progress', 'debug')
        # callable invoked as (topic, pos, item, unit, total) on each change
        self._updatebar = updatebar

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        self.complete()

    def update(self, pos, item="", total=None):
        """Move the progress position to ``pos``, optionally updating total."""
        assert pos is not None
        if total:
            self.total = total
        self.pos = pos
        self._updatebar(self.topic, self.pos, item, self.unit, self.total)
        if self.debug:
            self._printdebug(item)

    def increment(self, step=1, item="", total=None):
        """Advance the progress position by ``step`` units."""
        self.update(self.pos + step, item, total)

    def complete(self):
        """Mark the progress as finished and clear the bar."""
        self.pos = None
        self.unit = ""
        self.total = None
        self._updatebar(self.topic, self.pos, "", self.unit, self.total)

    def _printdebug(self, item):
        """Write a single progress update to the ui debug output."""
        # the unit and item decorations are optional; default both to empty
        # strings so the %-formatting below never references an unbound
        # local (previously, an empty self.unit caused an UnboundLocalError)
        unit = ''
        if self.unit:
            unit = ' ' + self.unit
        if item:
            item = ' ' + item

        if self.total:
            pct = 100.0 * self.pos / self.total
            self.ui.debug('%s:%s %d/%d%s (%4.2f%%)\n'
                          % (self.topic, item, self.pos, self.total, unit, pct))
        else:
            self.ui.debug('%s:%s %d%s\n' % (self.topic, item, self.pos, unit))
Martin von Zweigbergk
|
r38364 | |||
Pierre-Yves David
|
def gdinitconfig(ui):
    """helper function to know if a repo should be created as general delta
    """
    # experimental config: format.generaldelta
    for knob in ('generaldelta', 'usegeneraldelta'):
        if ui.configbool('format', knob):
            return True
    return False
Pierre-Yves David
|
r26906 | |||
Pierre-Yves David
|
def gddeltaconfig(ui):
    """helper function to know if incoming delta should be optimised
    """
    # experimental config: format.generaldelta
    enabled = ui.configbool('format', 'generaldelta')
    return enabled
Kostia Balytskyi
|
r31553 | |||
class simplekeyvaluefile(object):
    """A simple file with key=value lines

    Keys must be alphanumerics and start with a letter, values must not
    contain '\n' characters"""

    firstlinekey = '__firstline'

    def __init__(self, vfs, path, keys=None):
        self.vfs = vfs
        self.path = path

    def read(self, firstlinenonkeyval=False):
        """Read the contents of a simple key-value file

        'firstlinenonkeyval' indicates whether the first line of file should
        be treated as a key-value pair or returned fully under the
        __firstline key."""
        contents = self.vfs.readlines(self.path)
        result = {}
        if firstlinenonkeyval:
            if not contents:
                raise error.CorruptedState(_("empty simplekeyvalue file"))
            # strip the trailing '\n' from the stored first line
            result[self.firstlinekey] = contents[0][:-1]
            contents = contents[1:]
        try:
            # skip lines that are empty or contain only a '\n'; a plain
            # 'if line' test would not filter the latter out
            parsed = dict(line[:-1].split('=', 1)
                          for line in contents if line.strip())
            if self.firstlinekey in parsed:
                raise error.CorruptedState(
                    _("%r can't be used as a key") % self.firstlinekey)
            result.update(parsed)
        except ValueError as e:
            raise error.CorruptedState(str(e))
        return result

    def write(self, data, firstline=None):
        """Write key=>value mapping to a file

        data is a dict. Keys must be alphanumerical and start with a letter.
        Values must not contain newline characters.

        If 'firstline' is not None, it is written to file before
        everything else, as it is, not in a key=value form"""
        out = []
        if firstline is not None:
            out.append('%s\n' % firstline)

        for key, value in data.items():
            if key == self.firstlinekey:
                raise error.ProgrammingError(
                    "key name '%s' is reserved" % self.firstlinekey)
            if not key[0:1].isalpha():
                raise error.ProgrammingError(
                    "keys must start with a letter in a key-value file")
            if not key.isalnum():
                raise error.ProgrammingError(
                    "invalid key name in a simple key-value file")
            if '\n' in value:
                raise error.ProgrammingError(
                    "invalid value in a simple key-value file")
            out.append("%s=%s\n" % (key, value))
        with self.vfs(self.path, mode='wb', atomictemp=True) as fp:
            fp.write(''.join(out))
r33249 | ||||
Boris Feld
|
# Transaction names (matched by prefix in registersummarycallback) after
# which the number of changesets obsoleted by the transaction is reported.
_reportobsoletedsource = [
    'debugobsolete',
    'pull',
    'push',
    'serve',
    'unbundle',
]

# Transaction names (matched by prefix) after which the range of newly
# added changesets is reported.
_reportnewcssource = [
    'pull',
    'unbundle',
]
Matt Harbison
|
def prefetchfiles(repo, revs, match):
    """Invokes the registered file prefetch functions, allowing extensions to
    ensure the corresponding files are available locally, before the command
    uses them."""
    if not match:
        match = matchall(repo)
    else:
        # The command itself will complain about files that don't exist, so
        # don't duplicate the message.
        match = matchmod.badmatch(match, lambda fn, msg: None)

    fileprefetchhooks(repo, revs, match)
# a list of (repo, revs, match) prefetch functions; each registered hook is
# invoked by prefetchfiles() above so extensions can fetch file contents
# before a command needs them
fileprefetchhooks = util.hooks()

# A marker that tells the evolve extension to suppress its own reporting
_reportstroubledchangesets = True
Boris Feld
|
def registersummarycallback(repo, otr, txnname=''):
    """register a callback to issue a summary after the transaction is closed
    """
    def txmatch(sources):
        # transaction names are matched by prefix (e.g. 'pull ...')
        return any(txnname.startswith(source) for source in sources)

    categories = []

    def reportsummary(func):
        """decorator for report callbacks."""
        # The repoview life cycle is shorter than the one of the actual
        # underlying repository. So the filtered object can die before the
        # weakref is used leading to troubles. We keep a reference to the
        # unfiltered object and restore the filtering when retrieving the
        # repository through the weakref.
        filtername = repo.filtername
        reporef = weakref.ref(repo.unfiltered())
        def wrapped(tr):
            repo = reporef()
            if filtername:
                repo = repo.filtered(filtername)
            func(repo, tr)
        # number the categories so the callbacks run in registration order
        newcat = '%02i-txnreport' % len(categories)
        otr.addpostclose(newcat, wrapped)
        categories.append(newcat)
        return wrapped

    if txmatch(_reportobsoletedsource):
        @reportsummary
        def reportobsoleted(repo, tr):
            # report how many changesets this transaction obsoleted
            obsoleted = obsutil.getobsoleted(repo, tr)
            if obsoleted:
                repo.ui.status(_('obsoleted %i changesets\n')
                               % len(obsoleted))

    if (obsolete.isenabled(repo, obsolete.createmarkersopt) and
        repo.ui.configbool('experimental', 'evolution.report-instabilities')):
        instabilitytypes = [
            ('orphan', 'orphan'),
            ('phase-divergent', 'phasedivergent'),
            ('content-divergent', 'contentdivergent'),
        ]
        def getinstabilitycounts(repo):
            # count visible instable revisions for each instability type
            filtered = repo.changelog.filteredrevs
            counts = {}
            for instability, revset in instabilitytypes:
                counts[instability] = len(set(obsolete.getrevs(repo, revset)) -
                                          filtered)
            return counts

        # snapshot the counts now; the report compares against them after
        # the transaction closes
        oldinstabilitycounts = getinstabilitycounts(repo)
        @reportsummary
        def reportnewinstabilities(repo, tr):
            newinstabilitycounts = getinstabilitycounts(repo)
            for instability, revset in instabilitytypes:
                delta = (newinstabilitycounts[instability] -
                         oldinstabilitycounts[instability])
                msg = getinstabilitymessage(delta, instability)
                if msg:
                    repo.ui.warn(msg)

    if txmatch(_reportnewcssource):
        @reportsummary
        def reportnewcs(repo, tr):
            """Report the range of new revisions pulled/unbundled."""
            origrepolen = tr.changes.get('origrepolen', len(repo))
            unfi = repo.unfiltered()
            if origrepolen >= len(unfi):
                return

            # Compute the bounds of new visible revisions' range.
            revs = smartset.spanset(repo, start=origrepolen)
            if revs:
                minrev, maxrev = repo[revs.min()], repo[revs.max()]

                if minrev == maxrev:
                    revrange = minrev
                else:
                    revrange = '%s:%s' % (minrev, maxrev)
                draft = len(repo.revs('%ld and draft()', revs))
                secret = len(repo.revs('%ld and secret()', revs))
                if not (draft or secret):
                    msg = _('new changesets %s\n') % revrange
                elif draft and secret:
                    msg = _('new changesets %s (%d drafts, %d secrets)\n')
                    msg %= (revrange, draft, secret)
                elif draft:
                    msg = _('new changesets %s (%d drafts)\n')
                    msg %= (revrange, draft)
                elif secret:
                    msg = _('new changesets %s (%d secrets)\n')
                    msg %= (revrange, secret)
                else:
                    errormsg = 'entered unreachable condition'
                    raise error.ProgrammingError(errormsg)
                repo.ui.status(msg)

            # search new changesets directly pulled as obsolete
            duplicates = tr.changes.get('revduplicates', ())
            obsadded = unfi.revs('(%d: + %ld) and obsolete()',
                                 origrepolen, duplicates)
            cl = repo.changelog
            extinctadded = [r for r in obsadded if r not in cl]
            if extinctadded:
                # They are not just obsolete, but obsolete and invisible
                # we call them "extinct" internally but the terms have not been
                # exposed to users.
                msg = '(%d other changesets obsolete on arrival)\n'
                repo.ui.status(msg % len(extinctadded))

        @reportsummary
        def reportphasechanges(repo, tr):
            """Report statistics of phase changes for changesets pre-existing
            pull/unbundle.
            """
            origrepolen = tr.changes.get('origrepolen', len(repo))
            phasetracking = tr.changes.get('phases', {})
            if not phasetracking:
                return
            # only count pre-existing revisions that became public
            published = [
                rev for rev, (old, new) in phasetracking.iteritems()
                if new == phases.public and rev < origrepolen
            ]
            if not published:
                return
            repo.ui.status(_('%d local changesets published\n')
                           % len(published))
Pulkit Goyal
|
def getinstabilitymessage(delta, instability):
    """function to return the message to show warning about new instabilities

    exists as a separate function so that extension can wrap to show more
    information like how to fix instabilities"""
    # no message (None) when the count did not grow
    if delta <= 0:
        return None
    return _('%i new %s changesets\n') % (delta, instability)
Boris Feld
|
def nodesummaries(repo, nodes, maxnumnodes=4):
    """Return a short textual summary of ``nodes``.

    All nodes are listed when there are at most ``maxnumnodes`` of them (or
    when the ui is verbose); otherwise the listing is truncated and the
    number of omitted nodes is mentioned.
    """
    showall = len(nodes) <= maxnumnodes or repo.ui.verbose
    if showall:
        return ' '.join(short(h) for h in nodes)
    shown = ' '.join(short(h) for h in nodes[:maxnumnodes])
    return _("%s and %d others") % (shown, len(nodes) - maxnumnodes)
Boris Feld
|
r35185 | |||
Boris Feld
|
def enforcesinglehead(repo, tr, desc):
    """check that no named branch has multiple heads"""
    # strip/repair transactions legitimately rewrite history; skip them
    if desc in ('strip', 'repair'):
        return

    visible = repo.filtered('visible')
    # possible improvement: we could restrict the check to affected branch
    for name, heads in visible.branchmap().iteritems():
        if len(heads) <= 1:
            continue
        msg = _('rejecting multiple heads on branch "%s"') % name
        hint = _('%d heads: %s') % (len(heads), nodesummaries(repo, heads))
        raise error.Abort(msg, hint=hint)
Matt Harbison
|
def wrapconvertsink(sink):
    """Allow extensions to wrap the sink returned by convcmd.convertsink()
    before it is used, whether or not the convert extension was formally loaded.
    """
    # the core implementation is the identity; extensions monkeypatch or
    # wrap this function to decorate the sink
    return sink
Pulkit Goyal
|
r35512 | |||
def unhidehashlikerevs(repo, specs, hiddentype):
    """parse the user specs and unhide changesets whose hash or revision number
    is passed.

    hiddentype can be: 1) 'warn': warn while unhiding changesets
                       2) 'nowarn': don't warn while unhiding changesets

    returns a repo object with the required changesets unhidden
    """
    if not repo.filtername or not repo.ui.configbool('experimental',
                                                     'directaccess'):
        return repo

    if repo.filtername not in ('visible', 'visible-hidden'):
        return repo

    # collect every hash-like symbol appearing in the parseable specs
    hashlikesymbols = set()
    for spec in specs:
        try:
            tree = revsetlang.parse(spec)
        except error.ParseError: # will be reported by scmutil.revrange()
            continue
        hashlikesymbols.update(revsetlang.gethashlikesymbols(tree))

    if not hashlikesymbols:
        return repo

    hiddenrevs = _getrevsfromsymbols(repo, hashlikesymbols)
    if not hiddenrevs:
        return repo

    if hiddentype == 'warn':
        unfi = repo.unfiltered()
        revstr = ", ".join([pycompat.bytestr(unfi[l]) for l in hiddenrevs])
        repo.ui.warn(_("warning: accessing hidden changesets for write "
                       "operation: %s\n") % revstr)

    # we have to use a new filter name to separate branch/tags cache until we
    # can disable these caches when revisions are dynamically pinned.
    return repo.filtered('visible-hidden', hiddenrevs)
def _getrevsfromsymbols(repo, symbols):
    """parse the list of symbols and returns a set of revision numbers of hidden
    changesets present in symbols"""
    revs = set()
    unfi = repo.unfiltered()
    unficl = unfi.changelog
    cl = repo.changelog
    tiprev = len(unficl)
    allowrevnums = repo.ui.configbool('experimental', 'directaccess.revnums')
    for s in symbols:
        try:
            n = int(s)
            if n <= tiprev:
                if not allowrevnums:
                    # plain revision numbers may not be used for direct
                    # access unless explicitly enabled
                    continue
                else:
                    if n not in cl:
                        # known to the unfiltered changelog but absent from
                        # the filtered one: this is a hidden changeset
                        revs.add(n)
                    continue
        except ValueError:
            pass

        # not an in-range integer: try to resolve the symbol as a hex node
        # id prefix against the unfiltered repo
        try:
            s = resolvehexnodeidprefix(unfi, s)
        except (error.LookupError, error.WdirUnsupported):
            s = None

        if s is not None:
            rev = unficl.rev(s)
            if rev not in cl:
                revs.add(rev)

    return revs
David Demelier
|
r38146 | |||
def bookmarkrevs(repo, mark):
    """
    Select revisions reachable by a given bookmark
    """
    query = ("ancestors(bookmark(%s)) - "
             "ancestors(head() and not bookmark(%s)) - "
             "ancestors(bookmark() and not bookmark(%s))")
    return repo.revs(query, mark, mark, mark)