scmutil.py
1425 lines
| 49.3 KiB
| text/x-python
|
PythonLexer
/ mercurial / scmutil.py
Adrian Buehlmann
|
r13962 | # scmutil.py - Mercurial core utility functions | ||
# | ||||
# Copyright Matt Mackall <mpm@selenic.com> | ||||
# | ||||
# This software may be used and distributed according to the terms of the | ||||
# GNU General Public License version 2 or any later version. | ||||
Gregory Szorc
|
r27482 | from __future__ import absolute_import | ||
import errno | ||||
import glob | ||||
Augie Fackler
|
r29341 | import hashlib | ||
Gregory Szorc
|
r27482 | import os | ||
import re | ||||
Jun Wu
|
r30520 | import socket | ||
Yuya Nishihara
|
r34462 | import subprocess | ||
r33249 | import weakref | |||
Gregory Szorc
|
r27482 | |||
from .i18n import _ | ||||
Yuya Nishihara
|
r32656 | from .node import ( | ||
Jun Wu
|
r33088 | hex, | ||
nullid, | ||||
Yuya Nishihara
|
r34328 | short, | ||
Yuya Nishihara
|
r32656 | wdirid, | ||
wdirrev, | ||||
) | ||||
Gregory Szorc
|
r27482 | from . import ( | ||
encoding, | ||||
error, | ||||
match as matchmod, | ||||
Jun Wu
|
r33088 | obsolete, | ||
r33249 | obsutil, | |||
Gregory Szorc
|
r27482 | pathutil, | ||
phases, | ||||
Pulkit Goyal
|
r30305 | pycompat, | ||
Yuya Nishihara
|
r31024 | revsetlang, | ||
Gregory Szorc
|
r27482 | similar, | ||
Matt Mackall
|
r34457 | url, | ||
Gregory Szorc
|
r27482 | util, | ||
Mark Thomas
|
r34544 | vfs, | ||
Gregory Szorc
|
r27482 | ) | ||
Kevin Bullock
|
r18690 | |||
Jun Wu
|
r34646 | if pycompat.iswindows: | ||
Gregory Szorc
|
r27482 | from . import scmwindows as scmplatform | ||
Kevin Bullock
|
r18690 | else: | ||
Gregory Szorc
|
r27482 | from . import scmposix as scmplatform | ||
Kevin Bullock
|
r18690 | |||
Yuya Nishihara
|
r30314 | termsize = scmplatform.termsize | ||
Adrian Buehlmann
|
r13962 | |||
Martin von Zweigbergk
|
class status(tuple):
    '''Named tuple with a list of files per status. The 'deleted', 'unknown'
    and 'ignored' properties are only relevant to the working copy.
    '''

    __slots__ = ()

    def __new__(cls, modified, added, removed, deleted, unknown, ignored,
                clean):
        items = (modified, added, removed, deleted, unknown, ignored, clean)
        return tuple.__new__(cls, items)

    @property
    def modified(self):
        '''files that have been modified'''
        return self[0]

    @property
    def added(self):
        '''files that have been added'''
        return self[1]

    @property
    def removed(self):
        '''files that have been removed'''
        return self[2]

    @property
    def deleted(self):
        '''files that are in the dirstate, but have been deleted from the
        working copy (aka "missing")
        '''
        return self[3]

    @property
    def unknown(self):
        '''files not in the dirstate that are not ignored'''
        return self[4]

    @property
    def ignored(self):
        '''files not in the dirstate that are ignored (by _dirignore())'''
        return self[5]

    @property
    def clean(self):
        '''files that have not been modified'''
        return self[6]

    def __repr__(self, *args, **kwargs):
        template = ('<status modified=%r, added=%r, removed=%r, deleted=%r, '
                    'unknown=%r, ignored=%r, clean=%r>')
        return template % self
Augie Fackler
|
def itersubrepos(ctx1, ctx2):
    """find subrepos in ctx1 or ctx2"""
    # Build a (subpath, ctx) mapping, letting ctx1 win on overlap. The
    # subpaths from ctx2 matter when the .hgsub file has been modified
    # (in ctx2) but not yet committed (in ctx1).
    subpaths = {}
    for p in ctx2.substate:
        subpaths[p] = ctx2
    for p in ctx1.substate:
        subpaths[p] = ctx1

    # Subrepos present only in ctx2 are handled separately below.
    missing = set()
    for p in ctx2.substate:
        if p not in ctx1.substate:
            del subpaths[p]
            missing.add(p)

    for subpath, ctx in sorted(subpaths.iteritems()):
        yield subpath, ctx.sub(subpath)

    # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
    # status and diff will have an accurate result when it does
    # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
    # against itself.
    for subpath in missing:
        yield subpath, ctx2.nullsub(subpath, ctx1)
Patrick Mezard
|
def nochangesfound(ui, repo, excluded=None):
    '''Report no changes for push/pull, excluded is None or a list of
    nodes excluded from the push/pull.
    '''
    # Count excluded changesets that are secret (and still alive); they
    # explain why nothing was exchanged.
    secretlist = []
    for n in (excluded or ()):
        ctx = repo[n]
        if ctx.phase() >= phases.secret and not ctx.extinct():
            secretlist.append(n)

    if secretlist:
        ui.status(_("no changes found (ignored %d secret changesets)\n")
                  % len(secretlist))
    else:
        ui.status(_("no changes found\n"))
Jun Wu
|
def callcatch(ui, func):
    """call func() with global exception handling

    return func() if no exception happens. otherwise do some error handling
    and return an exit code accordingly. does not handle all exceptions.
    """
    try:
        try:
            return func()
        except: # re-raises
            # Print the traceback (if --traceback was given) before the
            # outer handlers turn the exception into a message.
            ui.traceback()
            raise
    # Global exception handling, alphabetically
    # Mercurial-specific first, followed by built-in and library exceptions
    except error.LockHeld as inst:
        if inst.errno == errno.ETIMEDOUT:
            reason = _('timed out waiting for lock held by %r') % inst.locker
        else:
            reason = _('lock held by %r') % inst.locker
        ui.warn(_("abort: %s: %s\n")
                % (inst.desc or util.forcebytestr(inst.filename), reason))
        if not inst.locker:
            ui.warn(_("(lock might be very busy)\n"))
    except error.LockUnavailable as inst:
        ui.warn(_("abort: could not lock %s: %s\n") %
                (inst.desc or util.forcebytestr(inst.filename),
                 encoding.strtolocal(inst.strerror)))
    except error.OutOfBandError as inst:
        if inst.args:
            msg = _("abort: remote error:\n")
        else:
            msg = _("abort: remote error\n")
        ui.warn(msg)
        if inst.args:
            ui.warn(''.join(inst.args))
        if inst.hint:
            ui.warn('(%s)\n' % inst.hint)
    except error.RepoError as inst:
        ui.warn(_("abort: %s!\n") % inst)
        if inst.hint:
            ui.warn(_("(%s)\n") % inst.hint)
    except error.ResponseError as inst:
        ui.warn(_("abort: %s") % inst.args[0])
        msg = inst.args[1]
        # Normalize the payload for display: unicode -> bytes first
        # (type(u'') keeps this working on both Python 2 and 3).
        if isinstance(msg, type(u'')):
            msg = pycompat.sysbytes(msg)
        elif not isinstance(inst.args[1], bytes):
            ui.warn(" %r\n" % (inst.args[1],))
        elif not inst.args[1]:
            ui.warn(_(" empty string\n"))
        else:
            ui.warn("\n%r\n" % util.ellipsis(inst.args[1]))
    except error.CensoredNodeError as inst:
        ui.warn(_("abort: file censored %s!\n") % inst)
    except error.RevlogError as inst:
        ui.warn(_("abort: %s!\n") % inst)
    except error.InterventionRequired as inst:
        ui.warn("%s\n" % inst)
        if inst.hint:
            ui.warn(_("(%s)\n") % inst.hint)
        # 1 (not -1): the user must act, but this is not a hard failure.
        return 1
    except error.WdirUnsupported:
        ui.warn(_("abort: working directory revision cannot be specified\n"))
    except error.Abort as inst:
        ui.warn(_("abort: %s\n") % inst)
        if inst.hint:
            ui.warn(_("(%s)\n") % inst.hint)
    except ImportError as inst:
        ui.warn(_("abort: %s!\n") % util.forcebytestr(inst))
        # The last word of the message is the module that failed to import.
        m = util.forcebytestr(inst).split()[-1]
        if m in "mpatch bdiff".split():
            ui.warn(_("(did you forget to compile extensions?)\n"))
        elif m in "zlib".split():
            ui.warn(_("(is your Python install correct?)\n"))
    except IOError as inst:
        if util.safehasattr(inst, "code"):
            # HTTPError-like object
            ui.warn(_("abort: %s\n") % util.forcebytestr(inst))
        elif util.safehasattr(inst, "reason"):
            # URLError-like object
            try: # usually it is in the form (errno, strerror)
                reason = inst.reason.args[1]
            except (AttributeError, IndexError):
                # it might be anything, for example a string
                reason = inst.reason
            if isinstance(reason, unicode):
                # SSLError of Python 2.7.9 contains a unicode
                reason = encoding.unitolocal(reason)
            ui.warn(_("abort: error: %s\n") % reason)
        elif (util.safehasattr(inst, "args")
              and inst.args and inst.args[0] == errno.EPIPE):
            # Broken pipe (e.g. output piped to a pager that quit): silent.
            pass
        elif getattr(inst, "strerror", None):
            if getattr(inst, "filename", None):
                ui.warn(_("abort: %s: %s\n") % (
                    encoding.strtolocal(inst.strerror),
                    util.forcebytestr(inst.filename)))
            else:
                ui.warn(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
        else:
            # Not a recognized IOError shape; let it propagate.
            raise
    except OSError as inst:
        if getattr(inst, "filename", None) is not None:
            ui.warn(_("abort: %s: '%s'\n") % (
                encoding.strtolocal(inst.strerror),
                util.forcebytestr(inst.filename)))
        else:
            ui.warn(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
    except MemoryError:
        ui.warn(_("abort: out of memory\n"))
    except SystemExit as inst:
        # Commands shouldn't sys.exit directly, but give a return code.
        # Just in case catch this and pass exit code to caller.
        return inst.code
    except socket.error as inst:
        ui.warn(_("abort: %s\n") % util.forcebytestr(inst.args[-1]))

    # Any handled-but-not-returned case above falls through to failure.
    return -1
Kevin Bullock
|
def checknewlabel(repo, lbl, kind):
    '''Abort if lbl is not acceptable as a new bookmark/branch/tag name.'''
    # Do not use the "kind" parameter in ui output.
    # It makes strings difficult to translate.
    if lbl in ('tip', '.', 'null'):
        raise error.Abort(_("the name '%s' is reserved") % lbl)
    for c in ':\0\n\r':
        if c in lbl:
            raise error.Abort(
                _("%r cannot be used in a name") % pycompat.bytestr(c))
    try:
        int(lbl)
        # int() succeeded, so the name is a bare integer: reject it.
        raise error.Abort(_("cannot use an integer as a name"))
    except ValueError:
        pass
    if lbl.strip() != lbl:
        raise error.Abort(_("leading or trailing whitespace in name %r") % lbl)
Kevin Bullock
|
r17817 | |||
Adrian Buehlmann
|
def checkfilename(f):
    '''Check that the filename f is an acceptable filename for a tracked file'''
    if any(c in f for c in ('\r', '\n')):
        raise error.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
Adrian Buehlmann
|
r13974 | |||
Adrian Buehlmann
|
def checkportable(ui, f):
    '''Check if filename f is portable and warn or abort depending on config'''
    checkfilename(f)
    abort, warn = checkportabilityalert(ui)
    if not (abort or warn):
        # Config says portability problems are to be ignored.
        return
    msg = util.checkwinfilename(f)
    if not msg:
        return
    msg = "%s: %s" % (msg, util.shellquote(f))
    if abort:
        raise error.Abort(msg)
    ui.warn(_("warning: %s\n") % msg)
Kevin Gessner
|
r14068 | |||
Kevin Gessner
|
def checkportabilityalert(ui):
    '''check if the user's config requests nothing, a warning, or abort for
    non-portable filenames'''
    val = ui.config('ui', 'portablefilenames')
    lowered = val.lower()
    boolval = util.parsebool(val)
    # Windows always aborts on non-portable names; elsewhere only on 'abort'.
    abort = pycompat.iswindows or lowered == 'abort'
    warn = boolval or lowered == 'warn'
    if boolval is None and not (warn or abort or lowered == 'ignore'):
        raise error.ConfigError(
            _("ui.portablefilenames value is invalid ('%s')") % val)
    return abort, warn
Adrian Buehlmann
|
class casecollisionauditor(object):
    '''Warn or abort when a new filename case-folds onto a tracked one.'''

    def __init__(self, ui, abort, dirstate):
        self._ui = ui
        self._abort = abort
        joined = '\0'.join(dirstate._map)
        self._loweredfiles = set(encoding.lower(joined).split('\0'))
        self._dirstate = dirstate
        # Remember names already audited so calling this object twice with
        # the same filename does not report a bogus self-collision.
        self._newfiles = set()

    def __call__(self, f):
        if f in self._newfiles:
            return
        folded = encoding.lower(f)
        if folded in self._loweredfiles and f not in self._dirstate:
            msg = _('possible case-folding collision for %s') % f
            if self._abort:
                raise error.Abort(msg)
            self._ui.warn(_("warning: %s\n") % msg)
        self._loweredfiles.add(folded)
        self._newfiles.add(f)
Adrian Buehlmann
|
r13970 | |||
Gregory Szorc
|
def filteredhash(repo, maxrev):
    """build hash of filtered revisions in the current repoview.

    Multiple caches perform up-to-date validation by checking that the
    tiprev and tipnode stored in the cache file match the current repository.
    However, this is not sufficient for validating repoviews because the set
    of revisions in the view may change without the repository tiprev and
    tipnode changing.

    This function hashes all the revs filtered from the view (up to maxrev)
    and returns that SHA-1 digest, or None when nothing is filtered.
    """
    cl = repo.changelog
    if not cl.filteredrevs:
        return None
    revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
    if not revs:
        return None
    hasher = hashlib.sha1()
    for rev in revs:
        hasher.update('%d;' % rev)
    return hasher.digest()
Adrian Buehlmann
|
def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    '''yield every hg repository under path, always recursively.
    The recurse flag will only control recursion into repo working dirs'''
    def raise_on_root_error(err):
        # Errors on the starting path are fatal; deeper ones are ignored.
        if err.filename == path:
            raise err

    samestat = getattr(os.path, 'samestat', None)
    if followsym and samestat is not None:
        def record_dir(statlist, dirname):
            # Track directory identity via stat so that symlink cycles are
            # visited only once; returns True for a not-yet-seen directory.
            st = os.stat(dirname)
            seen = any(samestat(st, prior) for prior in statlist)
            if not seen:
                statlist.append(st)
            return not seen
    else:
        # Without samestat we cannot detect cycles, so never follow links.
        followsym = False

    if followsym and seen_dirs is None:
        seen_dirs = []
        record_dir(seen_dirs, path)

    for root, dirs, files in os.walk(path, topdown=True,
                                     onerror=raise_on_root_error):
        dirs.sort()
        if '.hg' in dirs:
            yield root # found a repository
            qroot = os.path.join(root, '.hg', 'patches')
            if os.path.isdir(os.path.join(qroot, '.hg')):
                yield qroot # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove('.hg')
            else:
                dirs[:] = [] # don't descend further
        elif followsym:
            keep = []
            for d in dirs:
                fname = os.path.join(root, d)
                if record_dir(seen_dirs, fname):
                    if os.path.islink(fname):
                        # Walk through the link with cycle tracking enabled.
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        keep.append(d)
            dirs[:] = keep
Adrian Buehlmann
|
r13984 | |||
Yuya Nishihara
|
def binnode(ctx):
    """Return binary node id for a given basectx"""
    n = ctx.node()
    # The working directory has no real node; stand in the fake wdir id.
    return wdirid if n is None else n
Yuya Nishihara
|
def intrev(ctx):
    """Return integer for a given basectx that can be used in comparison or
    arithmetic operation"""
    r = ctx.rev()
    # None means the working directory; map it to the wdir pseudo-rev so
    # callers can compare/do arithmetic uniformly.
    return wdirrev if r is None else r
Yuya Nishihara
|
def formatchangeid(ctx):
    """Format changectx as '{rev}:{node|formatnode}', which is the default
    template provided by logcmdutil.changesettemplater"""
    ui = ctx.repo().ui
    return formatrevnode(ui, intrev(ctx), binnode(ctx))
def formatrevnode(ui, rev, node):
    """Format given revision and node depending on the current verbosity"""
    # Full 40-char hash with --debug, short 12-char hash otherwise.
    hexfunc = hex if ui.debugflag else short
    return '%d:%s' % (rev, hexfunc(node))
Jun Wu
|
def revsingle(repo, revspec, default='.', localalias=None):
    '''Resolve a single revspec to a changectx, falling back to default.'''
    # An empty spec (but not the integer 0, a valid rev) selects the default.
    if not revspec and revspec != 0:
        return repo[default]

    revs = revrange(repo, [revspec], localalias=localalias)
    if not revs:
        raise error.Abort(_('empty revision set'))
    return repo[revs.last()]
Matt Mackall
|
r14319 | |||
Yuya Nishihara
|
def _pairspec(revspec):
    # True when the top-level operator of revspec is a range expression.
    parsed = revsetlang.parse(revspec)
    rangeops = ('range', 'rangepre', 'rangepost', 'rangeall')
    return parsed and parsed[0] in rangeops
Matt Mackall
|
def revpair(repo, revs):
    '''Resolve user-supplied revision specs to a (node, node-or-None) pair.

    Returns the first working-directory parent paired with None when no
    revs are given. A second element of None means "a single revision was
    requested", as opposed to a (possibly identical) pair.
    '''
    if not revs:
        return repo.dirstate.p1(), None

    l = revrange(repo, revs)

    # Pick the endpoints of the smartset without forcing a full sort when
    # the set already knows its ordering.
    if not l:
        first = second = None
    elif l.isascending():
        first = l.min()
        second = l.max()
    elif l.isdescending():
        first = l.max()
        second = l.min()
    else:
        first = l.first()
        second = l.last()

    if first is None:
        raise error.Abort(_('empty revision range'))
    # first == second with multiple specs can mean one side of the range
    # resolved to nothing; re-evaluate each spec individually to tell.
    if (first == second and len(revs) >= 2
        and not all(revrange(repo, [r]) for r in revs)):
        raise error.Abort(_('empty revision on one side of range'))

    # if top-level is range expression, the result must always be a pair
    if first == second and len(revs) == 1 and not _pairspec(revs[0]):
        return repo.lookup(first), None

    return repo.lookup(first), repo.lookup(second)
Matt Mackall
|
r14319 | |||
Jun Wu
|
def revrange(repo, specs, localalias=None):
    """Execute 1 to many revsets and return the union.

    This is the preferred mechanism for executing revsets using user-specified
    config options, such as revset aliases.

    The revsets specified by ``specs`` will be executed via a chained ``OR``
    expression. If ``specs`` is empty, an empty result is returned.

    ``specs`` can contain integers, in which case they are assumed to be
    revision numbers.

    It is assumed the revsets are already formatted. If you have arguments
    that need to be expanded in the revset, call ``revsetlang.formatspec()``
    and pass the result as an element of ``specs``.

    Specifying a single revset is allowed.

    Returns a ``revset.abstractsmartset`` which is a list-like interface over
    integer revisions.
    """
    # Bare integers are wrapped as rev(N) revset expressions.
    allspecs = [revsetlang.formatspec('rev(%d)', spec)
                if isinstance(spec, int) else spec
                for spec in specs]
    return repo.anyrevs(allspecs, user=True, localalias=localalias)
Matt Mackall
|
r14320 | |||
Yuya Nishihara
|
def meaningfulparents(repo, ctx):
    """Return list of meaningful (or all if debug) parentrevs for rev.

    For merges (two non-nullrev revisions) both parents are meaningful.
    Otherwise the first parent revision is considered meaningful if it
    is not the preceding revision.
    """
    parents = ctx.parents()
    if len(parents) > 1:
        # A merge: both parents always matter.
        return parents
    if repo.ui.debugflag:
        # In debug mode show the implicit null parent too.
        return [parents[0], repo['null']]
    if parents[0].rev() >= intrev(ctx) - 1:
        # Linear history: the parent is implied, report nothing.
        return []
    return parents
Matt Mackall
|
def expandpats(pats):
    '''Expand bare globs when running on windows.
    On posix we assume the shell has already done it.'''
    if not util.expandglobs:
        return list(pats)
    expanded = []
    for kindpat in pats:
        kind, pat = matchmod._patsplit(kindpat, None)
        if kind is None:
            # No explicit pattern kind: treat as a potential bare glob.
            try:
                matches = glob.glob(pat)
            except re.error:
                matches = [pat]
            if matches:
                expanded.extend(matches)
                continue
        # Kinded patterns and non-matching globs pass through unchanged.
        expanded.append(kindpat)
    return expanded
Pierre-Yves David
|
def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath',
                 badfn=None):
    '''Return a matcher and the patterns that were used.

    The matcher will warn about bad matches, unless an alternate badfn callback
    is provided.'''
    if pats == ("",):
        pats = []
    if opts is None:
        opts = {}
    if not globbed and default == 'relpath':
        pats = expandpats(pats or [])

    def bad(f, msg):
        # NOTE: closes over 'm', which is only assigned below; the matcher
        # never invokes badfn before construction completes, so this is safe.
        ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))

    if badfn is None:
        badfn = bad

    m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
                  default, listsubrepos=opts.get('subrepos'), badfn=badfn)

    if m.always():
        # The matcher accepts everything, so report no effective patterns.
        pats = []
    return m, pats
Pierre-Yves David
|
def match(ctx, pats=(), opts=None, globbed=False, default='relpath',
          badfn=None):
    '''Return a matcher that will warn about bad matches.'''
    matcher, patterns = matchandpats(ctx, pats, opts, globbed, default,
                                     badfn=badfn)
    return matcher
Matt Mackall
|
r14320 | |||
def matchall(repo):
    '''Return a matcher that will efficiently match everything.'''
    # Delegates to the dedicated always-matcher rather than a wildcard pattern.
    return matchmod.always(repo.root, repo.getcwd())
Matt Harbison
|
def matchfiles(repo, files, badfn=None):
    '''Return a matcher that will efficiently match exactly these files.'''
    # exact() matches only the listed paths; badfn handles unmatchable ones.
    return matchmod.exact(repo.root, repo.getcwd(), files, badfn=badfn)
Matt Mackall
|
r14320 | |||
Denis Laxalde
|
def parsefollowlinespattern(repo, rev, pat, msg):
    """Return a file name from `pat` pattern suitable for usage in followlines
    logic.
    """
    if not matchmod.patkind(pat):
        # Plain path: just canonicalize it.
        return pathutil.canonpath(repo.root, repo.getcwd(), pat)
    # Kinded pattern: it must resolve to exactly one file in the revision.
    ctx = repo[rev]
    m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx)
    files = [f for f in ctx if m(f)]
    if len(files) != 1:
        raise error.ParseError(msg)
    return files[0]
Siddharth Agarwal
|
def origpath(ui, repo, filepath):
    '''customize where .orig files are created

    Fetch user defined path from config file: [ui] origbackuppath = <path>
    Fall back to default (filepath with .orig suffix) if not specified
    '''
    origbackuppath = ui.config('ui', 'origbackuppath')
    if not origbackuppath:
        return filepath + ".orig"

    # Convert filepath from an absolute path into a path inside the repo.
    filepathfromroot = util.normpath(os.path.relpath(filepath,
                                                     start=repo.root))

    origvfs = vfs.vfs(repo.wjoin(origbackuppath))
    origbackupdir = origvfs.dirname(filepathfromroot)
    if not origvfs.isdir(origbackupdir) or origvfs.islink(origbackupdir):
        ui.note(_('creating directory: %s\n') % origvfs.join(origbackupdir))

        # Remove any files that conflict with the backup file's path
        for f in reversed(list(util.finddirs(filepathfromroot))):
            if origvfs.isfileorlink(f):
                ui.note(_('removing conflicting file: %s\n')
                        % origvfs.join(f))
                origvfs.unlink(f)
                break

        origvfs.makedirs(origbackupdir)

    # A directory at the backup location would shadow the backup file.
    if origvfs.isdir(filepathfromroot) and not origvfs.islink(filepathfromroot):
        ui.note(_('removing conflicting directory: %s\n')
                % origvfs.join(filepathfromroot))
        origvfs.rmtree(filepathfromroot, forcibly=True)

    return origvfs.join(filepathfromroot)
Siddharth Agarwal
|
r27651 | |||
Jun Wu
|
r33331 | class _containsnode(object): | ||
"""proxy __contains__(node) to container.__contains__ which accepts revs""" | ||||
def __init__(self, repo, revcontainer): | ||||
self._torev = repo.changelog.rev | ||||
self._revcontains = revcontainer.__contains__ | ||||
def __contains__(self, node): | ||||
return self._revcontains(self._torev(node)) | ||||
Pulkit Goyal
|
def cleanupnodes(repo, replacements, operation, moves=None, metadata=None):
    """do common cleanups when old nodes are replaced by new nodes

    That includes writing obsmarkers or stripping nodes, and moving bookmarks.
    (we might also want to move working directory parent in the future)

    By default, bookmark moves are calculated automatically from 'replacements',
    but 'moves' can be used to override that. Also, 'moves' may include
    additional bookmark moves that should not have associated obsmarkers.

    replacements is {oldnode: [newnode]} or a iterable of nodes if they do not
    have replacements. operation is a string, like "rebase".

    metadata is dictionary containing metadata to be stored in obsmarker if
    obsolescence is enabled.
    """
    if not replacements and not moves:
        return

    # translate mapping's other forms: a bare iterable of nodes means
    # "replaced by nothing" for every node.
    if not util.safehasattr(replacements, 'items'):
        replacements = {n: () for n in replacements}

    # Calculate bookmark movements
    if moves is None:
        moves = {}
    # Unfiltered repo is needed since nodes in replacements might be hidden.
    unfi = repo.unfiltered()
    for oldnode, newnodes in replacements.items():
        if oldnode in moves:
            # caller-supplied destination wins over the computed one
            continue
        if len(newnodes) > 1:
            # usually a split, take the one with biggest rev number
            newnode = next(unfi.set('max(%ln)', newnodes)).node()
        elif len(newnodes) == 0:
            # move bookmark backwards to the closest surviving ancestor
            roots = list(unfi.set('max((::%n) - %ln)', oldnode,
                                  list(replacements)))
            if roots:
                newnode = roots[0].node()
            else:
                newnode = nullid
        else:
            newnode = newnodes[0]
        moves[oldnode] = newnode

    with repo.transaction('cleanup') as tr:
        # Move bookmarks
        bmarks = repo._bookmarks
        bmarkchanges = []
        allnewnodes = [n for ns in replacements.values() for n in ns]
        for oldnode, newnode in moves.items():
            oldbmarks = repo.nodebookmarks(oldnode)
            if not oldbmarks:
                continue
            from . import bookmarks # avoid import cycle
            repo.ui.debug('moving bookmarks %r from %s to %s\n' %
                          (oldbmarks, hex(oldnode), hex(newnode)))
            # Delete divergent bookmarks being parents of related newnodes
            deleterevs = repo.revs('parents(roots(%ln & (::%n))) - parents(%n)',
                                   allnewnodes, newnode, oldnode)
            deletenodes = _containsnode(repo, deleterevs)
            for name in oldbmarks:
                bmarkchanges.append((name, newnode))
                for b in bookmarks.divergent2delete(repo, deletenodes, name):
                    bmarkchanges.append((b, None))
        if bmarkchanges:
            # apply all moves/deletions atomically within the transaction
            bmarks.applychanges(repo, tr, bmarkchanges)

        # Obsolete or strip nodes
        if obsolete.isenabled(repo, obsolete.createmarkersopt):
            # If a node is already obsoleted, and we want to obsolete it
            # without a successor, skip that obssolete request since it's
            # unnecessary. That's the "if s or not isobs(n)" check below.
            # Also sort the node in topology order, that might be useful for
            # some obsstore logic.
            # NOTE: the filtering and sorting might belong to createmarkers.
            isobs = unfi.obsstore.successors.__contains__
            torev = unfi.changelog.rev
            sortfunc = lambda ns: torev(ns[0])
            rels = [(unfi[n], tuple(unfi[m] for m in s))
                    for n, s in sorted(replacements.items(), key=sortfunc)
                    if s or not isobs(n)]
            if rels:
                obsolete.createmarkers(repo, rels, operation=operation,
                                       metadata=metadata)
        else:
            from . import repair # avoid import cycle
            # No obsolescence support: physically strip the replaced nodes.
            tostrip = list(replacements)
            if tostrip:
                repair.delayedstrip(repo.ui, repo, tostrip, operation)
Pierre-Yves David
|
def addremove(repo, matcher, prefix, opts=None, dry_run=None, similarity=None):
    """Add new files and forget missing ones, like 'hg addremove'.

    Unknown files matched by 'matcher' are scheduled for addition, missing
    tracked ones for removal, and likely renames are recorded when
    'similarity' > 0.  Returns 1 if any explicitly named file was bad or a
    subrepository reported failure, else 0.
    """
    if opts is None:
        opts = {}
    m = matcher
    if dry_run is None:
        dry_run = opts.get('dry_run')
    if similarity is None:
        similarity = float(opts.get('similarity') or 0)

    ret = 0
    join = lambda f: os.path.join(prefix, f)

    wctx = repo[None]
    # recurse into subrepositories first; their failures are folded into
    # our return code
    for subpath in sorted(wctx.substate):
        submatch = matchmod.subdirmatcher(subpath, m)
        if opts.get('subrepos') or m.exact(subpath) or any(submatch.files()):
            sub = wctx.sub(subpath)
            try:
                if sub.addremove(submatch, prefix, opts, dry_run, similarity):
                    ret = 1
            except error.LookupError:
                # subrepo is configured but absent from disk
                repo.ui.status(_("skipping missing subrepository: %s\n")
                               % join(subpath))

    rejected = []
    def badfn(f, msg):
        # report only files the user named explicitly, but remember all of
        # them so the return code below can be computed
        if f in m.files():
            m.bad(f, msg)
        rejected.append(f)

    badmatch = matchmod.badmatch(m, badfn)
    added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
                                                                   badmatch)

    unknownset = set(unknown + forgotten)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        if repo.ui.verbose or not m.exact(abs):
            if abs in unknownset:
                status = _('adding %s\n') % m.uipath(abs)
            else:
                status = _('removing %s\n') % m.uipath(abs)
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    if not dry_run:
        _markchanges(repo, unknown + forgotten, deleted, renames)

    # an explicitly requested file that could not be walked is an error
    for f in rejected:
        if f in m.files():
            return 1

    return ret
Matt Mackall
|
r16167 | |||
Siddharth Agarwal
|
def marktouched(repo, files, similarity=0.0):
    '''Assert that files have somehow been operated upon. files are relative to
    the repo root.'''
    # NOTE: 'rejected' is assigned after the lambda that references it; the
    # closure resolves the name lazily when badfn is eventually called, so
    # this ordering is safe.
    m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
    rejected = []
    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)

    if repo.ui.verbose:
        unknownset = set(unknown + forgotten)
        toprint = unknownset.copy()
        toprint.update(deleted)
        for abs in sorted(toprint):
            if abs in unknownset:
                status = _('adding %s\n') % abs
            else:
                status = _('removing %s\n') % abs
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    _markchanges(repo, unknown + forgotten, deleted, renames)

    # any explicitly listed file that could not be processed is an error
    for f in rejected:
        if f in m.files():
            return 1
    return 0
Siddharth Agarwal
|
def _interestingfiles(repo, matcher):
    '''Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean.

    Returns a 5-tuple of lists: (added, unknown, deleted, removed,
    forgotten), all repo-relative paths.'''
    added, unknown, deleted, removed, forgotten = [], [], [], [], []
    # path auditor rejects paths that escape the repo or hit .hg
    audit_path = pathutil.pathauditor(repo.root, cached=True)

    ctx = repo[None]
    dirstate = repo.dirstate
    walkresults = dirstate.walk(matcher, subrepos=sorted(ctx.substate),
                                unknown=True, ignored=False, full=False)
    # st is the stat result (falsy when the file is gone from disk);
    # dirstate[abs] is the single-char dirstate state.
    for abs, st in walkresults.iteritems():
        dstate = dirstate[abs]
        if dstate == '?' and audit_path.check(abs):
            # untracked and a legitimate path: candidate for 'add'
            unknown.append(abs)
        elif dstate != 'r' and not st:
            # tracked but missing from disk: candidate for 'remove'
            deleted.append(abs)
        elif dstate == 'r' and st:
            # marked removed but still present on disk
            forgotten.append(abs)
        # for finding renames
        elif dstate == 'r' and not st:
            removed.append(abs)
        elif dstate == 'a':
            added.append(abs)

    return added, unknown, deleted, removed, forgotten
Siddharth Agarwal
|
r19150 | |||
Siddharth Agarwal
|
r19152 | def _findrenames(repo, matcher, added, removed, similarity): | ||
'''Find renames from removed files to added ones.''' | ||||
renames = {} | ||||
if similarity > 0: | ||||
for old, new, score in similar.findrenames(repo, added, removed, | ||||
similarity): | ||||
if (repo.ui.verbose or not matcher.exact(old) | ||||
or not matcher.exact(new)): | ||||
repo.ui.status(_('recording removal of %s as rename to %s ' | ||||
'(%d%% similar)\n') % | ||||
(matcher.rel(old), matcher.rel(new), | ||||
score * 100)) | ||||
renames[new] = old | ||||
return renames | ||||
Siddharth Agarwal
|
r19153 | def _markchanges(repo, unknown, deleted, renames): | ||
'''Marks the files in unknown as added, the files in deleted as removed, | ||||
and the files in renames as copied.''' | ||||
wctx = repo[None] | ||||
Bryan O'Sullivan
|
r27851 | with repo.wlock(): | ||
Siddharth Agarwal
|
r19153 | wctx.forget(deleted) | ||
wctx.add(unknown) | ||||
for new, old in renames.iteritems(): | ||||
wctx.copy(old, new) | ||||
Matt Mackall
|
def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.
    """
    # follow the copy chain back one step: if src was itself copied,
    # credit its original source instead
    origsrc = repo.dirstate.copied(src) or src
    if dst == origsrc: # copying back a copy?
        # states 'm' (merged) and 'n' (normal) need no fixup; anything
        # else is put back into normal-lookup state
        if repo.dirstate[dst] not in 'mn' and not dryrun:
            repo.dirstate.normallookup(dst)
    else:
        if repo.dirstate[origsrc] == 'a' and origsrc == src:
            # the source was only added, never committed: there is no
            # revision to record copy metadata against
            if not ui.quiet:
                ui.warn(_("%s has not been committed yet, so no copy "
                          "data will be stored for %s.\n")
                        % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
            if repo.dirstate[dst] in '?r' and not dryrun:
                wctx.add([dst])
        elif not dryrun:
            wctx.copy(origsrc, dst)
Adrian Buehlmann
|
r14482 | |||
def readrequires(opener, supported):
    '''Reads and parses .hg/requires and checks if all entries found
    are in the list of supported features.

    Returns the set of requirement strings read from the file.  Raises
    error.RequirementError when the file is corrupt or mentions a
    feature this Mercurial does not know about.'''
    requirements = set(opener.read("requires").splitlines())
    unknown = []
    for req in requirements:
        if req in supported:
            continue
        # an empty line or one not starting with an alphanumeric
        # character cannot be a feature name: the file must be damaged
        if not req or not req[0:1].isalnum():
            raise error.RequirementError(_(".hg/requires file is corrupt"))
        unknown.append(req)
    unknown.sort()
    if unknown:
        raise error.RequirementError(
            _("repository requires features unknown to this Mercurial: %s")
            % " ".join(unknown),
            hint=_("see https://mercurial-scm.org/wiki/MissingRequirement"
                   " for more information"))
    return requirements
Idan Kamara
|
r14928 | |||
Drew Gottlieb
|
def writerequires(opener, requirements):
    """Write the given requirement strings to '.hg/requires', one per
    line, in sorted order for deterministic output."""
    with opener('requires', 'w') as fp:
        for req in sorted(requirements):
            fp.write("%s\n" % req)
Drew Gottlieb
|
r24934 | |||
Siddharth Agarwal
|
class filecachesubentry(object):
    """Stat-based change tracker for a single file path.

    Used by filecacheentry/filecache to decide whether a cached object
    built from a file under .hg must be rebuilt.
    """
    def __init__(self, path, stat):
        # path: the file to watch
        # stat: if true, capture an initial stat snapshot now
        self.path = path
        # cachestat: last util.cachestat snapshot, or None if unknown
        self.cachestat = None
        # _cacheable: tri-state; True/False once known, None = not yet known
        self._cacheable = None

        if stat:
            self.cachestat = filecachesubentry.stat(self.path)

            if self.cachestat:
                self._cacheable = self.cachestat.cacheable()
            else:
                # None means we don't know yet
                self._cacheable = None

    def refresh(self):
        # re-snapshot only when stat data is trustworthy for this path
        if self.cacheable():
            self.cachestat = filecachesubentry.stat(self.path)

    def cacheable(self):
        if self._cacheable is not None:
            return self._cacheable

        # we don't know yet, assume it is for now
        return True

    def changed(self):
        # no point in going further if we can't cache it
        if not self.cacheable():
            return True

        newstat = filecachesubentry.stat(self.path)

        # we may not know if it's cacheable yet, check again now
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

            # check again
            if not self._cacheable:
                return True

        if self.cachestat != newstat:
            self.cachestat = newstat
            return True
        else:
            return False

    @staticmethod
    def stat(path):
        # returns a util.cachestat, or None (implicitly) when the file
        # does not exist; any other OS error propagates
        try:
            return util.cachestat(path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
Siddharth Agarwal
|
class filecacheentry(object):
    """Aggregates one filecachesubentry per tracked path."""

    def __init__(self, paths, stat=True):
        self._entries = [filecachesubentry(p, stat) for p in paths]

    def changed(self):
        '''true if any entry has changed'''
        return any(entry.changed() for entry in self._entries)

    def refresh(self):
        for entry in self._entries:
            entry.refresh()
Idan Kamara
|
class filecache(object):
    '''A property like decorator that tracks files under .hg/ for updates.

    Records stat info when called in _filecache.

    On subsequent calls, compares old stat info with new info, and recreates the
    object when any of the files changes, updating the new stat info in
    _filecache.

    Mercurial either atomic renames or appends for files under .hg,
    so to ensure the cache is reliable we need the filesystem to be able
    to tell us if a file has been replaced. If it can't, we fallback to
    recreating the object on every call (essentially the same behavior as
    propertycache).

    Invariant maintained by __get__/__set__/__delete__: self.name in
    obj.__dict__ implies self.name in obj._filecache.
    '''
    def __init__(self, *paths):
        # paths: file names (relative; resolved per-instance via join())
        self.paths = paths

    def join(self, obj, fname):
        """Used to compute the runtime path of a cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class that its member function was decorated).
        """
        raise NotImplementedError

    def __call__(self, func):
        # used as a decorator: remember the wrapped function and its name
        self.func = func
        self.name = func.__name__.encode('ascii')
        return self

    def __get__(self, obj, type=None):
        # if accessed on the class, return the descriptor itself.
        if obj is None:
            return self
        # do we need to check if the file changed?
        if self.name in obj.__dict__:
            assert self.name in obj._filecache, self.name
            return obj.__dict__[self.name]

        entry = obj._filecache.get(self.name)

        if entry:
            if entry.changed():
                # underlying file changed: rebuild the cached object
                entry.obj = self.func(obj)
        else:
            paths = [self.join(obj, path) for path in self.paths]

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified between the time we read and stat
            entry = filecacheentry(paths, True)
            entry.obj = self.func(obj)

            obj._filecache[self.name] = entry

        obj.__dict__[self.name] = entry.obj
        return entry.obj

    def __set__(self, obj, value):
        if self.name not in obj._filecache:
            # we add an entry for the missing value because X in __dict__
            # implies X in _filecache
            paths = [self.join(obj, path) for path in self.paths]
            ce = filecacheentry(paths, False)
            obj._filecache[self.name] = ce
        else:
            ce = obj._filecache[self.name]

        ce.obj = value # update cached copy
        obj.__dict__[self.name] = value # update copy returned by obj.x

    def __delete__(self, obj):
        try:
            del obj.__dict__[self.name]
        except KeyError:
            raise AttributeError(self.name)
Siddharth Agarwal
|
r26490 | |||
Matt Mackall
|
def extdatasource(repo, source):
    """Gather a map of rev -> value dict from the specified source

    A source spec is treated as a URL, with a special case shell: type
    for parsing the output from a shell command.

    The data is parsed as a series of newline-separated records where
    each record is a revision specifier optionally followed by a space
    and a freeform string value. If the revision is known locally, it
    is converted to a rev, otherwise the record is skipped.

    Note that both key and value are treated as UTF-8 and converted to
    the local encoding. This allows uniformity between local and
    remote data sources.
    """
    spec = repo.ui.config("extdata", source)
    if not spec:
        raise error.Abort(_("unknown extdata source '%s'") % source)

    data = {}
    src = proc = None
    try:
        if spec.startswith("shell:"):
            # external commands should be run relative to the repo root
            cmd = spec[6:]
            proc = subprocess.Popen(cmd, shell=True, bufsize=-1,
                                    close_fds=util.closefds,
                                    stdout=subprocess.PIPE, cwd=repo.root)
            src = proc.stdout
        else:
            # treat as a URL or file
            src = url.open(repo.ui, spec)
        for l in src:
            # "<revspec>[ <value>]" per line; value defaults to empty
            if " " in l:
                k, v = l.strip().split(" ", 1)
            else:
                k, v = l.strip(), ""

            k = encoding.tolocal(k)
            try:
                data[repo[k].rev()] = encoding.tolocal(v)
            except (error.LookupError, error.RepoLookupError):
                pass # we ignore data for nodes that don't exist locally
    finally:
        # always drain and close, even when record parsing raised
        if proc:
            proc.communicate()
        if src:
            src.close()
    # a failing shell command aborts only after cleanup above
    if proc and proc.returncode != 0:
        raise error.Abort(_("extdata command '%s' failed: %s")
                          % (cmd, util.explainexit(proc.returncode)[0]))

    return data
Siddharth Agarwal
|
r26490 | def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs): | ||
if lock is None: | ||||
raise error.LockInheritanceContractViolation( | ||||
'lock can only be inherited while held') | ||||
if environ is None: | ||||
environ = {} | ||||
with lock.inherit() as locker: | ||||
environ[envvar] = locker | ||||
return repo.ui.system(cmd, environ=environ, *args, **kwargs) | ||||
Siddharth Agarwal
|
r26491 | |||
def wlocksub(repo, cmd, *args, **kwargs):
    """run cmd as a subprocess that allows inheriting repo's wlock

    This can only be called while the wlock is held. This takes all the
    arguments that ui.system does, and returns the exit code of the
    subprocess."""
    # the child finds the inheritance token in HG_WLOCK_LOCKER
    return _locksub(repo, repo.currentwlock(), 'HG_WLOCK_LOCKER', cmd, *args,
                    **kwargs)
Pierre-Yves David
|
r26906 | |||
def gdinitconfig(ui):
    """helper function to know if a repo should be created as general delta
    """
    # experimental config: format.generaldelta
    if ui.configbool('format', 'generaldelta'):
        return True
    return ui.configbool('format', 'usegeneraldelta')
Pierre-Yves David
|
r26906 | |||
Pierre-Yves David
|
def gddeltaconfig(ui):
    """helper function to know if incoming delta should be optimised
    """
    # experimental config: format.generaldelta
    enabled = ui.configbool('format', 'generaldelta')
    return enabled
Kostia Balytskyi
|
r31553 | |||
class simplekeyvaluefile(object):
    """A simple file with key=value lines

    Keys must be alphanumerics and start with a letter, values must not
    contain '\n' characters"""

    firstlinekey = '__firstline'

    def __init__(self, vfs, path, keys=None):
        self.vfs = vfs
        self.path = path

    def read(self, firstlinenonkeyval=False):
        """Read the contents of a simple key-value file

        'firstlinenonkeyval' indicates whether the first line of file should
        be treated as a key-value pair or returned fully under the
        __firstline key."""
        lines = self.vfs.readlines(self.path)
        d = {}
        if firstlinenonkeyval:
            if not lines:
                raise error.CorruptedState(_("empty simplekeyvalue file"))
            # keep the first line verbatim, minus its trailing '\n'
            d[self.firstlinekey] = lines[0][:-1]
            del lines[0]
        try:
            # 'if line.strip()' skips lines holding only a newline, which
            # a plain 'if line' would keep ('\n' is truthy)
            parsed = dict(line[:-1].split('=', 1)
                          for line in lines if line.strip())
            if self.firstlinekey in parsed:
                raise error.CorruptedState(
                    _("%r can't be used as a key") % self.firstlinekey)
            d.update(parsed)
        except ValueError as e:
            raise error.CorruptedState(str(e))
        return d

    def write(self, data, firstline=None):
        """Write key=>value mapping to a file
        data is a dict. Keys must be alphanumerical and start with a letter.

        Values must not contain newline characters.

        If 'firstline' is not None, it is written to file before
        everything else, as it is, not in a key=value form"""
        lines = []
        if firstline is not None:
            lines.append('%s\n' % firstline)

        for k, v in data.items():
            if k == self.firstlinekey:
                raise error.ProgrammingError("key name '%s' is reserved"
                                             % self.firstlinekey)
            if not k[0:1].isalpha():
                raise error.ProgrammingError(
                    "keys must start with a letter in a key-value file")
            if not k.isalnum():
                raise error.ProgrammingError(
                    "invalid key name in a simple key-value file")
            if '\n' in v:
                raise error.ProgrammingError(
                    "invalid value in a simple key-value file")
            lines.append("%s=%s\n" % (k, v))
        with self.vfs(self.path, mode='wb', atomictemp=True) as fp:
            fp.write(''.join(lines))
r33249 | ||||
Boris Feld
|
# transaction-name prefixes after which a summary of obsoleted changesets
# should be reported (see registersummarycallback)
_reportobsoletedsource = [
    'debugobsolete',
    'pull',
    'push',
    'serve',
    'unbundle',
]

# transaction-name prefixes that may add new changesets, after which the
# range of new revisions is reported (see registersummarycallback)
_reportnewcssource = [
    'pull',
    'unbundle',
]

# a list of (repo, ctx, files) functions called by various commands to allow
# extensions to ensure the corresponding files are available locally, before the
# command uses them.
fileprefetchhooks = util.hooks()

# A marker that tells the evolve extension to suppress its own reporting
_reportstroubledchangesets = True
Boris Feld
|
def registersummarycallback(repo, otr, txnname=''):
    """register a callback to issue a summary after the transaction is closed

    Depending on 'txnname', installs post-close callbacks on transaction
    'otr' that report obsoleted changesets, new instabilities, and the
    range of newly added changesets.
    """
    def txmatch(sources):
        # does the transaction name start with any of the given prefixes?
        return any(txnname.startswith(source) for source in sources)

    categories = []

    def reportsummary(func):
        """decorator for report callbacks."""
        # The repoview life cycle is shorter than the one of the actual
        # underlying repository. So the filtered object can die before the
        # weakref is used leading to troubles. We keep a reference to the
        # unfiltered object and restore the filtering when retrieving the
        # repository through the weakref.
        filtername = repo.filtername
        reporef = weakref.ref(repo.unfiltered())
        def wrapped(tr):
            repo = reporef()
            if filtername:
                repo = repo.filtered(filtername)
            func(repo, tr)
        # category names are numbered so callbacks run in registration order
        newcat = '%02i-txnreport' % len(categories)
        otr.addpostclose(newcat, wrapped)
        categories.append(newcat)
        return wrapped

    if txmatch(_reportobsoletedsource):
        @reportsummary
        def reportobsoleted(repo, tr):
            obsoleted = obsutil.getobsoleted(repo, tr)
            if obsoleted:
                repo.ui.status(_('obsoleted %i changesets\n')
                               % len(obsoleted))

    if (obsolete.isenabled(repo, obsolete.createmarkersopt) and
        repo.ui.configbool('experimental', 'evolution.report-instabilities')):
        instabilitytypes = [
            ('orphan', 'orphan'),
            ('phase-divergent', 'phasedivergent'),
            ('content-divergent', 'contentdivergent'),
        ]
        def getinstabilitycounts(repo):
            filtered = repo.changelog.filteredrevs
            counts = {}
            for instability, revset in instabilitytypes:
                counts[instability] = len(set(obsolete.getrevs(repo, revset)) -
                                          filtered)
            return counts
        # snapshot counts now; the callback reports only the increase
        oldinstabilitycounts = getinstabilitycounts(repo)
        @reportsummary
        def reportnewinstabilities(repo, tr):
            newinstabilitycounts = getinstabilitycounts(repo)
            for instability, revset in instabilitytypes:
                delta = (newinstabilitycounts[instability] -
                         oldinstabilitycounts[instability])
                if delta > 0:
                    repo.ui.warn(_('%i new %s changesets\n') %
                                 (delta, instability))

    if txmatch(_reportnewcssource):
        @reportsummary
        def reportnewcs(repo, tr):
            """Report the range of new revisions pulled/unbundled."""
            newrevs = tr.changes.get('revs', xrange(0, 0))
            if not newrevs:
                return

            # Compute the bounds of new revisions' range, excluding obsoletes.
            unfi = repo.unfiltered()
            revs = unfi.revs('%ld and not obsolete()', newrevs)
            if not revs:
                # Got only obsoletes.
                return
            minrev, maxrev = repo[revs.min()], repo[revs.max()]

            if minrev == maxrev:
                revrange = minrev
            else:
                revrange = '%s:%s' % (minrev, maxrev)
            repo.ui.status(_('new changesets %s\n') % revrange)
Matt Harbison
|
r35169 | |||
Boris Feld
|
def nodesummaries(repo, nodes, maxnumnodes=4):
    """Return a one-line summary of node hashes, abbreviating long lists.

    Up to maxnumnodes short hashes are shown; in verbose mode every node
    is shown regardless of the limit."""
    if len(nodes) <= maxnumnodes or repo.ui.verbose:
        return ' '.join(short(h) for h in nodes)
    shown = ' '.join(short(h) for h in nodes[:maxnumnodes])
    return _("%s and %d others") % (shown, len(nodes) - maxnumnodes)
Boris Feld
|
r35185 | |||
Boris Feld
|
def enforcesinglehead(repo, tr, desc):
    """check that no named branch has multiple heads"""
    if desc in ('strip', 'repair'):
        # skip the logic during strip
        return
    visible = repo.filtered('visible')
    # possible improvement: we could restrict the check to affected branch
    for branch, heads in visible.branchmap().iteritems():
        if len(heads) <= 1:
            continue
        msg = _('rejecting multiple heads on branch "%s"') % branch
        hint = _('%d heads: %s') % (len(heads), nodesummaries(repo, heads))
        raise error.Abort(msg, hint=hint)
Matt Harbison
|
def wrapconvertsink(sink):
    """Allow extensions to wrap the sink returned by convcmd.convertsink()
    before it is used, whether or not the convert extension was formally loaded.
    """
    # identity by default; extensions monkeypatch this hook point
    return sink
Pulkit Goyal
|
r35512 | |||
def unhidehashlikerevs(repo, specs, hiddentype):
    """parse the user specs and unhide changesets whose hash or revision number
    is passed.

    hiddentype can be: 1) 'warn': warn while unhiding changesets
                       2) 'nowarn': don't warn while unhiding changesets

    returns a repo object with the required changesets unhidden
    """
    # direct access is opt-in and only meaningful on a filtered repo
    if not repo.filtername or not repo.ui.configbool('experimental',
                                                     'directaccess'):
        return repo

    if repo.filtername not in ('visible', 'visible-hidden'):
        return repo

    symbols = set()
    for spec in specs:
        try:
            tree = revsetlang.parse(spec)
        except error.ParseError: # will be reported by scmutil.revrange()
            continue
        symbols.update(revsetlang.gethashlikesymbols(tree))

    if not symbols:
        return repo

    revs = _getrevsfromsymbols(repo, symbols)

    if not revs:
        return repo

    if hiddentype == 'warn':
        unfi = repo.unfiltered()
        revstr = ", ".join([pycompat.bytestr(unfi[l]) for l in revs])
        repo.ui.warn(_("warning: accessing hidden changesets for write "
                       "operation: %s\n") % revstr)

    # we have to use new filtername to separate branch/tags cache until we can
    # disable these cache when revisions are dynamically pinned.
    return repo.filtered('visible-hidden', revs)
def _getrevsfromsymbols(repo, symbols):
    """parse the list of symbols and returns a set of revision numbers of hidden
    changesets present in symbols"""
    revs = set()
    unfi = repo.unfiltered()
    unficl = unfi.changelog
    cl = repo.changelog
    tiprev = len(unficl)
    pmatch = unficl._partialmatch
    allowrevnums = repo.ui.configbool('experimental', 'directaccess.revnums')
    for s in symbols:
        # first, try the symbol as a plain revision number
        try:
            n = int(s)
            if n <= tiprev:
                if not allowrevnums:
                    continue
                else:
                    # hidden == present in the unfiltered log but not in
                    # the filtered one
                    if n not in cl:
                        revs.add(n)
                    continue
        except ValueError:
            pass

        # otherwise, try it as a (possibly partial) node hash
        try:
            s = pmatch(s)
        except error.LookupError:
            s = None

        if s is not None:
            rev = unficl.rev(s)
            if rev not in cl:
                revs.add(rev)

    return revs