# revset.py - revision set queries for mercurial
#
# Copyright 2010 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from __future__ import absolute_import

import heapq
import re

from .i18n import _
from . import (
    destutil,
    encoding,
    error,
    hbisect,
    match as matchmod,
    node,
    obsolete as obsmod,
    parser,
    pathutil,
    phases,
    registrar,
    repoview,
    util,
)
Matt Mackall
|
r11275 | |||
Patrick Mezard
|
def _revancestors(repo, revs, followfirst):
    """Like revlog.ancestors(), but supports followfirst.

    When followfirst is True, only the first parent of each changeset is
    followed.  Returns a generatorset yielding revisions in descending
    order, lazily.
    """
    if followfirst:
        cut = 1
    else:
        cut = None
    cl = repo.changelog

    def iterate():
        revs.sort(reverse=True)
        irevs = iter(revs)
        # max-heap of pending revisions, stored negated because heapq
        # implements a min-heap
        h = []

        inputrev = next(irevs, None)
        if inputrev is not None:
            heapq.heappush(h, -inputrev)

        seen = set()
        while h:
            current = -heapq.heappop(h)
            if current == inputrev:
                # feed the next input revision into the heap so inputs and
                # discovered ancestors are merged in descending order
                inputrev = next(irevs, None)
                if inputrev is not None:
                    heapq.heappush(h, -inputrev)
            if current not in seen:
                seen.add(current)
                yield current
                for parent in cl.parentrevs(current)[:cut]:
                    if parent != node.nullrev:
                        heapq.heappush(h, -parent)

    return generatorset(iterate(), iterasc=False)
Patrick Mezard
|
r16409 | |||
def _revdescendants(repo, revs, followfirst):
    """Like revlog.descendants() but supports followfirst.

    When followfirst is True, only first-parent links count.  Returns a
    generatorset yielding revisions in ascending order, lazily.
    """
    if followfirst:
        cut = 1
    else:
        cut = None

    def iterate():
        cl = repo.changelog
        # XXX this should be 'parentset.min()' assuming 'parentset' is a
        # smartset (and if it is not, it should.)
        first = min(revs)
        nullrev = node.nullrev
        if first == nullrev:
            # Are there nodes with a null first parent and a non-null
            # second one? Maybe. Do we care? Probably not.
            for i in cl:
                yield i
        else:
            seen = set(revs)
            # a descendant must appear after the smallest root, so a single
            # forward scan from there is sufficient
            for i in cl.revs(first + 1):
                for x in cl.parentrevs(i)[:cut]:
                    if x != nullrev and x in seen:
                        seen.add(i)
                        yield i
                        break

    return generatorset(iterate(), iterasc=True)
Patrick Mezard
|
r16409 | |||
Yuya Nishihara
|
def _reachablerootspure(repo, minroot, roots, heads, includepath):
    """return (heads(::<roots> and ::<heads>))

    If includepath is True, return (<roots>::<heads>)."""
    # NOTE(review): the empty-input branches return a plain list / baseset
    # while the main paths return a set; callers (reachableroots) normalize
    # with baseset(...) afterwards, so this inconsistency is tolerated here.
    if not roots:
        return []
    parentrevs = repo.changelog.parentrevs
    roots = set(roots)
    visit = list(heads)
    reachable = set()
    seen = {}
    # prefetch all the things! (because python is slow)
    reached = reachable.add
    dovisit = visit.append
    nextvisit = visit.pop
    # open-code the post-order traversal due to the tiny size of
    # sys.getrecursionlimit()
    while visit:
        rev = nextvisit()
        if rev in roots:
            reached(rev)
            if not includepath:
                # heads-only query: no need to walk through a root
                continue
        parents = parentrevs(rev)
        seen[rev] = parents
        for parent in parents:
            # minroot bounds the walk: nothing below it can be a root
            if parent >= minroot and parent not in seen:
                dovisit(parent)
    if not reachable:
        return baseset()
    if not includepath:
        return reachable
    # second pass (ascending) marks everything on a path from a root to a
    # head: a rev is on a path iff one of its parents is
    for rev in sorted(seen):
        for parent in seen[rev]:
            if parent in reachable:
                reached(rev)
    return reachable
Bryan O'Sullivan
|
r16862 | |||
Laurent Charignon
|
def reachableroots(repo, roots, heads, includepath=False):
    """return (heads(::<roots> and ::<heads>))

    If includepath is True, return (<roots>::<heads>)."""
    if not roots:
        return baseset()
    minroot = roots.min()
    roots = list(roots)
    heads = list(heads)
    try:
        # fast path: C implementation on the changelog index, if present
        revs = repo.changelog.reachableroots(minroot, heads, roots, includepath)
    except AttributeError:
        revs = _reachablerootspure(repo, minroot, roots, heads, includepath)
    revs = baseset(revs)
    revs.sort()
    return revs
Laurent Charignon
|
r26006 | |||
Matt Mackall
|
# Parsing table for the revset grammar, consumed by parser.parser().
# token-type: binding-strength, primary, prefix, infix, suffix
elements = {
    "(": (21, None, ("group", 1, ")"), ("func", 1, ")"), None),
    "##": (20, None, None, ("_concat", 20), None),
    "~": (18, None, None, ("ancestor", 18), None),
    "^": (18, None, None, ("parent", 18), ("parentpost", 18)),
    # unary minus binds tighter (19) than binary minus (5)
    "-": (5, None, ("negate", 19), ("minus", 5), None),
    "::": (17, None, ("dagrangepre", 17), ("dagrange", 17),
           ("dagrangepost", 17)),
    "..": (17, None, ("dagrangepre", 17), ("dagrange", 17),
           ("dagrangepost", 17)),
    ":": (15, "rangeall", ("rangepre", 15), ("range", 15), ("rangepost", 15)),
    "not": (10, None, ("not", 10), None, None),
    "!": (10, None, ("not", 10), None, None),
    "and": (5, None, None, ("and", 5), None),
    "&": (5, None, None, ("and", 5), None),
    "%": (5, None, None, ("only", 5), ("onlypost", 5)),
    "or": (4, None, None, ("or", 4), None),
    "|": (4, None, None, ("or", 4), None),
    "+": (4, None, None, ("or", 4), None),
    "=": (3, None, None, ("keyvalue", 3), None),
    ",": (2, None, None, ("list", 2), None),
    ")": (0, None, None, None, None),
    "symbol": (0, "symbol", None, None, None),
    "string": (0, "string", None, None, None),
    "end": (0, None, None, None, None),
}

# words that are operators, not symbols, when unquoted
keywords = set(['and', 'or', 'not'])
FUJIWARA Katsunori
|
# default set of valid characters for the initial letter of symbols
_syminitletters = set(c for c in [chr(i) for i in xrange(256)]
                      if c.isalnum() or c in '._@' or ord(c) > 127)

# default set of valid characters for non-initial letters of symbols
_symletters = set(c for c in [chr(i) for i in xrange(256)]
                  if c.isalnum() or c in '-._/@' or ord(c) > 127)
def tokenize(program, lookup=None, syminitletters=None, symletters=None):
    '''
    Parse a revset statement into a stream of tokens

    ``syminitletters`` is the set of valid characters for the initial
    letter of symbols.

    By default, character ``c`` is recognized as valid for initial
    letter of symbols, if ``c.isalnum() or c in '._@' or ord(c) > 127``.

    ``symletters`` is the set of valid characters for non-initial
    letters of symbols.

    By default, character ``c`` is recognized as valid for non-initial
    letters of symbols, if ``c.isalnum() or c in '-._/@' or ord(c) > 127``.

    Check that @ is a valid unquoted token character (issue3686):
    >>> list(tokenize("@::"))
    [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]

    '''
    if syminitletters is None:
        syminitletters = _syminitletters
    if symletters is None:
        symletters = _symletters

    if program and lookup:
        # attempt to parse old-style ranges first to deal with
        # things like old-tag which contain query metacharacters
        parts = program.split(':', 1)
        if all(lookup(sym) for sym in parts if sym):
            if parts[0]:
                yield ('symbol', parts[0], 0)
            if len(parts) > 1:
                s = len(parts[0])
                yield (':', None, s)
                if parts[1]:
                    yield ('symbol', parts[1], s + 1)
            yield ('end', None, len(program))
            return

    pos, l = 0, len(program)
    while pos < l:
        c = program[pos]
        if c.isspace(): # skip inter-token whitespace
            pass
        elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
            yield ('::', None, pos)
            pos += 1 # skip ahead
        elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
            yield ('..', None, pos)
            pos += 1 # skip ahead
        elif c == '#' and program[pos:pos + 2] == '##': # look ahead carefully
            yield ('##', None, pos)
            pos += 1 # skip ahead
        elif c in "():=,-|&+!~^%": # handle simple operators
            yield (c, None, pos)
        elif (c in '"\'' or c == 'r' and
              program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
            if c == 'r':
                # raw string: no backslash decoding
                pos += 1
                c = program[pos]
                decode = lambda x: x
            else:
                decode = parser.unescapestr
            pos += 1
            s = pos
            while pos < l: # find closing quote
                d = program[pos]
                if d == '\\': # skip over escaped characters
                    pos += 2
                    continue
                if d == c:
                    yield ('string', decode(program[s:pos]), s)
                    break
                pos += 1
            else:
                raise error.ParseError(_("unterminated string"), s)
        # gather up a symbol/keyword
        elif c in syminitletters:
            s = pos
            pos += 1
            while pos < l: # find end of symbol
                d = program[pos]
                if d not in symletters:
                    break
                if d == '.' and program[pos - 1] == '.': # special case for ..
                    pos -= 1
                    break
                pos += 1
            sym = program[s:pos]
            if sym in keywords: # operator keywords
                yield (sym, None, s)
            elif '-' in sym:
                # some jerk gave us foo-bar-baz, try to check if it's a symbol
                if lookup and lookup(sym):
                    # looks like a real symbol
                    yield ('symbol', sym, s)
                else:
                    # looks like an expression
                    parts = sym.split('-')
                    for p in parts[:-1]:
                        if p: # possible consecutive -
                            yield ('symbol', p, s)
                        s += len(p)
                        yield ('-', None, pos)
                        s += 1
                    if parts[-1]: # possible trailing -
                        yield ('symbol', parts[-1], s)
            else:
                yield ('symbol', sym, s)
            pos -= 1
        else:
            raise error.ParseError(_("syntax error in revset '%s'") %
                                   program, pos)
        pos += 1
    yield ('end', None, pos)

# helpers
Yuya Nishihara
|
def getsymbol(x):
    """Return the name carried by a 'symbol' parse node.

    Raises ParseError when x is empty or not a symbol node.
    """
    if not x or x[0] != 'symbol':
        raise error.ParseError(_('not a symbol'))
    return x[1]
Matt Mackall
|
def getstring(x, err):
    """Return the text of a 'string' or 'symbol' parse node.

    Raises ParseError(err) for any other node (including None).
    """
    if not x:
        raise error.ParseError(err)
    kind = x[0]
    if kind == 'string' or kind == 'symbol':
        return x[1]
    raise error.ParseError(err)
Matt Mackall
|
r11275 | |||
def getlist(x):
    """Flatten a parse tree node into a list of argument nodes.

    None/empty yields [], a 'list' node yields its children, and any other
    node is treated as a single argument.
    """
    if not x:
        return []
    return list(x[1:]) if x[0] == 'list' else [x]
Matt Mackall
|
def getargs(x, min, max, err):
    """Return the argument list of node x, enforcing arity.

    Fewer than 'min' or more than 'max' arguments raise ParseError(err);
    a negative 'max' means unbounded.
    """
    args = getlist(x)
    nargs = len(args)
    if nargs < min or (0 <= max < nargs):
        raise error.ParseError(err)
    return args
Yuya Nishihara
|
def getargsdict(x, funcname, keys):
    # Parse the argument tree 'x' into a dict keyed by the space-separated
    # names in 'keys'.  'funcname' only appears in error messages.
    return parser.buildargsdict(getlist(x), funcname, keys.split(),
                                keyvaluenode='keyvalue', keynode='symbol')
Matt Mackall
|
def getset(repo, subset, x):
    """Evaluate parse tree 'x' against 'subset' and return a smartset.

    Dispatches on the node type through the 'methods' table.  Results that
    are not already smartsets (lists returned by outdated extensions) are
    wrapped in a baseset, with a deprecation warning for 'func' nodes.
    """
    if not x:
        raise error.ParseError(_("missing argument"))
    s = methods[x[0]](repo, subset, *x[1:])
    if util.safehasattr(s, 'isascending'):
        # duck-typing check: every smartset exposes isascending()
        return s
    # else case should not happen, because all non-func are internal,
    # ignoring for now.
    if x[0] == 'func' and x[1][0] == 'symbol' and x[1][1] in symbols:
        repo.ui.deprecwarn('revset "%s" uses list instead of smartset'
                           % x[1][1],
                           '3.9')
    return baseset(s)
Matt Mackall
|
r11275 | |||
Matt Harbison
|
def _getrevsource(repo, r):
    """Return the origin revision recorded in r's extras, or None.

    Checks the markers left by convert, transplant and rebase; returns None
    when no marker exists or the recorded node is unknown locally.
    """
    extra = repo[r].extra()
    for label in ('source', 'transplant_source', 'rebase_source'):
        if label not in extra:
            continue
        try:
            return repo[extra[label]].rev()
        except error.RepoLookupError:
            pass
    return None
Matt Mackall
|
# operator methods

def stringset(repo, subset, x):
    """Resolve a bare revision identifier to a single-revision set."""
    rev = repo[x].rev()
    if rev in subset:
        return baseset([rev])
    # special case: accept the null revision when matching against the
    # whole repository, even though iteration never produces it
    if rev == node.nullrev and isinstance(subset, fullreposet):
        return baseset([rev])
    return baseset()
Matt Mackall
|
r11275 | |||
def rangeset(repo, subset, x, y):
    """Evaluate 'x:y' - all revisions between x and y, inclusive, in the
    direction implied by the endpoints."""
    m = getset(repo, fullreposet(repo), x)
    n = getset(repo, fullreposet(repo), y)

    if not m or not n:
        return baseset()
    # multiple matches collapse to first-of-left .. last-of-right
    m, n = m.first(), n.last()

    if m == n:
        r = baseset([m])
    elif n == node.wdirrev:
        # the working directory pseudo-rev sorts after all real revisions
        r = spanset(repo, m, len(repo)) + baseset([n])
    elif m == node.wdirrev:
        r = baseset([m]) + spanset(repo, len(repo) - 1, n - 1)
    elif m < n:
        r = spanset(repo, m, n + 1)
    else:
        # descending range
        r = spanset(repo, m, n - 1)
    # XXX We should combine with subset first: 'subset & baseset(...)'. This is
    # necessary to ensure we preserve the order in subset.
    #
    # This has performance implication, carrying the sorting over when possible
    # would be more efficient.
    return r & subset
Matt Mackall
|
r11275 | |||
Bryan O'Sullivan
|
def dagrange(repo, subset, x, y):
    """Evaluate 'x::y' - changesets that are both descendants of x and
    ancestors of y."""
    everything = fullreposet(repo)
    roots = getset(repo, everything, x)
    heads = getset(repo, everything, y)
    xs = reachableroots(repo, roots, heads, includepath=True)
    return subset & xs
Bryan O'Sullivan
|
r16860 | |||
Matt Mackall
|
def andset(repo, subset, x, y):
    # intersection: evaluate y within the set produced by x
    left = getset(repo, subset, x)
    return getset(repo, left, y)
Durham Goode
|
def differenceset(repo, subset, x, y):
    # set difference: revisions matching x but not y, within subset
    include = getset(repo, subset, x)
    exclude = getset(repo, subset, y)
    return include - exclude
Yuya Nishihara
|
def orset(repo, subset, *xs):
    # Evaluate an n-ary 'or' by divide and conquer so recursion depth stays
    # logarithmic in the number of operands.
    assert xs
    if len(xs) == 1:
        return getset(repo, subset, xs[0])
    mid = len(xs) // 2
    left = orset(repo, subset, *xs[:mid])
    right = orset(repo, subset, *xs[mid:])
    return left + right
Matt Mackall
|
r11275 | |||
def notset(repo, subset, x):
    # complement of x within subset
    excluded = getset(repo, subset, x)
    return subset - excluded
Matt Mackall
|
r11275 | |||
Yuya Nishihara
|
def listset(repo, subset, *xs):
    # a bare 'a, b' list is never a valid revset expression on its own
    raise error.ParseError(_("can't use a list in this context"),
                           hint=_('see hg help "revsets.x or y"'))
Matt Mackall
|
r11275 | |||
Yuya Nishihara
|
def keyvaluepair(repo, subset, k, v):
    # 'key=value' is only meaningful inside specific function argument lists
    raise error.ParseError(_("can't use a key-value pair in this context"))
Matt Mackall
|
def func(repo, subset, a, b):
    # Evaluate a function-call node: 'a' names the function, 'b' is its
    # (possibly None) argument tree.
    f = getsymbol(a)
    if f in symbols:
        return symbols[f](repo, subset, b)

    keep = lambda fn: getattr(fn, '__doc__', None) is not None

    # only suggest documented (user-visible) predicates in the error
    syms = [s for (s, fn) in symbols.items() if keep(fn)]
    raise error.UnknownIdentifier(f, syms)
Matt Mackall
|
r11275 | |||
# functions

# symbols are callables like:
#   fn(repo, subset, x)
# with:
#   repo - current repository instance
#   subset - of revisions to be examined
#   x - argument in tree form
symbols = {}

# symbols which can't be used for a DoS attack for any given input
# (e.g. those which accept regexes as plain strings shouldn't be included)
# functions that just return a lot of changesets (like all) don't count here
safesymbols = set()

# decorator that registers a function into 'symbols' (and, with safe=True,
# into 'safesymbols')
predicate = registrar.revsetpredicate()
FUJIWARA Katsunori
|
r27587 | |||
FUJIWARA Katsunori
|
@predicate('_destupdate')
def _destupdate(repo, subset, x):
    # experimental revset for update destination
    # (no docstring on purpose: kept out of user-visible help/suggestions)
    #
    # Fix: the funcname argument of getargsdict only appears in parse error
    # messages; it previously read 'limit' (copy-pasted from the limit()
    # predicate), producing misleading errors for bad arguments.
    args = getargsdict(x, '_destupdate', 'clean check')
    return subset & baseset([destutil.destupdate(repo, **args)[0]])
FUJIWARA Katsunori
|
@predicate('_destmerge')
def _destmerge(repo, subset, x):
    # experimental revset for merge destination
    # (no docstring on purpose: kept out of user-visible help/suggestions)
    sourceset = None
    if x is not None:
        sourceset = getset(repo, fullreposet(repo), x)
    return subset & baseset([destutil.destmerge(repo, sourceset=sourceset)])
Pierre-Yves David
|
r26303 | |||
FUJIWARA Katsunori
|
@predicate('adds(pattern)', safe=True)
def adds(repo, subset, x):
    """Changesets that add a file matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "adds" is a keyword
    pat = getstring(x, _("adds requires a pattern"))
    # field 1 of repo.status() holds added files
    return checkstatus(repo, subset, pat, 1)
FUJIWARA Katsunori
|
@predicate('ancestor(*changeset)', safe=True)
def ancestor(repo, subset, x):
    """A greatest common ancestor of the changesets.

    Accepts 0 or more changesets.
    Will return empty list when passed no args.
    Greatest common ancestor of a single changeset is that changeset.
    """
    # i18n: "ancestor" is a keyword
    l = getlist(x)
    rl = fullreposet(repo)
    anc = None

    # (getset(repo, rl, i) for i in l) generates a list of lists
    for revs in (getset(repo, rl, i) for i in l):
        for r in revs:
            if anc is None:
                anc = repo[r]
            else:
                # fold pairwise: gca of the running result and the next rev
                anc = anc.ancestor(repo[r])

    if anc is not None and anc.rev() in subset:
        return baseset([anc.rev()])
    return baseset()
Idan Kamara
|
r13915 | |||
Patrick Mezard
|
r16409 | def _ancestors(repo, subset, x, followfirst=False): | ||
Yuya Nishihara
|
r24115 | heads = getset(repo, fullreposet(repo), x) | ||
Mads Kiilerich
|
r22944 | if not heads: | ||
Pierre-Yves David
|
r22802 | return baseset() | ||
Mads Kiilerich
|
r22944 | s = _revancestors(repo, heads, followfirst) | ||
Pierre-Yves David
|
r23003 | return subset & s | ||
Patrick Mezard
|
r16409 | |||
FUJIWARA Katsunori
|
@predicate('ancestors(set)', safe=True)
def ancestors(repo, subset, x):
    """Changesets that are ancestors of a changeset in set.
    """
    return _ancestors(repo, subset, x)
FUJIWARA Katsunori
|
@predicate('_firstancestors', safe=True)
def _firstancestors(repo, subset, x):
    # ``_firstancestors(set)``
    # Like ``ancestors(set)`` but follows only the first parents.
    # (comment, not docstring, to keep it out of user-visible help)
    return _ancestors(repo, subset, x, followfirst=True)
Idan Kamara
|
r13915 | |||
Kevin Gessner
|
def ancestorspec(repo, subset, x, n):
    """``set~n``
    Changesets that are the Nth ancestor (first parents only) of a changeset
    in set.
    """
    try:
        n = int(n[1])
    except (TypeError, ValueError):
        raise error.ParseError(_("~ expects a number"))
    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        # walk n steps up the first-parent chain
        for i in range(n):
            r = cl.parentrevs(r)[0]
        ps.add(r)
    return subset & ps
Kevin Gessner
|
r14070 | |||
FUJIWARA Katsunori
|
@predicate('author(string)', safe=True)
def author(repo, subset, x):
    """Alias for ``user(string)``.
    """
    # i18n: "author" is a keyword
    n = encoding.lower(getstring(x, _("author requires a string")))
    kind, pattern, matcher = _substringmatcher(n)
    # case-insensitive substring (or re:) match on the commit user field
    return subset.filter(lambda x: matcher(encoding.lower(repo[x].user())),
                         condrepr=('<user %r>', n))
Idan Kamara
|
r13915 | |||
FUJIWARA Katsunori
|
@predicate('bisect(string)', safe=True)
def bisect(repo, subset, x):
    """Changesets marked in the specified bisect status:

    - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
    - ``goods``, ``bads``    : csets topologically good/bad
    - ``range``              : csets taking part in the bisection
    - ``pruned``             : csets that are goods, bads or skipped
    - ``untested``           : csets whose fate is yet unknown
    - ``ignored``            : csets ignored due to DAG topology
    - ``current``            : the cset currently being bisected
    """
    # i18n: "bisect" is a keyword
    status = getstring(x, _("bisect requires a string")).lower()
    state = set(hbisect.get(repo, status))
    return subset & state
Idan Kamara
|
r13915 | |||
"Yann E. MORIN"
|
# Backward-compatibility
# - no help entry so that we do not advertise it any more
@predicate('bisected', safe=True)
def bisected(repo, subset, x):
    # deprecated alias for bisect()
    return bisect(repo, subset, x)
FUJIWARA Katsunori
|
@predicate('bookmark([name])', safe=True)
def bookmark(repo, subset, x):
    """The named bookmark or all bookmarks.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a bookmark that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "bookmark" is a keyword
    args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
    if args:
        bm = getstring(args[0],
                       # i18n: "bookmark" is a keyword
                       _('the argument to bookmark must be a string'))
        kind, pattern, matcher = util.stringmatcher(bm)
        bms = set()
        if kind == 'literal':
            # exact name: direct dictionary lookup
            bmrev = repo._bookmarks.get(pattern, None)
            if not bmrev:
                raise error.RepoLookupError(_("bookmark '%s' does not exist")
                                            % pattern)
            bms.add(repo[bmrev].rev())
        else:
            # pattern (e.g. re:): scan all bookmark names
            matchrevs = set()
            for name, bmrev in repo._bookmarks.iteritems():
                if matcher(name):
                    matchrevs.add(bmrev)
            if not matchrevs:
                raise error.RepoLookupError(_("no bookmarks exist"
                                              " that match '%s'") % pattern)
            for bmrev in matchrevs:
                bms.add(repo[bmrev].rev())
    else:
        # no argument: every bookmarked revision
        bms = set([repo[r].rev()
                   for r in repo._bookmarks.values()])
    bms -= set([node.nullrev])
    return subset & bms
Idan Kamara
|
r13915 | |||
FUJIWARA Katsunori
|
@predicate('branch(string or set)', safe=True)
def branch(repo, subset, x):
    """
    All changesets belonging to the given branch or the branches of the given
    changesets.

    If `string` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a branch that actually starts with `re:`,
    use the prefix `literal:`.
    """
    getbi = repo.revbranchcache().branchinfo

    try:
        b = getstring(x, '')
    except error.ParseError:
        # not a string, but another revspec, e.g. tip()
        pass
    else:
        kind, pattern, matcher = util.stringmatcher(b)
        if kind == 'literal':
            # note: falls through to the revspec case if no branch with
            # this name exists and pattern kind is not specified explicitly
            if pattern in repo.branchmap():
                return subset.filter(lambda r: matcher(getbi(r)[0]),
                                     condrepr=('<branch %r>', b))
            if b.startswith('literal:'):
                raise error.RepoLookupError(_("branch '%s' does not exist")
                                            % pattern)
        else:
            return subset.filter(lambda r: matcher(getbi(r)[0]),
                                 condrepr=('<branch %r>', b))

    # revspec case: collect the branches of the given changesets, then keep
    # any revision on one of those branches (or in the set itself)
    s = getset(repo, fullreposet(repo), x)
    b = set()
    for r in s:
        b.add(getbi(r)[0])
    c = s.__contains__
    return subset.filter(lambda r: c(r) or getbi(r)[0] in b,
                         condrepr=lambda: '<branch %r>' % sorted(b))
Idan Kamara
|
r13915 | |||
FUJIWARA Katsunori
|
@predicate('bumped()', safe=True)
def bumped(repo, subset, x):
    """Mutable changesets marked as successors of public changesets.

    Only non-public and non-obsolete changesets can be `bumped`.
    """
    # i18n: "bumped" is a keyword
    getargs(x, 0, 0, _("bumped takes no arguments"))
    bumped = obsmod.getrevs(repo, 'bumped')
    return subset & bumped
Pierre-Yves David
|
r17829 | |||
FUJIWARA Katsunori
|
@predicate('bundle()', safe=True)
def bundle(repo, subset, x):
    """Changesets in the bundle.

    Bundle must be specified by the -R option."""
    try:
        # only bundlerepo changelogs have a 'bundlerevs' attribute
        bundlerevs = repo.changelog.bundlerevs
    except AttributeError:
        raise error.Abort(_("no bundle provided - specify with -R"))
    return subset & bundlerevs
Tomasz Kleczek
|
r17913 | |||
Idan Kamara
|
def checkstatus(repo, subset, pat, field):
    """Filter subset to changesets whose status entry 'field' (an index into
    the repo.status() tuple, e.g. 0=modified, 1=added, 2=removed) contains a
    file matching 'pat'."""
    hasset = matchmod.patkind(pat) == 'set'

    # one-element list so the closure can rebuild the (context-dependent)
    # matcher only when necessary
    mcache = [None]
    def matches(x):
        c = repo[x]
        if not mcache[0] or hasset:
            mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
        m = mcache[0]
        fname = None
        if not m.anypats() and len(m.files()) == 1:
            # literal single-file pattern: cheap membership tests suffice
            fname = m.files()[0]
        if fname is not None:
            if fname not in c.files():
                return False
        else:
            for f in c.files():
                if m(f):
                    break
            else:
                return False
        files = repo.status(c.p1().node(), c.node())[field]
        if fname is not None:
            if fname in files:
                return True
        else:
            for f in files:
                if m(f):
                    return True
        # NOTE(review): falls through returning None (falsy) by design

    return subset.filter(matches, condrepr=('<status[%r] %r>', field, pat))
Idan Kamara
|
r13915 | |||
Martin von Zweigbergk
|
def _children(repo, subset, parentset):
    # Return a baseset of the members of 'subset' having a parent in
    # 'parentset'.
    if not parentset:
        return baseset()
    cs = set()
    pr = repo.changelog.parentrevs
    minrev = parentset.min()
    for r in subset:
        # a child always has a higher rev than its parents
        if r <= minrev:
            continue
        for p in pr(r):
            if p in parentset:
                cs.add(r)
    return baseset(cs)
Matt Mackall
|
r15899 | |||
FUJIWARA Katsunori
|
@predicate('children(set)', safe=True)
def children(repo, subset, x):
    """Child changesets of changesets in set.
    """
    s = getset(repo, fullreposet(repo), x)
    cs = _children(repo, subset, s)
    return subset & cs
Idan Kamara
|
r13915 | |||
FUJIWARA Katsunori
|
@predicate('closed()', safe=True)
def closed(repo, subset, x):
    """Changeset is closed.
    """
    # i18n: "closed" is a keyword
    getargs(x, 0, 0, _("closed takes no arguments"))
    return subset.filter(lambda r: repo[r].closesbranch(),
                         condrepr='<branch closed>')
Idan Kamara
|
r13915 | |||
FUJIWARA Katsunori
|
@predicate('contains(pattern)')
def contains(repo, subset, x):
    """The revision's manifest contains a file matching pattern (but might not
    modify it). See :hg:`help patterns` for information about file patterns.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.
    """
    # i18n: "contains" is a keyword
    pat = getstring(x, _("contains requires a pattern"))

    def matches(x):
        if not matchmod.patkind(pat):
            # plain path: a single manifest membership test
            pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
            if pats in repo[x]:
                return True
        else:
            # real pattern: scan the whole manifest
            c = repo[x]
            m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
            for f in c.manifest():
                if m(f):
                    return True
        return False

    return subset.filter(matches, condrepr=('<contains %r>', pat))
Idan Kamara
|
r13915 | |||
@predicate('converted([id])', safe=True)
def converted(repo, subset, x):
    """Changesets converted from the given identifier in the old repository if
    present, or all converted changesets if no identifier is specified.
    """
    # There is exactly no chance of resolving the revision, so do a simple
    # string compare and hope for the best
    rev = None
    # i18n: "converted" is a keyword
    l = getargs(x, 0, 1, _('converted takes one or no arguments'))
    if l:
        # i18n: "converted" is a keyword
        rev = getstring(l[0], _('converted requires a revision'))

    def _matchvalue(r):
        source = repo[r].extra().get('convert_revision', None)
        return source is not None and (rev is None or source.startswith(rev))

    return subset.filter(lambda r: _matchvalue(r),
                         condrepr=('<converted %r>', rev))
@predicate('date(interval)', safe=True)
def date(repo, subset, x):
    """Changesets within the interval, see :hg:`help dates`.
    """
    # i18n: "date" is a keyword
    ds = getstring(x, _("date requires a string"))
    dm = util.matchdate(ds)
    return subset.filter(lambda x: dm(repo[x].date()[0]),
                         condrepr=('<date %r>', ds))
@predicate('desc(string)', safe=True)
def desc(repo, subset, x):
    """Search commit message for string. The match is case-insensitive.
    """
    # i18n: "desc" is a keyword
    ds = encoding.lower(getstring(x, _("desc requires a string")))

    def matches(x):
        c = repo[x]
        return ds in encoding.lower(c.description())

    return subset.filter(matches, condrepr=('<desc %r>', ds))
def _descendants(repo, subset, x, followfirst=False):
    # Shared implementation for descendants() and _firstdescendants().
    roots = getset(repo, fullreposet(repo), x)
    if not roots:
        return baseset()
    s = _revdescendants(repo, roots, followfirst)

    # Both sets need to be ascending in order to lazily return the union
    # in the correct order.
    base = subset & roots
    desc = subset & s
    result = base + desc
    if subset.isascending():
        result.sort()
    elif subset.isdescending():
        result.sort(reverse=True)
    else:
        result = subset & result
    return result
@predicate('descendants(set)', safe=True)
def descendants(repo, subset, x):
    """Changesets which are descendants of changesets in set.
    """
    return _descendants(repo, subset, x)
@predicate('_firstdescendants', safe=True)
def _firstdescendants(repo, subset, x):
    # ``_firstdescendants(set)``
    # Like ``descendants(set)`` but follows only the first parents.
    return _descendants(repo, subset, x, followfirst=True)
@predicate('destination([set])', safe=True)
def destination(repo, subset, x):
    """Changesets that were created by a graft, transplant or rebase operation,
    with the given revisions specified as the source.  Omitting the optional set
    is the same as passing all().
    """
    if x is not None:
        sources = getset(repo, fullreposet(repo), x)
    else:
        sources = fullreposet(repo)

    dests = set()

    # subset contains all of the possible destinations that can be returned, so
    # iterate over them and see if their source(s) were provided in the arg set.
    # Even if the immediate src of r is not in the arg set, src's source (or
    # further back) may be.  Scanning back further than the immediate src allows
    # transitive transplants and rebases to yield the same results as transitive
    # grafts.
    for r in subset:
        src = _getrevsource(repo, r)
        lineage = None

        while src is not None:
            if lineage is None:
                lineage = list()

            lineage.append(r)

            # The visited lineage is a match if the current source is in the
            # arg set.  Since every candidate dest is visited by way of
            # iterating subset, any dests further back in the lineage will be
            # tested by a different iteration over subset.  Likewise, if the
            # src was already selected, the current lineage can be selected
            # without going back further.
            if src in sources or src in dests:
                dests.update(lineage)
                break

            r = src
            src = _getrevsource(repo, r)

    return subset.filter(dests.__contains__,
                         condrepr=lambda: '<destination %r>' % sorted(dests))
@predicate('divergent()', safe=True)
def divergent(repo, subset, x):
    """
    Final successors of changesets with an alternative set of final successors.
    """
    # i18n: "divergent" is a keyword
    getargs(x, 0, 0, _("divergent takes no arguments"))
    divergent = obsmod.getrevs(repo, 'divergent')
    return subset & divergent
@predicate('extinct()', safe=True)
def extinct(repo, subset, x):
    """Obsolete changesets with obsolete descendants only.
    """
    # i18n: "extinct" is a keyword
    getargs(x, 0, 0, _("extinct takes no arguments"))
    extincts = obsmod.getrevs(repo, 'extinct')
    return subset & extincts
@predicate('extra(label, [value])', safe=True)
def extra(repo, subset, x):
    """Changesets with the given label in the extra metadata, with the given
    optional value.

    If `value` starts with `re:`, the remainder of the value is treated as
    a regular expression. To match a value that actually starts with `re:`,
    use the prefix `literal:`.
    """
    args = getargsdict(x, 'extra', 'label value')
    if 'label' not in args:
        # i18n: "extra" is a keyword
        raise error.ParseError(_('extra takes at least 1 argument'))
    # i18n: "extra" is a keyword
    label = getstring(args['label'], _('first argument to extra must be '
                                       'a string'))
    value = None

    if 'value' in args:
        # i18n: "extra" is a keyword
        value = getstring(args['value'], _('second argument to extra must be '
                                           'a string'))
        kind, value, matcher = util.stringmatcher(value)

    def _matchvalue(r):
        extra = repo[r].extra()
        return label in extra and (value is None or matcher(extra[label]))

    return subset.filter(lambda r: _matchvalue(r),
                         condrepr=('<extra[%r] %r>', label, value))
@predicate('filelog(pattern)', safe=True)
def filelog(repo, subset, x):
    """Changesets connected to the specified filelog.

    For performance reasons, visits only revisions mentioned in the file-level
    filelog, rather than filtering through all changesets (much faster, but
    doesn't include deletes or duplicate changes). For a slower, more accurate
    result, use ``file()``.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.

    If some linkrev points to revisions filtered by the current repoview, we'll
    work around it to return a non-filtered value.
    """

    # i18n: "filelog" is a keyword
    pat = getstring(x, _("filelog requires a pattern"))
    s = set()
    cl = repo.changelog

    if not matchmod.patkind(pat):
        f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
        files = [f]
    else:
        m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
        files = (f for f in repo[None] if m(f))

    for f in files:
        fl = repo.file(f)
        known = {}
        scanpos = 0
        for fr in list(fl):
            fn = fl.node(fr)
            if fn in known:
                s.add(known[fn])
                continue

            lr = fl.linkrev(fr)
            if lr in cl:
                s.add(lr)
            elif scanpos is not None:
                # lowest matching changeset is filtered, scan further
                # ahead in changelog
                start = max(lr, scanpos) + 1
                scanpos = None
                for r in cl.revs(start):
                    # minimize parsing of non-matching entries
                    if f in cl.revision(r) and f in cl.readfiles(r):
                        try:
                            # try to use manifest delta fastpath
                            n = repo[r].filenode(f)
                            if n not in known:
                                if n == fn:
                                    s.add(r)
                                    scanpos = r
                                    break
                                else:
                                    known[n] = r
                        except error.ManifestLookupError:
                            # deletion in changelog
                            continue

    return subset & s
@predicate('first(set, [n])', safe=True)
def first(repo, subset, x):
    """An alias for limit().
    """
    return limit(repo, subset, x)
def _follow(repo, subset, x, name, followfirst=False):
    # Shared implementation for follow() and _followfirst().
    l = getargs(x, 0, 1, _("%s takes no arguments or a pattern") % name)
    c = repo['.']
    if l:
        x = getstring(l[0], _("%s expected a pattern") % name)
        matcher = matchmod.match(repo.root, repo.getcwd(), [x],
                                 ctx=repo[None], default='path')
        files = c.manifest().walk(matcher)

        s = set()
        for fname in files:
            fctx = c[fname]
            s = s.union(set(c.rev() for c in fctx.ancestors(followfirst)))
            # include the revision responsible for the most recent version
            s.add(fctx.introrev())
    else:
        s = _revancestors(repo, baseset([c.rev()]), followfirst)

    return subset & s
@predicate('follow([pattern])', safe=True)
def follow(repo, subset, x):
    """
    An alias for ``::.`` (ancestors of the working directory's first parent).
    If pattern is specified, the histories of files matching given
    pattern is followed, including copies.
    """
    return _follow(repo, subset, x, 'follow')
@predicate('_followfirst', safe=True)
def _followfirst(repo, subset, x):
    # ``followfirst([pattern])``
    # Like ``follow([pattern])`` but follows only the first parent of
    # every revisions or files revisions.
    return _follow(repo, subset, x, '_followfirst', followfirst=True)
@predicate('all()', safe=True)
def getall(repo, subset, x):
    """All changesets, the same as ``0:tip``.
    """
    # i18n: "all" is a keyword
    getargs(x, 0, 0, _("all takes no arguments"))
    return subset & spanset(repo)  # drop "null" if any
@predicate('grep(regex)')
def grep(repo, subset, x):
    """Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
    to ensure special escape characters are handled correctly. Unlike
    ``keyword(string)``, the match is case-sensitive.
    """
    try:
        # i18n: "grep" is a keyword
        gr = re.compile(getstring(x, _("grep requires a string")))
    except re.error as e:
        raise error.ParseError(_('invalid match pattern: %s') % e)

    def matches(x):
        c = repo[x]
        for e in c.files() + [c.user(), c.description()]:
            if gr.search(e):
                return True
        return False

    return subset.filter(matches, condrepr=('<grep %r>', gr.pattern))
@predicate('_matchfiles', safe=True)
def _matchfiles(repo, subset, x):
    # _matchfiles takes a revset list of prefixed arguments:
    #
    #   [p:foo, i:bar, x:baz]
    #
    # builds a match object from them and filters subset. Allowed
    # prefixes are 'p:' for regular patterns, 'i:' for include
    # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
    # a revision identifier, or the empty string to reference the
    # working directory, from which the match object is
    # initialized. Use 'd:' to set the default matching mode, default
    # to 'glob'. At most one 'r:' and 'd:' argument can be passed.

    l = getargs(x, 1, -1, "_matchfiles requires at least one argument")
    pats, inc, exc = [], [], []
    rev, default = None, None
    for arg in l:
        s = getstring(arg, "_matchfiles requires string arguments")
        prefix, value = s[:2], s[2:]
        if prefix == 'p:':
            pats.append(value)
        elif prefix == 'i:':
            inc.append(value)
        elif prefix == 'x:':
            exc.append(value)
        elif prefix == 'r:':
            if rev is not None:
                raise error.ParseError('_matchfiles expected at most one '
                                       'revision')
            if value != '': # empty means working directory; leave rev as None
                rev = value
        elif prefix == 'd:':
            if default is not None:
                raise error.ParseError('_matchfiles expected at most one '
                                       'default mode')
            default = value
        else:
            raise error.ParseError('invalid _matchfiles prefix: %s' % prefix)
    if not default:
        default = 'glob'

    m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
                       exclude=exc, ctx=repo[rev], default=default)

    # This directly read the changelog data as creating changectx for all
    # revisions is quite expensive.
    getfiles = repo.changelog.readfiles
    wdirrev = node.wdirrev
    def matches(x):
        if x == wdirrev:
            files = repo[x].files()
        else:
            files = getfiles(x)
        for f in files:
            if m(f):
                return True
        return False

    return subset.filter(matches,
                         condrepr=('<matchfiles patterns=%r, include=%r '
                                   'exclude=%r, default=%r, rev=%r>',
                                   pats, inc, exc, default, rev))
@predicate('file(pattern)', safe=True)
def hasfile(repo, subset, x):
    """Changesets affecting files matched by pattern.

    For a faster but less accurate result, consider using ``filelog()``
    instead.

    This predicate uses ``glob:`` as the default kind of pattern.
    """
    # i18n: "file" is a keyword
    pat = getstring(x, _("file requires a pattern"))
    return _matchfiles(repo, subset, ('string', 'p:' + pat))
@predicate('head()', safe=True)
def head(repo, subset, x):
    """Changeset is a named branch head.
    """
    # i18n: "head" is a keyword
    getargs(x, 0, 0, _("head takes no arguments"))
    hs = set()
    cl = repo.changelog
    for ls in repo.branchmap().itervalues():
        hs.update(cl.rev(h) for h in ls)
    return subset & baseset(hs)
@predicate('heads(set)', safe=True)
def heads(repo, subset, x):
    """Members of set with no children in set.
    """
    s = getset(repo, subset, x)
    ps = parents(repo, subset, x)
    return s - ps
@predicate('hidden()', safe=True)
def hidden(repo, subset, x):
    """Hidden changesets.
    """
    # i18n: "hidden" is a keyword
    getargs(x, 0, 0, _("hidden takes no arguments"))
    hiddenrevs = repoview.filterrevs(repo, 'visible')
    return subset & hiddenrevs
@predicate('keyword(string)', safe=True)
def keyword(repo, subset, x):
    """Search commit message, user name, and names of changed files for
    string. The match is case-insensitive.
    """
    # i18n: "keyword" is a keyword
    kw = encoding.lower(getstring(x, _("keyword requires a string")))

    def matches(r):
        c = repo[r]
        return any(kw in encoding.lower(t)
                   for t in c.files() + [c.user(), c.description()])

    return subset.filter(matches, condrepr=('<keyword %r>', kw))
@predicate('limit(set[, n[, offset]])', safe=True)
def limit(repo, subset, x):
    """First n members of set, defaulting to 1, starting from offset.
    """
    args = getargsdict(x, 'limit', 'set n offset')
    if 'set' not in args:
        # i18n: "limit" is a keyword
        raise error.ParseError(_("limit requires one to three arguments"))
    try:
        lim, ofs = 1, 0
        if 'n' in args:
            # i18n: "limit" is a keyword
            lim = int(getstring(args['n'], _("limit requires a number")))
        if 'offset' in args:
            # i18n: "limit" is a keyword
            ofs = int(getstring(args['offset'], _("limit requires a number")))
        if ofs < 0:
            raise error.ParseError(_("negative offset"))
    except (TypeError, ValueError):
        # i18n: "limit" is a keyword
        raise error.ParseError(_("limit expects a number"))
    os = getset(repo, fullreposet(repo), args['set'])
    result = []
    it = iter(os)
    # skip 'ofs' members, then collect up to 'lim' members that are in subset
    for x in xrange(ofs):
        y = next(it, None)
        if y is None:
            break
    for x in xrange(lim):
        y = next(it, None)
        if y is None:
            break
        elif y in subset:
            result.append(y)
    return baseset(result, datarepr=('<limit n=%d, offset=%d, %r, %r>',
                                     lim, ofs, subset, os))
@predicate('last(set, [n])', safe=True)
def last(repo, subset, x):
    """Last n members of set, defaulting to 1.
    """
    # i18n: "last" is a keyword
    l = getargs(x, 1, 2, _("last requires one or two arguments"))
    try:
        lim = 1
        if len(l) == 2:
            # i18n: "last" is a keyword
            lim = int(getstring(l[1], _("last requires a number")))
    except (TypeError, ValueError):
        # i18n: "last" is a keyword
        raise error.ParseError(_("last expects a number"))
    os = getset(repo, fullreposet(repo), l[0])
    os.reverse()
    result = []
    it = iter(os)
    for x in xrange(lim):
        y = next(it, None)
        if y is None:
            break
        elif y in subset:
            result.append(y)
    return baseset(result, datarepr=('<last n=%d, %r, %r>', lim, subset, os))
@predicate('max(set)', safe=True)
def maxrev(repo, subset, x):
    """Changeset with highest revision number in set.
    """
    os = getset(repo, fullreposet(repo), x)
    try:
        m = os.max()
        if m in subset:
            return baseset([m], datarepr=('<max %r, %r>', subset, os))
    except ValueError:
        # os.max() throws a ValueError when the collection is empty.
        # Same as python's max().
        pass
    return baseset(datarepr=('<max %r, %r>', subset, os))
@predicate('merge()', safe=True)
def merge(repo, subset, x):
    """Changeset is a merge changeset.
    """
    # i18n: "merge" is a keyword
    getargs(x, 0, 0, _("merge takes no arguments"))
    cl = repo.changelog
    return subset.filter(lambda r: cl.parentrevs(r)[1] != -1,
                         condrepr='<merge>')
@predicate('branchpoint()', safe=True)
def branchpoint(repo, subset, x):
    """Changesets with more than one child.
    """
    # i18n: "branchpoint" is a keyword
    getargs(x, 0, 0, _("branchpoint takes no arguments"))
    cl = repo.changelog
    if not subset:
        return baseset()
    # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
    # (and if it is not, it should.)
    baserev = min(subset)
    parentscount = [0]*(len(repo) - baserev)
    for r in cl.revs(start=baserev + 1):
        for p in cl.parentrevs(r):
            if p >= baserev:
                parentscount[p - baserev] += 1
    return subset.filter(lambda r: parentscount[r - baserev] > 1,
                         condrepr='<branchpoint>')
@predicate('min(set)', safe=True)
def minrev(repo, subset, x):
    """Changeset with lowest revision number in set.
    """
    os = getset(repo, fullreposet(repo), x)
    try:
        m = os.min()
        if m in subset:
            return baseset([m], datarepr=('<min %r, %r>', subset, os))
    except ValueError:
        # os.min() throws a ValueError when the collection is empty.
        # Same as python's min().
        pass
    return baseset(datarepr=('<min %r, %r>', subset, os))
@predicate('modifies(pattern)', safe=True)
def modifies(repo, subset, x):
    """Changesets modifying files matched by pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "modifies" is a keyword
    pat = getstring(x, _("modifies requires a pattern"))
    return checkstatus(repo, subset, pat, 0)
@predicate('named(namespace)')
def named(repo, subset, x):
    """The changesets in a given namespace.

    If `namespace` starts with `re:`, the remainder of the string is treated as
    a regular expression. To match a namespace that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "named" is a keyword
    args = getargs(x, 1, 1, _('named requires a namespace argument'))

    ns = getstring(args[0],
                   # i18n: "named" is a keyword
                   _('the argument to named must be a string'))
    kind, pattern, matcher = util.stringmatcher(ns)
    namespaces = set()
    if kind == 'literal':
        if pattern not in repo.names:
            raise error.RepoLookupError(_("namespace '%s' does not exist")
                                        % ns)
        namespaces.add(repo.names[pattern])
    else:
        for name, ns in repo.names.iteritems():
            if matcher(name):
                namespaces.add(ns)
        if not namespaces:
            raise error.RepoLookupError(_("no namespace exists"
                                          " that match '%s'") % pattern)

    names = set()
    for ns in namespaces:
        for name in ns.listnames(repo):
            if name not in ns.deprecated:
                names.update(repo[n].rev() for n in ns.nodes(repo, name))

    names -= set([node.nullrev])
    return subset & names
@predicate('id(string)', safe=True)
def node_(repo, subset, x):
    """Revision non-ambiguously specified by the given hex string prefix.
    """
    # i18n: "id" is a keyword
    l = getargs(x, 1, 1, _("id requires one argument"))
    # i18n: "id" is a keyword
    n = getstring(l[0], _("id requires a string"))
    if len(n) == 40:
        try:
            rn = repo.changelog.rev(node.bin(n))
        except (LookupError, TypeError):
            rn = None
    else:
        rn = None
        pm = repo.changelog._partialmatch(n)
        if pm is not None:
            rn = repo.changelog.rev(pm)

    if rn is None:
        return baseset()
    result = baseset([rn])
    return result & subset
@predicate('obsolete()', safe=True)
def obsolete(repo, subset, x):
    """Mutable changeset with a newer version."""
    # i18n: "obsolete" is a keyword
    getargs(x, 0, 0, _("obsolete takes no arguments"))
    obsoletes = obsmod.getrevs(repo, 'obsolete')
    return subset & obsoletes
@predicate('only(set, [set])', safe=True)
def only(repo, subset, x):
    """Changesets that are ancestors of the first set that are not ancestors
    of any other head in the repo. If a second set is specified, the result
    is ancestors of the first set that are not ancestors of the second set
    (i.e. ::<set1> - ::<set2>).
    """
    cl = repo.changelog
    # i18n: "only" is a keyword
    args = getargs(x, 1, 2, _('only takes one or two arguments'))
    include = getset(repo, fullreposet(repo), args[0])
    if len(args) == 1:
        if not include:
            return baseset()
        descendants = set(_revdescendants(repo, include, False))
        exclude = [rev for rev in cl.headrevs()
            if not rev in descendants and not rev in include]
    else:
        exclude = getset(repo, fullreposet(repo), args[1])

    results = set(cl.findmissingrevs(common=exclude, heads=include))
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & results
@predicate('origin([set])', safe=True)
def origin(repo, subset, x):
    """
    Changesets that were specified as a source for the grafts, transplants or
    rebases that created the given revisions.  Omitting the optional set is the
    same as passing all().  If a changeset created by these operations is
    itself specified as a source for one of these operations, only the source
    changeset for the first operation is selected.
    """
    if x is not None:
        dests = getset(repo, fullreposet(repo), x)
    else:
        dests = fullreposet(repo)

    def _firstsrc(rev):
        # Walk the source chain back to the very first source revision.
        src = _getrevsource(repo, rev)
        if src is None:
            return None

        while True:
            prev = _getrevsource(repo, src)

            if prev is None:
                return src
            src = prev

    o = set([_firstsrc(r) for r in dests])
    o -= set([None])
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & o
FUJIWARA Katsunori
|
@predicate('outgoing([path])', safe=True)
def outgoing(repo, subset, x):
    """Changesets not found in the specified destination repository, or the
    default push location.
    """
    # Avoid cycles.
    from . import (
        discovery,
        hg,
    )
    # i18n: "outgoing" is a keyword
    l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
    # i18n: "outgoing" is a keyword
    dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
    # resolve the path against the configured push/pull defaults
    dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    # NOTE: contacts the remote repository (network I/O)
    other = hg.peer(repo, {}, dest)
    # suppress the discovery chatter normally printed during the exchange
    repo.ui.pushbuffer()
    outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
    repo.ui.popbuffer()
    cl = repo.changelog
    o = set([cl.rev(r) for r in outgoing.missing])
    return subset & o
FUJIWARA Katsunori
|
@predicate('p1([set])', safe=True)
def p1(repo, subset, x):
    """First parent of changesets in set, or the working directory.
    """
    if x is None:
        # repo[None] is the working directory context; take its first parent
        rev = repo[x].p1().rev()
        if rev >= 0:
            return subset & baseset([rev])
        return baseset()

    parentrevs = repo.changelog.parentrevs
    ps = set(parentrevs(r)[0] for r in getset(repo, fullreposet(repo), x))
    # root changesets report nullrev as their parent; drop it
    ps.discard(node.nullrev)
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & ps
FUJIWARA Katsunori
|
@predicate('p2([set])', safe=True)
def p2(repo, subset, x):
    """Second parent of changesets in set, or the working directory.
    """
    if x is None:
        # repo[None] is the working directory context
        wparents = repo[x].parents()
        try:
            rev = wparents[1].rev()
        except IndexError:
            # the working directory has only one parent
            return baseset()
        if rev >= 0:
            return subset & baseset([rev])
        return baseset()

    parentrevs = repo.changelog.parentrevs
    ps = set(parentrevs(r)[1] for r in getset(repo, fullreposet(repo), x))
    # non-merge changesets report nullrev as their second parent; drop it
    ps.discard(node.nullrev)
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & ps
FUJIWARA Katsunori
|
@predicate('parents([set])', safe=True)
def parents(repo, subset, x):
    """
    The set of all parents for all changesets in set, or the working directory.
    """
    if x is None:
        # repo[None] is the working directory context
        ps = set(p.rev() for p in repo[x].parents())
    else:
        ps = set()
        cl = repo.changelog
        # bind methods to locals: this loop can run over the whole repo
        up = ps.update
        parentrevs = cl.parentrevs
        for r in getset(repo, fullreposet(repo), x):
            if r == node.wdirrev:
                # the working directory has no changelog entry, so ask its
                # context object for parents instead of cl.parentrevs()
                up(p.rev() for p in repo[r].parents())
            else:
                up(parentrevs(r))
    # root changesets report nullrev as a parent; it is not a real revision
    ps -= set([node.nullrev])
    return subset & ps
Pierre-Yves David
|
def _phase(repo, subset, target):
    """helper to select all rev in phase <target>"""
    repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
    if repo._phasecache._phasesets:
        # fast path: the phase cache keeps a precomputed rev set per phase
        s = repo._phasecache._phasesets[target] - repo.changelog.filteredrevs
        s = baseset(s)
        s.sort() # set are non ordered, so we enforce ascending
        return subset & s
    else:
        # slow path: query the phase of each candidate revision one by one
        phase = repo._phasecache.phase
        condition = lambda r: phase(repo, r) == target
        return subset.filter(condition, condrepr=('<phase %r>', target),
                             cache=False)
FUJIWARA Katsunori
|
@predicate('draft()', safe=True)
def draft(repo, subset, x):
    """Changeset in draft phase."""
    # i18n: "draft" is a keyword
    getargs(x, 0, 0, _("draft takes no arguments"))
    # all the real work happens in the shared phase-selection helper
    return _phase(repo, subset, phases.draft)
FUJIWARA Katsunori
|
@predicate('secret()', safe=True)
def secret(repo, subset, x):
    """Changeset in secret phase."""
    # i18n: "secret" is a keyword
    getargs(x, 0, 0, _("secret takes no arguments"))
    # all the real work happens in the shared phase-selection helper
    return _phase(repo, subset, phases.secret)
Kevin Gessner
|
def parentspec(repo, subset, x, n):
    """``set^0``
    The set.
    ``set^1`` (or ``set^``), ``set^2``
    First or second parent, respectively, of all changesets in set.
    """
    try:
        # n is a parsed token; index 1 presumably carries its text payload
        # (same convention as the other parse-tree tuples in this module)
        n = int(n[1])
        if n not in (0, 1, 2):
            raise ValueError
    except (TypeError, ValueError):
        raise error.ParseError(_("^ expects a number 0, 1, or 2"))
    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        if n == 0:
            # ^0 selects the changeset itself
            ps.add(r)
        elif n == 1:
            ps.add(cl.parentrevs(r)[0])
        elif n == 2:
            parents = cl.parentrevs(r)
            if len(parents) > 1:
                # NOTE(review): for non-merges this adds nullrev, which is
                # later filtered out by the 'subset &' intersection
                ps.add(parents[1])
    return subset & ps
Matt Mackall
|
r11275 | |||
FUJIWARA Katsunori
|
@predicate('present(set)', safe=True)
def present(repo, subset, x):
    """An empty set, if any revision in set isn't found; otherwise,
    all revisions in set.

    If any of specified revisions is not present in the local repository,
    the query is normally aborted. But this predicate allows the query
    to continue even in such cases.
    """
    # turn a failed lookup into an empty result instead of aborting
    try:
        revs = getset(repo, subset, x)
    except error.RepoLookupError:
        return baseset()
    return revs
Wagner Bruna
|
r11944 | |||
Yuya Nishihara
|
# for internal use
@predicate('_notpublic', safe=True)
def _notpublic(repo, subset, x):
    # deliberately no docstring: this predicate is internal and must not
    # surface in user-facing help
    getargs(x, 0, 0, "_notpublic takes no arguments")
    repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
    if repo._phasecache._phasesets:
        # fast path: union every phase set except the first one, which the
        # 'phases.public' test in the else branch suggests tracks public revs
        s = set()
        for u in repo._phasecache._phasesets[1:]:
            s.update(u)
        s = baseset(s - repo.changelog.filteredrevs)
        s.sort()
        return subset & s
    else:
        # slow path: query the phase of each candidate revision one by one
        phase = repo._phasecache.phase
        target = phases.public
        condition = lambda r: phase(repo, r) != target
        return subset.filter(condition, condrepr=('<phase %r>', target),
                             cache=False)
Laurent Charignon
|
r25191 | |||
FUJIWARA Katsunori
|
@predicate('public()', safe=True)
def public(repo, subset, x):
    """Changeset in public phase."""
    # i18n: "public" is a keyword
    getargs(x, 0, 0, _("public takes no arguments"))
    phase = repo._phasecache.phase
    target = phases.public
    def ispublic(r):
        # keep the per-rev check tiny: it may run over the whole subset
        return phase(repo, r) == target
    return subset.filter(ispublic, condrepr=('<phase %r>', target),
                         cache=False)
Pierre-Yves David
|
r15819 | |||
FUJIWARA Katsunori
|
@predicate('remote([id [,path]])', safe=True)
def remote(repo, subset, x):
    """Local revision that corresponds to the given identifier in a
    remote repository, if present. Here, the '.' identifier is a
    synonym for the current local branch.
    """
    from . import hg # avoid start-up nasties
    # i18n: "remote" is a keyword
    l = getargs(x, 0, 2, _("remote takes zero, one, or two arguments"))

    # first optional argument: the identifier to look up remotely
    q = '.'
    if len(l) > 0:
        # i18n: "remote" is a keyword
        q = getstring(l[0], _("remote requires a string id"))
    if q == '.':
        # '.' means "the branch currently checked out locally"
        q = repo['.'].branch()
    # second optional argument: the remote repository path
    dest = ''
    if len(l) > 1:
        # i18n: "remote" is a keyword
        dest = getstring(l[1], _("remote requires a repository path"))
    dest = repo.ui.expandpath(dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    # NOTE: contacts the remote repository (network I/O)
    other = hg.peer(repo, {}, dest)
    n = other.lookup(q)
    if n in repo:
        # the remote node also exists locally: map it to a local rev
        r = repo[n].rev()
        if r in subset:
            return baseset([r])
    return baseset()
Matt Mackall
|
r15936 | |||
FUJIWARA Katsunori
|
@predicate('removes(pattern)', safe=True)
def removes(repo, subset, x):
    """Changesets which remove files matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "removes" is a keyword
    pattern = getstring(x, _("removes requires a pattern"))
    # the 2 presumably selects the 'removed' status field — see checkstatus
    return checkstatus(repo, subset, pattern, 2)
FUJIWARA Katsunori
|
@predicate('rev(number)', safe=True)
def rev(repo, subset, x):
    """Revision with the given numeric identifier.
    """
    # i18n: "rev" is a keyword
    args = getargs(x, 1, 1, _("rev requires one argument"))
    try:
        # i18n: "rev" is a keyword
        target = int(getstring(args[0], _("rev requires a number")))
    except (TypeError, ValueError):
        # i18n: "rev" is a keyword
        raise error.ParseError(_("rev expects a number"))
    # nullrev is valid even though the changelog does not contain it
    if target != node.nullrev and target not in repo.changelog:
        return baseset()
    return subset & baseset([target])
Matt Mackall
|
r11275 | |||
FUJIWARA Katsunori
|
@predicate('matching(revision [, field])', safe=True)
def matching(repo, subset, x):
    """Changesets in which a given set of fields match the set of fields in the
    selected revision or set.

    To match more than one field pass the list of fields to match separated
    by spaces (e.g. ``author description``).

    Valid fields are most regular revision fields and some special fields.

    Regular revision fields are ``description``, ``author``, ``branch``,
    ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
    and ``diff``.
    Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
    contents of the revision. Two revisions matching their ``diff`` will
    also match their ``files``.

    Special fields are ``summary`` and ``metadata``:
    ``summary`` matches the first line of the description.
    ``metadata`` is equivalent to matching ``description user date``
    (i.e. it matches the main metadata fields).

    ``metadata`` is the default field which is used when no fields are
    specified. You can match more than one field at a time.
    """
    # i18n: "matching" is a keyword
    l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))

    # reference revisions whose field values candidates are compared against
    revs = getset(repo, fullreposet(repo), l[0])

    fieldlist = ['metadata']
    if len(l) > 1:
        fieldlist = getstring(l[1],
            # i18n: "matching" is a keyword
            _("matching requires a string "
              "as its second argument")).split()

    # Make sure that there are no repeated fields,
    # expand the 'special' 'metadata' field type
    # and check the 'files' whenever we check the 'diff'
    fields = []
    for field in fieldlist:
        if field == 'metadata':
            fields += ['user', 'description', 'date']
        elif field == 'diff':
            # a revision matching the diff must also match the files
            # since matching the diff is very costly, make sure to
            # also match the files first
            fields += ['files', 'diff']
        else:
            if field == 'author':
                field = 'user'
            fields.append(field)
    fields = set(fields)
    if 'summary' in fields and 'description' in fields:
        # If a revision matches its description it also matches its summary
        fields.discard('summary')

    # We may want to match more than one field
    # Not all fields take the same amount of time to be matched
    # Sort the selected fields in order of increasing matching cost
    fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
        'files', 'description', 'substate', 'diff']
    def fieldkeyfunc(f):
        try:
            return fieldorder.index(f)
        except ValueError:
            # assume an unknown field is very costly
            return len(fieldorder)
    fields = list(fields)
    fields.sort(key=fieldkeyfunc)

    # Each field will be matched with its own "getfield" function
    # which will be added to the getfieldfuncs array of functions
    getfieldfuncs = []
    _funcs = {
        'user': lambda r: repo[r].user(),
        'branch': lambda r: repo[r].branch(),
        'date': lambda r: repo[r].date(),
        'description': lambda r: repo[r].description(),
        'files': lambda r: repo[r].files(),
        'parents': lambda r: repo[r].parents(),
        'phase': lambda r: repo[r].phase(),
        'substate': lambda r: repo[r].substate,
        'summary': lambda r: repo[r].description().splitlines()[0],
        'diff': lambda r: list(repo[r].diff(git=True),)
    }
    for info in fields:
        getfield = _funcs.get(info, None)
        if getfield is None:
            raise error.ParseError(
                # i18n: "matching" is a keyword
                _("unexpected field name passed to matching: %s") % info)
        getfieldfuncs.append(getfield)
    # convert the getfield array of functions into a "getinfo" function
    # which returns an array of field values (or a single value if there
    # is only one field to match)
    getinfo = lambda r: [f(r) for f in getfieldfuncs]

    def matches(x):
        # a candidate x matches if it agrees with at least one reference
        # revision on every selected field
        for rev in revs:
            target = getinfo(rev)
            match = True
            for n, f in enumerate(getfieldfuncs):
                if target[n] != f(x):
                    match = False
            if match:
                return True
        return False

    return subset.filter(matches, condrepr=('<matching%r %r>', fields, revs))
Angel Ezquerra
|
r16402 | |||
FUJIWARA Katsunori
|
@predicate('reverse(set)', safe=True)
def reverse(repo, subset, x):
    """Reverse order of set.
    """
    # reverse in place and hand back the same smartset object
    revs = getset(repo, subset, x)
    revs.reverse()
    return revs
FUJIWARA Katsunori
|
@predicate('roots(set)', safe=True)
def roots(repo, subset, x):
    """Changesets in set with no parent changeset in set.
    """
    s = getset(repo, fullreposet(repo), x)
    parentrevs = repo.changelog.parentrevs
    def isroot(r):
        # a rev is a root when none of its (real) parents is also in the set
        return not any(0 <= p and p in s for p in parentrevs(r))
    return subset & s.filter(isroot, condrepr='<roots>')
Wagner Bruna
|
r11944 | |||
Yuya Nishihara
|
# sort key name -> function extracting that key from a changectx; used by
# sort() for the actual ordering and by _getsortargs() to validate key names
_sortkeyfuncs = {
    'rev': lambda c: c.rev(),
    'branch': lambda c: c.branch(),
    'desc': lambda c: c.description(),
    'user': lambda c: c.user(),
    'author': lambda c: c.user(),
    'date': lambda c: c.date()[0],
}
Yuya Nishihara
|
def _getsortargs(x):
    """Parse sort options into (set, [(key, reverse)], opts)"""
    args = getargsdict(x, 'sort', 'set keys topo.firstbranch')
    if 'set' not in args:
        # i18n: "sort" is a keyword
        raise error.ParseError(_('sort requires one or two arguments'))
    # default ordering when no keys are given
    keys = "rev"
    if 'keys' in args:
        # i18n: "sort" is a keyword
        keys = getstring(args['keys'], _("sort spec must be a string"))

    # split the spec into (key, reverse) pairs; a leading '-' means descending
    keyflags = []
    for k in keys.split():
        fk = k
        reverse = (k[0] == '-')
        if reverse:
            k = k[1:]
        if k not in _sortkeyfuncs and k != 'topo':
            raise error.ParseError(_("unknown sort key %r") % fk)
        keyflags.append((k, reverse))

    if len(keyflags) > 1 and any(k == 'topo' for k, reverse in keyflags):
        # i18n: "topo" is a keyword
        raise error.ParseError(_(
            'topo sort order cannot be combined with other sort keys'))

    opts = {}
    if 'topo.firstbranch' in args:
        # topo.firstbranch only makes sense together with the topo key
        if any(k == 'topo' for k, reverse in keyflags):
            opts['topo.firstbranch'] = args['topo.firstbranch']
        else:
            # i18n: "topo" and "topo.firstbranch" are keywords
            raise error.ParseError(_(
                'topo.firstbranch can only be used when using the topo sort '
                'key'))

    return args['set'], keyflags, opts
@predicate('sort(set[, [-]key... [, ...]])', safe=True)
def sort(repo, subset, x):
    """Sort set by keys. The default sort order is ascending, specify a key
    as ``-key`` to sort in descending order.

    The keys can be:

    - ``rev`` for the revision number,
    - ``branch`` for the branch name,
    - ``desc`` for the commit message (description),
    - ``user`` for user name (``author`` can be used as an alias),
    - ``date`` for the commit date
    - ``topo`` for a reverse topographical sort

    The ``topo`` sort order cannot be combined with other sort keys. This sort
    takes one optional argument, ``topo.firstbranch``, which takes a revset that
    specifies what topographical branches to prioritize in the sort.

    """
    s, keyflags, opts = _getsortargs(x)
    revs = getset(repo, subset, s)

    if not keyflags:
        # no keys: keep the subset's own order
        return revs
    if len(keyflags) == 1 and keyflags[0][0] == "rev":
        # fast path: sorting by revision number needs no changectx access
        revs.sort(reverse=keyflags[0][1])
        return revs
    elif keyflags[0][0] == "topo":
        firstbranch = ()
        if 'topo.firstbranch' in opts:
            firstbranch = getset(repo, subset, opts['topo.firstbranch'])
        revs = baseset(_toposort(revs, repo.changelog.parentrevs, firstbranch),
                       istopo=True)
        if keyflags[0][1]:
            # '-topo' requested: flip the topological order
            revs.reverse()
        return revs

    # sort() is guaranteed to be stable
    # (applying the keys last-to-first yields a multi-key ordering)
    ctxs = [repo[r] for r in revs]
    for k, reverse in reversed(keyflags):
        ctxs.sort(key=_sortkeyfuncs[k], reverse=reverse)
    return baseset([c.rev() for c in ctxs])
Matt Mackall
|
r11275 | |||
Martijn Pieters
|
def _toposort(revs, parentsfunc, firstbranch=()):
    """Yield revisions from heads to roots one (topo) branch at a time.

    This function aims to be used by a graph generator that wishes to minimize
    the number of parallel branches and their interleaving.

    Example iteration order (numbers show the "true" order in a changelog):

      o  4
      |
      o  1
      |
      | o  3
      | |
      | o  2
      |/
      o  0

    Note that the ancestors of merges are understood by the current
    algorithm to be on the same branch. This means no reordering will
    occur behind a merge.

    NOTE: 'revs' is sorted in place (descending) by this function;
    'parentsfunc' maps a rev to its parent revs; 'firstbranch' pre-seeds
    the set of prioritized revisions.
    """
    ### Quick summary of the algorithm
    #
    # This function is based around a "retention" principle. We keep revisions
    # in memory until we are ready to emit a whole branch that immediately
    # "merges" into an existing one. This reduces the number of parallel
    # branches with interleaved revisions.
    #
    # During iteration revs are split into two groups:
    # A) revision already emitted
    # B) revision in "retention". They are stored as different subgroups.
    #
    # for each REV, we do the following logic:
    #
    #   1) if REV is a parent of (A), we will emit it. If there is a
    #   retention group ((B) above) that is blocked on REV being
    #   available, we emit all the revisions out of that retention
    #   group first.
    #
    #   2) else, we'll search for a subgroup in (B) awaiting for REV to be
    #   available, if such subgroup exist, we add REV to it and the subgroup is
    #   now awaiting for REV.parents() to be available.
    #
    #   3) finally if no such group existed in (B), we create a new subgroup.
    #
    #
    # To bootstrap the algorithm, we emit the tipmost revision (which
    # puts it in group (A) from above).

    revs.sort(reverse=True)

    # Set of parents of revision that have been emitted. They can be considered
    # unblocked as the graph generator is already aware of them so there is no
    # need to delay the revisions that reference them.
    #
    # If someone wants to prioritize a branch over the others, pre-filling this
    # set will force all other branches to wait until this branch is ready to be
    # emitted.
    unblocked = set(firstbranch)

    # list of groups waiting to be displayed, each group is defined by:
    #
    #   (revs:    lists of revs waiting to be displayed,
    #    blocked: set of that cannot be displayed before those in 'revs')
    #
    # The second value ('blocked') correspond to parents of any revision in the
    # group ('revs') that is not itself contained in the group. The main idea
    # of this algorithm is to delay as much as possible the emission of any
    # revision.  This means waiting for the moment we are about to display
    # these parents to display the revs in a group.
    #
    # This first implementation is smart until it encounters a merge: it will
    # emit revs as soon as any parent is about to be emitted and can grow an
    # arbitrary number of revs in 'blocked'. In practice this mean we properly
    # retains new branches but gives up on any special ordering for ancestors
    # of merges. The implementation can be improved to handle this better.
    #
    # The first subgroup is special. It corresponds to all the revision that
    # were already emitted. The 'revs' lists is expected to be empty and the
    # 'blocked' set contains the parents revisions of already emitted revision.
    #
    # You could pre-seed the <parents> set of groups[0] to a specific
    # changesets to select what the first emitted branch should be.
    groups = [([], unblocked)]
    pendingheap = []
    pendingset = set()

    heapq.heapify(pendingheap)
    heappop = heapq.heappop
    heappush = heapq.heappush
    for currentrev in revs:
        # Heap works with smallest element, we want highest so we invert
        if currentrev not in pendingset:
            heappush(pendingheap, -currentrev)
            pendingset.add(currentrev)
        # iterates on pending rev until after the current rev have been
        # processed.
        rev = None
        while rev != currentrev:
            rev = -heappop(pendingheap)
            pendingset.remove(rev)

            # Seek for a subgroup blocked, waiting for the current revision.
            matching = [i for i, g in enumerate(groups) if rev in g[1]]

            if matching:
                # The main idea is to gather together all sets that are blocked
                # on the same revision.
                #
                # Groups are merged when a common blocking ancestor is
                # observed. For example, given two groups:
                #
                # revs [5, 4] waiting for 1
                # revs [3, 2] waiting for 1
                #
                # These two groups will be merged when we process
                # 1. In theory, we could have merged the groups when
                # we added 2 to the group it is now in (we could have
                # noticed the groups were both blocked on 1 then), but
                # the way it works now makes the algorithm simpler.
                #
                # We also always keep the oldest subgroup first. We can
                # probably improve the behavior by having the longest set
                # first. That way, graph algorithms could minimise the length
                # of parallel lines their drawing. This is currently not done.
                targetidx = matching.pop(0)
                trevs, tparents = groups[targetidx]
                for i in matching:
                    gr = groups[i]
                    trevs.extend(gr[0])
                    tparents |= gr[1]
                # delete all merged subgroups (except the one we kept)
                # (starting from the last subgroup for performance and
                # sanity reasons)
                for i in reversed(matching):
                    del groups[i]
            else:
                # This is a new head. We create a new subgroup for it.
                targetidx = len(groups)
                groups.append(([], set([rev])))

            gr = groups[targetidx]

            # We now add the current nodes to this subgroups. This is done
            # after the subgroup merging because all elements from a subgroup
            # that relied on this rev must precede it.
            #
            # we also update the <parents> set to include the parents of the
            # new nodes.
            if rev == currentrev: # only display stuff in rev
                gr[0].append(rev)
            gr[1].remove(rev)
            parents = [p for p in parentsfunc(rev) if p > node.nullrev]
            gr[1].update(parents)
            for p in parents:
                if p not in pendingset:
                    pendingset.add(p)
                    heappush(pendingheap, -p)

            # Look for a subgroup to display
            #
            # When unblocked is empty (if clause), we were not waiting for any
            # revisions during the first iteration (if no priority was given) or
            # if we emitted a whole disconnected set of the graph (reached a
            # root).  In that case we arbitrarily take the oldest known
            # subgroup. The heuristic could probably be better.
            #
            # Otherwise (elif clause) if the subgroup is blocked on
            # a revision we just emitted, we can safely emit it as
            # well.
            if not unblocked:
                if len(groups) > 1:  # display other subset
                    targetidx = 1
                    gr = groups[1]
            elif not gr[1] & unblocked:
                gr = None

            if gr is not None:
                # update the set of awaited revisions with the one from the
                # subgroup
                unblocked |= gr[1]
                # output all revisions in the subgroup
                for r in gr[0]:
                    yield r
                # delete the subgroup that you just output
                # unless it is groups[0] in which case you just empty it.
                if targetidx:
                    del groups[targetidx]
                else:
                    gr[0][:] = []
    # Check if we have some subgroup waiting for revisions we are not going to
    # iterate over
    for g in groups:
        for r in g[0]:
            yield r
FUJIWARA Katsunori
|
@predicate('subrepo([pattern])')
def subrepo(repo, subset, x):
    """Changesets that add, modify or remove the given subrepo.  If no subrepo
    pattern is named, any subrepo changes are returned.
    """
    # i18n: "subrepo" is a keyword
    args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
    pat = None
    if len(args) != 0:
        pat = getstring(args[0], _("subrepo requires a pattern"))

    # subrepo state changes always go through the .hgsubstate file
    m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])

    def submatches(names):
        # yield the subrepo names matching 'pat' (only called when pat is set)
        k, p, m = util.stringmatcher(pat)
        for name in names:
            if m(name):
                yield name

    def matches(x):
        c = repo[x]
        # status restricted to .hgsubstate: did this changeset touch subrepos?
        s = repo.status(c.p1().node(), c.node(), match=m)

        if pat is None:
            # no pattern: any subrepo change qualifies
            return s.added or s.modified or s.removed

        if s.added:
            return any(submatches(c.substate.keys()))

        if s.modified:
            # compare the subrepo state between the changeset and its parent
            subs = set(c.p1().substate.keys())
            subs.update(c.substate.keys())

            for path in submatches(subs):
                if c.p1().substate.get(path) != c.substate.get(path):
                    return True

        if s.removed:
            return any(submatches(c.p1().substate.keys()))

        return False

    return subset.filter(matches, condrepr=('<subrepo %r>', pat))
Matt Harbison
|
r24446 | |||
def _substringmatcher(pattern):
    # interpret the standard kind prefixes ('re:', 'literal:') first
    kind, pattern, matcher = util.stringmatcher(pattern)
    if kind == 'literal':
        # for a plain literal we want substring containment, not the exact
        # equality test that stringmatcher returns
        def matcher(s, _needle=pattern):
            return _needle in s
    return kind, pattern, matcher
Simon King
|
r16819 | |||
@predicate('tag([name])', safe=True)
def tag(repo, subset, x):
    """The specified tag by name, or all tagged revisions if no name is given.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a tag that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "tag" is a keyword
    args = getargs(x, 0, 1, _("tag takes one or no arguments"))
    cl = repo.changelog
    if args:
        pattern = getstring(args[0],
                            # i18n: "tag" is a keyword
                            _('the argument to tag must be a string'))
        kind, pattern, matcher = util.stringmatcher(pattern)
        if kind == 'literal':
            # avoid resolving all tags
            tn = repo._tagscache.tags.get(pattern, None)
            if tn is None:
                raise error.RepoLookupError(_("tag '%s' does not exist")
                                            % pattern)
            s = set([repo[tn].rev()])
        else:
            # regex (or other) pattern: scan the full tag list
            s = set([cl.rev(n) for t, n in repo.tagslist() if matcher(t)])
    else:
        # no argument: every tagged revision except the implicit 'tip' tag
        s = set([cl.rev(n) for t, n in repo.tagslist() if t != 'tip'])
    return subset & s
Matt Mackall
|
r11280 | |||
@predicate('tagged', safe=True)
def tagged(repo, subset, x):
    # plain alias of the tag() predicate (no docstring on purpose: the
    # predicate help text is taken from the docstring)
    return tag(repo, subset, x)
@predicate('unstable()', safe=True)
def unstable(repo, subset, x):
    """Non-obsolete changesets with obsolete ancestors.
    """
    # i18n: "unstable" is a keyword
    getargs(x, 0, 0, _("unstable takes no arguments"))
    # restrict the input set to the repo-wide set of unstable revisions
    return subset & obsmod.getrevs(repo, 'unstable')
Pierre-Yves David
|
r17171 | |||
@predicate('user(string)', safe=True)
def user(repo, subset, x):
    """User name contains string. The match is case-insensitive.

    If `string` starts with `re:`, the remainder of the string is treated as
    a regular expression. To match a user that actually contains `re:`, use
    the prefix `literal:`.
    """
    # 'user' is a synonym of 'author'; delegate entirely
    return author(repo, subset, x)
Matt Mackall
|
r13359 | |||
# experimental
@predicate('wdir', safe=True)
def wdir(repo, subset, x):
    # i18n: "wdir" is a keyword
    getargs(x, 0, 0, _("wdir takes no arguments"))
    # the working directory is represented by the pseudo revision wdirrev;
    # it is a member of a fullreposet by construction
    wrev = node.wdirrev
    if isinstance(subset, fullreposet) or wrev in subset:
        return baseset([wrev])
    return baseset()
Matt Mackall
|
r15898 | # for internal use | ||
FUJIWARA Katsunori
|
r27587 | @predicate('_list', safe=True) | ||
Matt Mackall
|
r15898 | def _list(repo, subset, x): | ||
s = getstring(x, "internal error") | ||||
if not s: | ||||
Pierre-Yves David
|
r22802 | return baseset() | ||
Yuya Nishihara
|
r25341 | # remove duplicates here. it's difficult for caller to deduplicate sets | ||
# because different symbols can point to the same rev. | ||||
Yuya Nishihara
|
r25344 | cl = repo.changelog | ||
Yuya Nishihara
|
r25341 | ls = [] | ||
seen = set() | ||||
for t in s.split('\0'): | ||||
Yuya Nishihara
|
r25344 | try: | ||
# fast path for integer revision | ||||
r = int(t) | ||||
if str(r) != t or r not in cl: | ||||
raise ValueError | ||||
Durham Goode
|
r26143 | revs = [r] | ||
Yuya Nishihara
|
r25344 | except ValueError: | ||
Durham Goode
|
r26143 | revs = stringset(repo, subset, t) | ||
for r in revs: | ||||
if r in seen: | ||||
continue | ||||
if (r in subset | ||||
or r == node.nullrev and isinstance(subset, fullreposet)): | ||||
ls.append(r) | ||||
seen.add(r) | ||||
Yuya Nishihara
|
r25341 | return baseset(ls) | ||
Matt Mackall
|
r15898 | |||
# for internal use
@predicate('_intlist', safe=True)
def _intlist(repo, subset, x):
    # expand a '\0'-separated list of integer revisions, keeping list order
    data = getstring(x, "internal error")
    if not data:
        return baseset()
    wanted = [int(piece) for piece in data.split('\0')]
    return baseset([rev for rev in wanted if rev in subset])
# for internal use
@predicate('_hexlist', safe=True)
def _hexlist(repo, subset, x):
    # expand a '\0'-separated list of hex nodes, keeping list order
    data = getstring(x, "internal error")
    if not data:
        return baseset()
    torev = repo.changelog.rev
    wanted = [torev(node.bin(h)) for h in data.split('\0')]
    return baseset([rev for rev in wanted if rev in subset])
Matt Mackall
|
r15898 | |||
# dispatch table mapping a parse-tree node type to the function that
# evaluates that node against (repo, subset, operands)
methods = {
    "range": rangeset,
    "dagrange": dagrange,
    "string": stringset,
    # a bare symbol resolves the same way as a quoted string
    "symbol": stringset,
    "and": andset,
    "or": orset,
    "not": notset,
    "difference": differenceset,
    "list": listset,
    "keyvalue": keyvaluepair,
    "func": func,
    "ancestor": ancestorspec,
    "parent": parentspec,
    "parentpost": p1,
}
def _matchonly(revs, bases):
    """
    >>> f = lambda *args: _matchonly(*map(parse, args))
    >>> f('ancestors(A)', 'not ancestors(B)')
    ('list', ('symbol', 'A'), ('symbol', 'B'))
    """
    # recognize the tree shape 'ancestors(A) and not ancestors(B)' so the
    # optimizer can rewrite it to the faster only(A, B); returns the
    # argument list for only(), or falls through to None when the trees
    # do not match the shape
    if (revs is not None
        and revs[0] == 'func'
        and getsymbol(revs[1]) == 'ancestors'
        and bases is not None
        and bases[0] == 'not'
        and bases[1][0] == 'func'
        and getsymbol(bases[1][1]) == 'ancestors'):
        return ('list', revs[2], bases[1][2])
Yuya Nishihara
|
r29116 | |||
def _optimize(x, small):
    """Optimize parsed tree ``x`` and return a (weight, tree) pair

    The weight is a rough relative cost estimate; the 'and' case uses it
    to evaluate the cheaper operand first.  ``small`` hints that the
    subexpression is expected to produce a small result.
    """
    if x is None:
        return 0, x
    smallbonus = 1
    if small:
        smallbonus = .5

    op = x[0]
    if op == 'minus':
        # 'a - b' is evaluated as 'a and not b'
        return _optimize(('and', x[1], ('not', x[2])), small)
    elif op == 'only':
        t = ('func', ('symbol', 'only'), ('list', x[1], x[2]))
        return _optimize(t, small)
    elif op == 'onlypost':
        return _optimize(('func', ('symbol', 'only'), x[1]), small)
    elif op == 'dagrangepre':
        # '::x' is 'ancestors(x)'
        return _optimize(('func', ('symbol', 'ancestors'), x[1]), small)
    elif op == 'dagrangepost':
        # 'x::' is 'descendants(x)'
        return _optimize(('func', ('symbol', 'descendants'), x[1]), small)
    elif op == 'rangeall':
        return _optimize(('range', ('string', '0'), ('string', 'tip')), small)
    elif op == 'rangepre':
        return _optimize(('range', ('string', '0'), x[1]), small)
    elif op == 'rangepost':
        return _optimize(('range', x[1], ('string', 'tip')), small)
    elif op == 'negate':
        s = getstring(x[1], _("can't negate that"))
        return _optimize(('string', '-' + s), small)
    elif op in 'string symbol negate':
        # NOTE: substring membership test against the literal (historical
        # idiom in this function)
        return smallbonus, x # single revisions are small
    elif op == 'and':
        wa, ta = _optimize(x[1], True)
        wb, tb = _optimize(x[2], True)
        w = min(wa, wb)

        # (::x and not ::y)/(not ::y and ::x) have a fast path
        tm = _matchonly(ta, tb) or _matchonly(tb, ta)
        if tm:
            return w, ('func', ('symbol', 'only'), tm)

        if tb is not None and tb[0] == 'not':
            # 'a and not b' has a dedicated 'difference' evaluator
            return wa, ('difference', ta, tb[1])

        # evaluate the cheaper operand first
        if wa > wb:
            return w, (op, tb, ta)
        return w, (op, ta, tb)
    elif op == 'or':
        # fast path for machine-generated expression, that is likely to have
        # lots of trivial revisions: 'a + b + c()' to '_list(a b) + c()'
        ws, ts, ss = [], [], []
        def flushss():
            if not ss:
                return
            if len(ss) == 1:
                w, t = ss[0]
            else:
                s = '\0'.join(t[1] for w, t in ss)
                y = ('func', ('symbol', '_list'), ('string', s))
                w, t = _optimize(y, False)
            ws.append(w)
            ts.append(t)
            del ss[:]
        for y in x[1:]:
            w, t = _optimize(y, False)
            if t is not None and (t[0] == 'string' or t[0] == 'symbol'):
                # accumulate consecutive trivial operands for batching
                ss.append((w, t))
                continue
            flushss()
            ws.append(w)
            ts.append(t)
        flushss()
        if len(ts) == 1:
            return ws[0], ts[0] # 'or' operation is fully optimized out
        # we can't reorder trees by weight because it would change the order.
        # ("sort(a + b)" == "sort(b + a)", but "a + b" != "b + a")
        # ts = tuple(t for w, t in sorted(zip(ws, ts), key=lambda wt: wt[0]))
        return max(ws), (op,) + tuple(ts)
    elif op == 'not':
        # Optimize not public() to _notpublic() because we have a fast version
        if x[1] == ('func', ('symbol', 'public'), None):
            newsym = ('func', ('symbol', '_notpublic'), None)
            o = _optimize(newsym, not small)
            return o[0], o[1]
        else:
            o = _optimize(x[1], not small)
            return o[0], (op, o[1])
    elif op == 'parentpost':
        o = _optimize(x[1], small)
        return o[0], (op, o[1])
    elif op == 'group':
        # parentheses only group; drop the node
        return _optimize(x[1], small)
    elif op in 'dagrange range parent ancestorspec':
        if op == 'parent':
            # x^:y means (x^) : y, not x ^ (:y)
            post = ('parentpost', x[1])
            if x[2][0] == 'dagrangepre':
                return _optimize(('dagrange', post, x[2][1]), small)
            elif x[2][0] == 'rangepre':
                return _optimize(('range', post, x[2][1]), small)
        wa, ta = _optimize(x[1], small)
        wb, tb = _optimize(x[2], small)
        return wa + wb, (op, ta, tb)
    elif op == 'list':
        ws, ts = zip(*(_optimize(y, small) for y in x[1:]))
        return sum(ws), (op,) + ts
    elif op == 'func':
        f = getsymbol(x[1])
        wa, ta = _optimize(x[2], small)
        # the weights below are hand-tuned estimates of predicate cost;
        # note the membership tests are substring tests on these literals
        if f in ("author branch closed date desc file grep keyword "
                 "outgoing user"):
            w = 10 # slow
        elif f in "modifies adds removes":
            w = 30 # slower
        elif f == "contains":
            w = 100 # very slow
        elif f == "ancestor":
            w = 1 * smallbonus
        elif f in "reverse limit first _intlist":
            w = 0
        elif f in "sort":
            w = 10 # assume most sorts look at changelog
        else:
            w = 1
        return w + wa, (op, x[1], ta)
    return 1, x
Matt Mackall
|
r11275 | |||
def optimize(tree):
    """Optimize a parsed revset tree, dropping the computed weight."""
    return _optimize(tree, small=True)[1]
# the set of valid characters for the initial letter of symbols in
# alias declarations and definitions: alphanumerics, '.', '_', '@', '$',
# and any non-ASCII byte (ord > 127)
_aliassyminitletters = set(c for c in [chr(i) for i in xrange(256)]
                           if c.isalnum() or c in '._@$' or ord(c) > 127)
def _parsewith(spec, lookup=None, syminitletters=None):
    """Generate a parse tree of given spec with given tokenizing options

    >>> _parsewith('foo($1)', syminitletters=_aliassyminitletters)
    ('func', ('symbol', 'foo'), ('symbol', '$1'))
    >>> _parsewith('$1')
    Traceback (most recent call last):
      ...
    ParseError: ("syntax error in revset '$1'", 0)
    >>> _parsewith('foo bar')
    Traceback (most recent call last):
      ...
    ParseError: ('invalid token', 4)
    """
    p = parser.parser(elements)
    tree, pos = p.parse(tokenize(spec, lookup=lookup,
                                 syminitletters=syminitletters))
    # the parser must consume the whole input; trailing garbage is an error
    if pos != len(spec):
        raise error.ParseError(_('invalid token'), pos)
    return parser.simplifyinfixops(tree, ('list', 'or'))
class _aliasrules(parser.basealiasrules):
    """Parsing and expansion rule set of revset aliases"""
    _section = _('revset alias')

    @staticmethod
    def _parse(spec):
        """Parse alias declaration/definition ``spec``

        This allows symbol names to use also ``$`` as an initial letter
        (for backward compatibility), and callers of this function should
        examine whether ``$`` is used also for unexpected symbols or not.
        """
        return _parsewith(spec, syminitletters=_aliassyminitletters)

    @staticmethod
    def _trygetfunc(tree):
        # return (name, argument list) when tree is a call of a plain
        # symbol; otherwise fall through and return None implicitly
        if tree[0] == 'func' and tree[1][0] == 'symbol':
            return tree[1][1], getlist(tree[2])
Yuya Nishihara
|
r28870 | |||
def expandaliases(ui, tree, showwarning=None):
    """Expand [revsetalias] config aliases in ``tree``

    When ``showwarning`` is given, it is called with a message for each
    alias whose definition is broken and has not been warned about yet.
    """
    aliases = _aliasrules.buildmap(ui.configitems('revsetalias'))
    tree = _aliasrules.expand(aliases, tree)
    if showwarning:
        # warn about problematic (but not referred) aliases
        for name, alias in sorted(aliases.iteritems()):
            if alias.error and not alias.warned:
                showwarning(_('warning: %s\n') % (alias.error))
                alias.warned = True
    return tree
Alexander Solovyov
|
r14098 | |||
def foldconcat(tree):
    """Fold elements to be concatenated by `##`
    """
    if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
        return tree
    if tree[0] != '_concat':
        # not a concatenation node: fold children recursively
        return tuple(foldconcat(t) for t in tree)
    # flatten nested _concat nodes depth-first, left to right, collecting
    # the leaf text of each operand
    pieces = []
    stack = [tree]
    while stack:
        e = stack.pop()
        if e[0] == '_concat':
            stack.extend(reversed(e[1:]))
        elif e[0] in ('string', 'symbol'):
            pieces.append(e[1])
        else:
            msg = _("\"##\" can't concatenate \"%s\" element") % (e[0])
            raise error.ParseError(msg)
    return ('string', ''.join(pieces))
Alexander Solovyov
|
r14098 | |||
def parse(spec, lookup=None):
    """Parse a revset spec into a tree, with the default tokenizer options

    ``lookup`` is forwarded to the tokenizer (callers pass e.g.
    ``repo.__contains__`` so names can be checked against the repo).
    """
    return _parsewith(spec, lookup=lookup)
def posttreebuilthook(tree, repo):
    """Hook point for extensions to run code on the optimized tree; no-op."""
def match(ui, spec, repo=None):
    """Create a matcher for a single revision spec."""
    # a single spec is just the one-element case of matchany()
    return matchany(ui, [spec], repo=repo)
def matchany(ui, specs, repo=None):
    """Create a matcher that will include any revisions matching one of the
    given specs"""
    if not specs:
        # no specs at all: the matcher matches nothing
        def mfunc(repo, subset=None):
            return baseset()
        return mfunc
    if not all(specs):
        raise error.ParseError(_("empty query"))
    lookup = None
    if repo:
        # let the tokenizer check symbol names against the repository
        lookup = repo.__contains__
    if len(specs) == 1:
        tree = parse(specs[0], lookup)
    else:
        # multiple specs are combined with 'or'
        tree = ('or',) + tuple(parse(s, lookup) for s in specs)
    return _makematcher(ui, tree, repo)
def _makematcher(ui, tree, repo):
    """Turn a parsed tree into a callable ``mfunc(repo, subset=None)``

    The tree is alias-expanded (when a ui is available), concatenation-
    folded, optimized, and handed to the post-build hook before the
    matcher closure is created.
    """
    if ui:
        tree = expandaliases(ui, tree, showwarning=ui.warn)
    tree = foldconcat(tree)
    tree = optimize(tree)
    posttreebuilthook(tree, repo)
    def mfunc(repo, subset=None):
        if subset is None:
            subset = fullreposet(repo)
        # getset() needs a smartset; wrap plain collections in a baseset
        # (isascending is used as the marker of the smartset API)
        if util.safehasattr(subset, 'isascending'):
            result = getset(repo, subset, tree)
        else:
            result = getset(repo, baseset(subset), tree)
        return result
    return mfunc
def formatspec(expr, *args):
    '''
    This is a convenience function for using revsets internally, and
    escapes arguments appropriately. Aliases are intentionally ignored
    so that intended expression behavior isn't accidentally subverted.

    Supported arguments:

    %r = revset expression, parenthesized
    %d = int(arg), no quoting
    %s = string(arg), escaped and single-quoted
    %b = arg.branch(), escaped and single-quoted
    %n = hex(arg), single-quoted
    %% = a literal '%'

    Prefixing the type with 'l' specifies a parenthesized list of that type.

    >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
    '(10 or 11):: and ((this()) or (that()))'
    >>> formatspec('%d:: and not %d::', 10, 20)
    '10:: and not 20::'
    >>> formatspec('%ld or %ld', [], [1])
    "_list('') or 1"
    >>> formatspec('keyword(%s)', 'foo\\xe9')
    "keyword('foo\\\\xe9')"
    >>> b = lambda: 'default'
    >>> b.branch = b
    >>> formatspec('branch(%b)', b)
    "branch('default')"
    >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
    "root(_list('a\\x00b\\x00c\\x00d'))"
    '''
    def quote(s):
        # repr() of a str yields a quoted, backslash-escaped literal
        return repr(str(s))

    def argtype(c, arg):
        # render a single argument according to its type character
        if c == 'd':
            return str(int(arg))
        elif c == 's':
            return quote(arg)
        elif c == 'r':
            parse(arg) # make sure syntax errors are confined
            return '(%s)' % arg
        elif c == 'n':
            return quote(node.hex(arg))
        elif c == 'b':
            return quote(arg.branch())

    def listexp(s, t):
        # render a list argument, using the internal _list/_intlist/_hexlist
        # predicates where a compact encoding exists for the element type
        l = len(s)
        if l == 0:
            return "_list('')"
        elif l == 1:
            return argtype(t, s[0])
        elif t == 'd':
            return "_intlist('%s')" % "\0".join(str(int(a)) for a in s)
        elif t == 's':
            return "_list('%s')" % "\0".join(s)
        elif t == 'n':
            return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
        elif t == 'b':
            return "_list('%s')" % "\0".join(a.branch() for a in s)

        # no compact form: split in half and join with 'or'
        m = l // 2
        return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))

    ret = ''
    pos = 0
    arg = 0
    while pos < len(expr):
        c = expr[pos]
        if c == '%':
            pos += 1
            d = expr[pos]
            if d == '%':
                ret += d
            elif d in 'dsnbr':
                ret += argtype(d, args[arg])
                arg += 1
            elif d == 'l':
                # a list of some type
                pos += 1
                d = expr[pos]
                ret += listexp(list(args[arg]), d)
                arg += 1
            else:
                raise error.Abort(_('unexpected revspec format character %s')
                                  % d)
        else:
            ret += c
        pos += 1
    return ret
def prettyformat(tree):
    """Format a parsed revset tree for display, treating 'string' and
    'symbol' nodes as leaves."""
    return parser.prettyformat(tree, ('string', 'symbol'))
Patrick Mezard
|
r16218 | |||
def depth(tree):
    # leaves (non-tuples) sit at depth 0; a tuple node is one deeper than
    # its deepest child
    if not isinstance(tree, tuple):
        return 0
    return 1 + max(depth(subtree) for subtree in tree)
def funcsused(tree):
    # collect the names of all 'func' nodes appearing anywhere in the tree
    if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
        return set()
    funcs = set()
    if tree[0] == 'func':
        funcs.add(tree[1][1])
    for child in tree[1:]:
        funcs.update(funcsused(child))
    return funcs
Yuya Nishihara
|
r28423 | def _formatsetrepr(r): | ||
"""Format an optional printable representation of a set | ||||
======== ================================= | ||||
type(r) example | ||||
======== ================================= | ||||
tuple ('<not %r>', other) | ||||
str '<branch closed>' | ||||
callable lambda: '<branch %r>' % sorted(b) | ||||
object other | ||||
======== ================================= | ||||
""" | ||||
if r is None: | ||||
return '' | ||||
elif isinstance(r, tuple): | ||||
return r[0] % r[1:] | ||||
elif isinstance(r, str): | ||||
return r | ||||
elif callable(r): | ||||
return r() | ||||
else: | ||||
return repr(r) | ||||
class abstractsmartset(object):
    """Base class of the smartset API: an ordered set of revision numbers

    Subclasses must implement membership, iteration, length and ordering
    queries; this base class provides the generic set operators on top of
    them.
    """

    def __nonzero__(self):
        """True if the smartset is not empty"""
        raise NotImplementedError()

    def __contains__(self, rev):
        """provide fast membership testing"""
        raise NotImplementedError()

    def __iter__(self):
        """iterate the set in the order it is supposed to be iterated"""
        raise NotImplementedError()

    # Attributes containing a function to perform a fast iteration in a given
    # direction. A smartset can have none, one, or both defined.
    #
    # Default value is None instead of a function returning None to avoid
    # initializing an iterator just for testing if a fast method exists.
    fastasc = None
    fastdesc = None

    def isascending(self):
        """True if the set will iterate in ascending order"""
        raise NotImplementedError()

    def isdescending(self):
        """True if the set will iterate in descending order"""
        raise NotImplementedError()

    def istopo(self):
        """True if the set will iterate in topographical order"""
        raise NotImplementedError()

    @util.cachefunc
    def min(self):
        """return the minimum element in the set"""
        if self.fastasc is not None:
            # first element of the fast ascending iteration is the minimum
            for r in self.fastasc():
                return r
            raise ValueError('arg is an empty sequence')
        return min(self)

    @util.cachefunc
    def max(self):
        """return the maximum element in the set"""
        if self.fastdesc is not None:
            # first element of the fast descending iteration is the maximum
            for r in self.fastdesc():
                return r
            raise ValueError('arg is an empty sequence')
        return max(self)

    def first(self):
        """return the first element in the set (user iteration perspective)

        Return None if the set is empty"""
        raise NotImplementedError()

    def last(self):
        """return the last element in the set (user iteration perspective)

        Return None if the set is empty"""
        raise NotImplementedError()

    def __len__(self):
        """return the length of the smartsets

        This can be expensive on smartset that could be lazy otherwise."""
        raise NotImplementedError()

    def reverse(self):
        """reverse the expected iteration order"""
        raise NotImplementedError()

    def sort(self, reverse=True):
        """get the set to iterate in an ascending or descending order"""
        raise NotImplementedError()

    def __and__(self, other):
        """Returns a new object with the intersection of the two collections.

        This is part of the mandatory API for smartset."""
        if isinstance(other, fullreposet):
            # intersecting with everything is a no-op
            return self
        return self.filter(other.__contains__, condrepr=other, cache=False)

    def __add__(self, other):
        """Returns a new object with the union of the two collections.

        This is part of the mandatory API for smartset."""
        return addset(self, other)

    def __sub__(self, other):
        """Returns a new object with the subtraction of the two collections.

        This is part of the mandatory API for smartset."""
        c = other.__contains__
        return self.filter(lambda r: not c(r), condrepr=('<not %r>', other),
                           cache=False)

    def filter(self, condition, condrepr=None, cache=True):
        """Returns this smartset filtered by condition as a new smartset.

        `condition` is a callable which takes a revision number and returns a
        boolean. Optional `condrepr` provides a printable representation of
        the given `condition`.

        This is part of the mandatory API for smartset."""
        # builtin cannot be cached. but do not needs to
        if cache and util.safehasattr(condition, 'func_code'):
            condition = util.cachefunc(condition)
        return filteredset(self, condition, condrepr)
Pierre-Yves David
|
r22692 | |||
Pierre-Yves David
|
r22825 | class baseset(abstractsmartset): | ||
Lucas Moscovicz
|
r20416 | """Basic data structure that represents a revset and contains the basic | ||
operation that it should be able to perform. | ||||
Lucas Moscovicz
|
r20727 | |||
Every method in this class should be implemented by any smartset class. | ||||
Lucas Moscovicz
|
r20416 | """ | ||
Martijn Pieters
|
r29346 | def __init__(self, data=(), datarepr=None, istopo=False): | ||
Yuya Nishihara
|
r28425 | """ | ||
datarepr: a tuple of (format, obj, ...), a function or an object that | ||||
provides a printable representation of the given data. | ||||
""" | ||||
Pierre-Yves David
|
r28786 | self._ascending = None | ||
Martijn Pieters
|
r29346 | self._istopo = istopo | ||
Pierre-Yves David
|
r22825 | if not isinstance(data, list): | ||
Pierre-Yves David
|
r26060 | if isinstance(data, set): | ||
self._set = data | ||||
Pierre-Yves David
|
r28786 | # set has no order we pick one for stability purpose | ||
self._ascending = True | ||||
Pierre-Yves David
|
r22825 | data = list(data) | ||
self._list = data | ||||
Yuya Nishihara
|
r28425 | self._datarepr = datarepr | ||
Lucas Moscovicz
|
r20365 | |||
Pierre-Yves David
|
r22826 | @util.propertycache | ||
Pierre-Yves David
|
r22879 | def _set(self): | ||
return set(self._list) | ||||
@util.propertycache | ||||
Pierre-Yves David
|
r22826 | def _asclist(self): | ||
asclist = self._list[:] | ||||
asclist.sort() | ||||
return asclist | ||||
Pierre-Yves David
|
r22827 | def __iter__(self): | ||
if self._ascending is None: | ||||
return iter(self._list) | ||||
elif self._ascending: | ||||
return iter(self._asclist) | ||||
else: | ||||
return reversed(self._asclist) | ||||
Pierre-Yves David
|
r22826 | def fastasc(self): | ||
return iter(self._asclist) | ||||
def fastdesc(self): | ||||
return reversed(self._asclist) | ||||
Pierre-Yves David
|
r22503 | @util.propertycache | ||
def __contains__(self): | ||||
Pierre-Yves David
|
r22880 | return self._set.__contains__ | ||
Pierre-Yves David
|
r22503 | |||
Pierre-Yves David
|
r22691 | def __nonzero__(self): | ||
Pierre-Yves David
|
r22825 | return bool(self._list) | ||
def sort(self, reverse=False): | ||||
Pierre-Yves David
|
r22829 | self._ascending = not bool(reverse) | ||
Martijn Pieters
|
r29346 | self._istopo = False | ||
Pierre-Yves David
|
r22825 | |||
def reverse(self): | ||||
Pierre-Yves David
|
r22829 | if self._ascending is None: | ||
self._list.reverse() | ||||
else: | ||||
self._ascending = not self._ascending | ||||
Martijn Pieters
|
r29346 | self._istopo = False | ||
Pierre-Yves David
|
r22825 | |||
def __len__(self): | ||||
return len(self._list) | ||||
Pierre-Yves David
|
r22691 | |||
Lucas Moscovicz
|
r20725 | def isascending(self): | ||
Lucas Moscovicz
|
r20727 | """Returns True if the collection is ascending order, False if not. | ||
This is part of the mandatory API for smartset.""" | ||||
Pierre-Yves David
|
r22863 | if len(self) <= 1: | ||
return True | ||||
Pierre-Yves David
|
r22828 | return self._ascending is not None and self._ascending | ||
Lucas Moscovicz
|
r20725 | |||
def isdescending(self): | ||||
Lucas Moscovicz
|
r20727 | """Returns True if the collection is descending order, False if not. | ||
This is part of the mandatory API for smartset.""" | ||||
Pierre-Yves David
|
r22863 | if len(self) <= 1: | ||
return True | ||||
Pierre-Yves David
|
r22828 | return self._ascending is not None and not self._ascending | ||
Lucas Moscovicz
|
r20725 | |||
Martijn Pieters
|
r29346 | def istopo(self): | ||
"""Is the collection is in topographical order or not. | ||||
This is part of the mandatory API for smartset.""" | ||||
if len(self) <= 1: | ||||
return True | ||||
return self._istopo | ||||
Pierre-Yves David
|
r22812 | def first(self): | ||
if self: | ||||
Pierre-Yves David
|
r22829 | if self._ascending is None: | ||
return self._list[0] | ||||
elif self._ascending: | ||||
return self._asclist[0] | ||||
else: | ||||
return self._asclist[-1] | ||||
Pierre-Yves David
|
r22812 | return None | ||
def last(self): | ||||
if self: | ||||
Pierre-Yves David
|
r22829 | if self._ascending is None: | ||
return self._list[-1] | ||||
elif self._ascending: | ||||
return self._asclist[-1] | ||||
else: | ||||
return self._asclist[0] | ||||
Pierre-Yves David
|
r22812 | return None | ||
Yuya Nishihara
|
r24457 | def __repr__(self): | ||
d = {None: '', False: '-', True: '+'}[self._ascending] | ||||
Yuya Nishihara
|
r28425 | s = _formatsetrepr(self._datarepr) | ||
if not s: | ||||
Pierre-Yves David
|
r28785 | l = self._list | ||
# if _list has been built from a set, it might have a different | ||||
# order from one python implementation to another. | ||||
# We fallback to the sorted version for a stable output. | ||||
if self._ascending is not None: | ||||
l = self._asclist | ||||
s = repr(l) | ||||
Yuya Nishihara
|
r28425 | return '<%s%s %s>' % (type(self).__name__, d, s) | ||
Yuya Nishihara
|
r24457 | |||
Pierre-Yves David
|
class filteredset(abstractsmartset):
    """Duck type for baseset class which iterates lazily over the revisions in
    the subset and contains a function which tests for membership in the
    revset
    """
    def __init__(self, subset, condition=lambda x: True, condrepr=None):
        """
        condition: a function that decide whether a revision in the subset
                   belongs to the revset or not.
        condrepr: a tuple of (format, obj, ...), a function or an object that
                  provides a printable representation of the given condition.
        """
        self._subset = subset
        self._condition = condition
        self._condrepr = condrepr

    def __contains__(self, x):
        return x in self._subset and self._condition(x)

    def __iter__(self):
        return self._iterfilter(self._subset)

    def _iterfilter(self, it):
        # bind the predicate once; it is invoked for every candidate
        cond = self._condition
        for x in it:
            if cond(x):
                yield x

    @property
    def fastasc(self):
        it = self._subset.fastasc
        if it is None:
            return None
        return lambda: self._iterfilter(it())

    @property
    def fastdesc(self):
        it = self._subset.fastdesc
        if it is None:
            return None
        return lambda: self._iterfilter(it())

    def __nonzero__(self):
        # Prefer an iterator that matches the current direction, then any
        # fast iterator at all, before plain (slow) filtered iteration.
        fast = None
        for candidate in (self.fastasc if self.isascending() else None,
                          self.fastdesc if self.isdescending() else None,
                          self.fastasc,
                          self.fastdesc):
            if candidate is not None:
                fast = candidate
                break

        it = fast() if fast is not None else self

        for r in it:
            return True
        return False

    def __len__(self):
        # Basic implementation to be changed in future patches.
        # until this gets improved, we use generator expression
        # here, since list compr is free to call __len__ again
        # causing infinite recursion
        l = baseset(r for r in self)
        return len(l)

    def sort(self, reverse=False):
        self._subset.sort(reverse=reverse)

    def reverse(self):
        self._subset.reverse()

    def isascending(self):
        return self._subset.isascending()

    def isdescending(self):
        return self._subset.isdescending()

    def istopo(self):
        return self._subset.istopo()

    def first(self):
        for x in self:
            return x
        return None

    def last(self):
        it = None
        if self.isascending():
            it = self.fastdesc
        elif self.isdescending():
            it = self.fastasc
        if it is not None:
            for x in it():
                return x
            return None  # empty case
        else:
            # no fast path: walk the whole set, keeping the last value seen
            x = None
            for x in self:
                pass
            return x

    def __repr__(self):
        xs = [repr(self._subset)]
        s = _formatsetrepr(self._condrepr)
        if s:
            xs.append(s)
        return '<%s %s>' % (type(self).__name__, ', '.join(xs))
Yuya Nishihara
|
r24457 | |||
Yuya Nishihara
|
def _iterordered(ascending, iter1, iter2):
    """produce an ordered iteration from two iterators with the same order

    The ascending is used to indicated the iteration direction.
    """
    choice = min if ascending else max

    pending1 = None
    pending2 = None
    try:
        # Consume both iterators in an ordered way until one is empty
        while True:
            if pending1 is None:
                pending1 = next(iter1)
            if pending2 is None:
                pending2 = next(iter2)
            winner = choice(pending1, pending2)
            yield winner
            # drop whichever side(s) produced the value just yielded,
            # so equal values are emitted only once
            if pending1 == winner:
                pending1 = None
            if pending2 == winner:
                pending2 = None
    except StopIteration:
        # Flush any remaining value and consume the surviving iterator
        it = iter2
        if pending1 is not None:
            yield pending1
            it = iter1
        elif pending2 is not None:
            # might have been equality and both are empty
            yield pending2
        for val in it:
            yield val
Pierre-Yves David
|
class addset(abstractsmartset):
    """Represent the addition of two sets

    Wrapper structure for lazily adding two structures without losing much
    performance on the __contains__ method

    If the ascending attribute is set, that means the two structures are
    ordered in either an ascending or descending way. Therefore, we can add
    them maintaining the order by iterating over both at the same time

    >>> xs = baseset([0, 3, 2])
    >>> ys = baseset([5, 2, 4])

    >>> rs = addset(xs, ys)
    >>> bool(rs), 0 in rs, 1 in rs, 5 in rs, rs.first(), rs.last()
    (True, True, False, True, 0, 4)
    >>> rs = addset(xs, baseset([]))
    >>> bool(rs), 0 in rs, 1 in rs, rs.first(), rs.last()
    (True, True, False, 0, 2)
    >>> rs = addset(baseset([]), baseset([]))
    >>> bool(rs), 0 in rs, rs.first(), rs.last()
    (False, False, None, None)

    iterate unsorted:
    >>> rs = addset(xs, ys)
    >>> # (use generator because pypy could call len())
    >>> list(x for x in rs)  # without _genlist
    [0, 3, 2, 5, 4]
    >>> assert not rs._genlist
    >>> len(rs)
    5
    >>> [x for x in rs]  # with _genlist
    [0, 3, 2, 5, 4]
    >>> assert rs._genlist

    iterate ascending:
    >>> rs = addset(xs, ys, ascending=True)
    >>> # (use generator because pypy could call len())
    >>> list(x for x in rs), list(x for x in rs.fastasc())  # without _asclist
    ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
    >>> assert not rs._asclist
    >>> len(rs)
    5
    >>> [x for x in rs], [x for x in rs.fastasc()]
    ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
    >>> assert rs._asclist

    iterate descending:
    >>> rs = addset(xs, ys, ascending=False)
    >>> # (use generator because pypy could call len())
    >>> list(x for x in rs), list(x for x in rs.fastdesc())  # without _asclist
    ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
    >>> assert not rs._asclist
    >>> len(rs)
    5
    >>> [x for x in rs], [x for x in rs.fastdesc()]
    ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
    >>> assert rs._asclist

    iterate ascending without fastasc:
    >>> rs = addset(xs, generatorset(ys), ascending=True)
    >>> assert rs.fastasc is None
    >>> [x for x in rs]
    [0, 2, 3, 4, 5]

    iterate descending without fastdesc:
    >>> rs = addset(generatorset(xs), ys, ascending=False)
    >>> assert rs.fastdesc is None
    >>> [x for x in rs]
    [5, 4, 3, 2, 0]
    """
    def __init__(self, revs1, revs2, ascending=None):
        self._r1 = revs1
        self._r2 = revs2
        self._iter = None
        self._ascending = ascending
        self._genlist = None
        self._asclist = None

    def __len__(self):
        return len(self._list)

    def __nonzero__(self):
        return bool(self._r1) or bool(self._r2)

    @util.propertycache
    def _list(self):
        if not self._genlist:
            self._genlist = baseset(iter(self))
        return self._genlist

    def __iter__(self):
        """Iterate over both collections without repeating elements

        If the ascending attribute is not set, iterate over the first one and
        then over the second one checking for membership on the first one so
        we dont yield any duplicates.

        If the ascending attribute is set, iterate over both collections at
        the same time, yielding only one value at a time in the given order.
        """
        if self._ascending is None:
            if self._genlist:
                return iter(self._genlist)
            def arbitraryordergen():
                for r in self._r1:
                    yield r
                inr1 = self._r1.__contains__
                for r in self._r2:
                    if not inr1(r):
                        yield r
            return arbitraryordergen()
        # try to use our own fast iterator if it exists
        self._trysetasclist()
        attr = 'fastasc' if self._ascending else 'fastdesc'
        it = getattr(self, attr)
        if it is not None:
            return it()
        # maybe half of the components support fast iteration; fall back to
        # a sorted snapshot for the one that does not
        def _fastiter(revs):
            fast = getattr(revs, attr)
            if fast is None:
                # let's avoid side effect (not sure it matters)
                return iter(sorted(revs, reverse=not self._ascending))
            return fast()
        return _iterordered(self._ascending, _fastiter(self._r1),
                            _fastiter(self._r2))

    def _trysetasclist(self):
        """populate the _asclist attribute if possible and necessary"""
        if self._genlist is not None and self._asclist is None:
            self._asclist = sorted(self._genlist)

    @property
    def fastasc(self):
        self._trysetasclist()
        if self._asclist is not None:
            return self._asclist.__iter__
        iter1 = self._r1.fastasc
        iter2 = self._r2.fastasc
        if None in (iter1, iter2):
            return None
        return lambda: _iterordered(True, iter1(), iter2())

    @property
    def fastdesc(self):
        self._trysetasclist()
        if self._asclist is not None:
            return self._asclist.__reversed__
        iter1 = self._r1.fastdesc
        iter2 = self._r2.fastdesc
        if None in (iter1, iter2):
            return None
        return lambda: _iterordered(False, iter1(), iter2())

    def __contains__(self, x):
        return x in self._r1 or x in self._r2

    def sort(self, reverse=False):
        """Sort the added set

        For this we use the cached list with all the generated values and if
        we know they are ascending or descending we can sort them in a smart
        way.
        """
        self._ascending = not reverse

    def isascending(self):
        return self._ascending is not None and self._ascending

    def isdescending(self):
        return self._ascending is not None and not self._ascending

    def istopo(self):
        # not worth the trouble asserting if the two sets combined are still
        # in topographical order. Use the sort() predicate to explicitly sort
        # again instead.
        return False

    def reverse(self):
        if self._ascending is None:
            self._list.reverse()
        else:
            self._ascending = not self._ascending

    def first(self):
        for x in self:
            return x
        return None

    def last(self):
        self.reverse()
        val = self.first()
        self.reverse()
        return val

    def __repr__(self):
        d = {None: '', False: '-', True: '+'}[self._ascending]
        return '<%s%s %r, %r>' % (type(self).__name__, d, self._r1, self._r2)
Pierre-Yves David
|
class generatorset(abstractsmartset):
    """Wrap a generator for lazy iteration

    Wrapper structure for generators that provides lazy membership and can
    be iterated more than once.
    When asked for membership it generates values until either it finds the
    requested one or has gone through all the elements in the generator
    """
    def __init__(self, gen, iterasc=None):
        """
        gen: a generator producing the values for the generatorset.

        iterasc: optional ordering hint for ``gen``; True means ascending,
                 False means descending, None means unknown order.
        """
        self._gen = gen
        # sorted snapshot of all generated values, set once gen is exhausted
        self._asclist = None
        # membership cache: value -> True/False
        self._cache = {}
        # values generated so far, in generation order
        self._genlist = []
        self._finished = False
        self._ascending = True
        if iterasc is not None:
            if iterasc:
                self.fastasc = self._iterator
                # NOTE(review): this instance attribute is only honored by
                # explicit ``obj.__contains__(x)`` calls; the ``in`` operator
                # on new-style classes still uses the class-level method.
                self.__contains__ = self._asccontains
            else:
                self.fastdesc = self._iterator
                self.__contains__ = self._desccontains

    def __nonzero__(self):
        # Do not use 'for r in self' because it will enforce the iteration
        # order (default ascending), possibly unrolling a whole descending
        # iterator.
        if self._genlist:
            return True
        for r in self._consumegen():
            return True
        return False

    def __contains__(self, x):
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True

        self._cache[x] = False
        return False

    def _asccontains(self, x):
        """version of contains optimised for ascending generator"""
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True
            if l > x:
                # ascending: once past x, it can no longer appear
                break

        self._cache[x] = False
        return False

    def _desccontains(self, x):
        """version of contains optimised for descending generator"""
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True
            if l < x:
                # descending: once below x, it can no longer appear
                break

        self._cache[x] = False
        return False

    def __iter__(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        if it is not None:
            return it()
        # we need to consume the iterator
        for x in self._consumegen():
            pass
        # recall the same code: _consumegen installed fastasc/fastdesc
        return iter(self)

    def _iterator(self):
        if self._finished:
            return iter(self._genlist)

        # We have to use this complex iteration strategy to allow multiple
        # iterations at the same time. We need to be able to catch revision
        # removed from _consumegen and added to genlist in another instance.
        #
        # Getting rid of it would provide an about 15% speed up on this
        # iteration.
        genlist = self._genlist
        nextrev = self._consumegen().next
        _len = len # cache global lookup
        def gen():
            i = 0
            while True:
                if i < _len(genlist):
                    yield genlist[i]
                else:
                    yield nextrev()
                i += 1
        return gen()

    def _consumegen(self):
        """yield from the wrapped generator, caching every produced value"""
        cache = self._cache
        genlist = self._genlist.append
        for item in self._gen:
            cache[item] = True
            genlist(item)
            yield item
        if not self._finished:
            # generator exhausted: freeze a sorted snapshot and expose the
            # fast iterators from now on
            self._finished = True
            asc = self._genlist[:]
            asc.sort()
            self._asclist = asc
            self.fastasc = asc.__iter__
            self.fastdesc = asc.__reversed__

    def __len__(self):
        for x in self._consumegen():
            pass
        return len(self._genlist)

    def sort(self, reverse=False):
        self._ascending = not reverse

    def reverse(self):
        self._ascending = not self._ascending

    def isascending(self):
        return self._ascending

    def isdescending(self):
        return not self._ascending

    def istopo(self):
        # not worth the trouble asserting if the two sets combined are still
        # in topographical order. Use the sort() predicate to explicitly sort
        # again instead.
        return False

    def first(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        if it is None:
            # we need to consume all and try again
            for x in self._consumegen():
                pass
            return self.first()
        return next(it(), None)

    def last(self):
        if self._ascending:
            it = self.fastdesc
        else:
            it = self.fastasc
        if it is None:
            # we need to consume all and try again; _consumegen installs
            # fastasc/fastdesc once exhausted, so the recursion terminates.
            # (bug fix: this used to recurse into first(), which returned
            # the wrong end of the set.)
            for x in self._consumegen():
                pass
            return self.last()
        return next(it(), None)

    def __repr__(self):
        d = {False: '-', True: '+'}[self._ascending]
        return '<%s%s>' % (type(self).__name__, d)
Yuya Nishihara
|
class spanset(abstractsmartset):
    """Duck type for baseset class which represents a range of revisions and
    can work lazily and without having all the range in memory

    Note that spanset(x, y) behave almost like xrange(x, y) except for two
    notable points:
    - when x < y it will be automatically descending,
    - revision filtered with this repoview will be skipped.

    """
    def __init__(self, repo, start=0, end=None):
        """
        start: first revision included the set
               (default to 0)
        end:   first revision excluded (last+1)
               (default to len(repo)

        Spanset will be descending if `end` < `start`.
        """
        if end is None:
            end = len(repo)
        self._ascending = start <= end
        if not self._ascending:
            # normalize so that _start <= _end always holds; the direction
            # is carried by _ascending only
            start, end = end + 1, start + 1
        self._start = start
        self._end = end
        self._hiddenrevs = repo.changelog.filteredrevs

    def sort(self, reverse=False):
        self._ascending = not reverse

    def reverse(self):
        self._ascending = not self._ascending

    def istopo(self):
        # not worth the trouble asserting if the two sets combined are still
        # in topographical order. Use the sort() predicate to explicitly sort
        # again instead.
        return False

    def _iterfilter(self, iterrange):
        s = self._hiddenrevs
        for r in iterrange:
            if r not in s:
                yield r

    def __iter__(self):
        return self.fastasc() if self._ascending else self.fastdesc()

    def fastasc(self):
        span = xrange(self._start, self._end)
        if self._hiddenrevs:
            return self._iterfilter(span)
        return iter(span)

    def fastdesc(self):
        span = xrange(self._end - 1, self._start - 1, -1)
        if self._hiddenrevs:
            return self._iterfilter(span)
        return iter(span)

    def __contains__(self, rev):
        hidden = self._hiddenrevs
        return ((self._start <= rev < self._end)
                and not (hidden and rev in hidden))

    def __nonzero__(self):
        for r in self:
            return True
        return False

    def __len__(self):
        if not self._hiddenrevs:
            return abs(self._end - self._start)
        else:
            count = 0
            start = self._start
            end = self._end
            for rev in self._hiddenrevs:
                # NOTE(review): the first disjunct looks unreachable since
                # __init__ normalizes start <= end — confirm before removing
                if (end < rev <= start) or (start <= rev < end):
                    count += 1
            return abs(self._end - self._start) - count

    def isascending(self):
        return self._ascending

    def isdescending(self):
        return not self._ascending

    def first(self):
        it = self.fastasc if self._ascending else self.fastdesc
        for x in it():
            return x
        return None

    def last(self):
        # opposite direction from first()
        it = self.fastdesc if self._ascending else self.fastasc
        for x in it():
            return x
        return None

    def __repr__(self):
        d = {False: '-', True: '+'}[self._ascending]
        return '<%s%s %d:%d>' % (type(self).__name__, d,
                                 self._start, self._end - 1)
Yuya Nishihara
|
class fullreposet(spanset):
    """a set containing all revisions in the repo

    This class exists to host special optimization and magic to handle virtual
    revisions such as "null".
    """

    def __init__(self, repo):
        super(fullreposet, self).__init__(repo)

    def __and__(self, other):
        """As self contains the whole repo, all of the other set should also be
        in self. Therefore `self & other = other`.

        This boldly assumes the other contains valid revs only.
        """
        # other is not a smartset: make it one
        if not util.safehasattr(other, 'isascending'):
            # filter out hidden revision
            # (this boldly assumes all smartset are pure)
            #
            # `other` was used with "&", let's assume this is a set like
            # object.
            other = baseset(other - self._hiddenrevs)

        # XXX As fullreposet is also used as bootstrap, this is wrong.
        #
        # With a giveme312() revset returning [3,1,2], this makes
        #   'hg log -r "giveme312()"' -> 1, 2, 3 (wrong)
        # We cannot just drop it because other usage still need to sort it:
        #   'hg log -r "all() and giveme312()"' -> 1, 2, 3 (right)
        #
        # There is also some faulty revset implementations that rely on it
        # (eg: children as of its state in e8075329c5fb)
        #
        # When we fix the two points above we can move this into the if clause
        other.sort(reverse=self.isdescending())
        return other
Yuya Nishihara
|
def prettyformatset(revs):
    """Return a multi-line rendering of ``repr(revs)``.

    Each nested ``<...>`` smartset repr starts on its own line, indented
    by its nesting depth.
    """
    text = repr(revs)
    chunks = []
    pos = 0
    while pos < len(text):
        nxt = text.find('<', pos + 1)
        if nxt < 0:
            nxt = len(text)
        # nesting depth of this chunk = unmatched '<' seen so far
        depth = text.count('<', 0, pos) - text.count('>', 0, pos)
        assert depth >= 0
        chunks.append((depth, text[pos:nxt].rstrip()))
        pos = nxt
    return '\n'.join(' ' * depth + part for depth, part in chunks)
FUJIWARA Katsunori
|
def loadpredicate(ui, extname, registrarobj):
    """Load revset predicates from specified registrarobj

    Every registered predicate is published in ``symbols``; the ones marked
    safe are additionally recorded in ``safesymbols``.
    """
    table = registrarobj._table
    for name, func in table.iteritems():
        symbols[name] = func
        if func._safe:
            safesymbols.add(name)
FUJIWARA Katsunori
|
# load built-in predicates explicitly to setup safesymbols
loadpredicate(None, None, predicate)

# tell hggettext to extract docstrings from these functions:
i18nfunctions = symbols.values()