# revset.py - revision set queries for mercurial
#
# Copyright 2010 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

import re
import parser, util, error, hbisect, phases
import node
import heapq
import match as matchmod
from i18n import _
import encoding
import obsolete as obsmod
import pathutil
import repoview

def _revancestors(repo, revs, followfirst):
    """Like revlog.ancestors(), but supports followfirst."""
    if followfirst:
        cut = 1
    else:
        cut = None
    cl = repo.changelog

    def iterate():
        revs.sort(reverse=True)
        irevs = iter(revs)
        h = []

        inputrev = next(irevs, None)
        if inputrev is not None:
            heapq.heappush(h, -inputrev)

        seen = set()
        while h:
            current = -heapq.heappop(h)
            if current == inputrev:
                inputrev = next(irevs, None)
                if inputrev is not None:
                    heapq.heappush(h, -inputrev)
            if current not in seen:
                seen.add(current)
                yield current
                for parent in cl.parentrevs(current)[:cut]:
                    if parent != node.nullrev:
                        heapq.heappush(h, -parent)

    return generatorset(iterate(), iterasc=False)

def _revdescendants(repo, revs, followfirst):
    """Like revlog.descendants() but supports followfirst."""
    if followfirst:
        cut = 1
    else:
        cut = None

    def iterate():
        cl = repo.changelog
        # XXX this should be 'parentset.min()' assuming 'parentset' is a
        # smartset (and if it is not, it should.)
        first = min(revs)
        nullrev = node.nullrev
        if first == nullrev:
            # Are there nodes with a null first parent and a non-null
            # second one? Maybe. Do we care? Probably not.
            for i in cl:
                yield i
        else:
            seen = set(revs)
            for i in cl.revs(first + 1):
                for x in cl.parentrevs(i)[:cut]:
                    if x != nullrev and x in seen:
                        seen.add(i)
                        yield i
                        break

    return generatorset(iterate(), iterasc=True)

def _revsbetween(repo, roots, heads):
    """Return all paths between roots and heads, inclusive of both endpoint
    sets."""
    if not roots:
        return baseset()
    parentrevs = repo.changelog.parentrevs
    visit = list(heads)
    reachable = set()
    seen = {}
    # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
    # (and if it is not, it should.)
    minroot = min(roots)
    roots = set(roots)
    # prefetch all the things! (because python is slow)
    reached = reachable.add
    dovisit = visit.append
    nextvisit = visit.pop
    # open-code the post-order traversal due to the tiny size of
    # sys.getrecursionlimit()
    while visit:
        rev = nextvisit()
        if rev in roots:
            reached(rev)
        parents = parentrevs(rev)
        seen[rev] = parents
        for parent in parents:
            if parent >= minroot and parent not in seen:
                dovisit(parent)
    if not reachable:
        return baseset()
    for rev in sorted(seen):
        for parent in seen[rev]:
            if parent in reachable:
                reached(rev)
    return baseset(sorted(reachable))

elements = {
    # token-type: binding-strength, primary, prefix, infix, suffix
    "(": (21, None, ("group", 1, ")"), ("func", 1, ")"), None),
    "##": (20, None, None, ("_concat", 20), None),
    "~": (18, None, None, ("ancestor", 18), None),
    "^": (18, None, None, ("parent", 18), ("parentpost", 18)),
    "-": (5, None, ("negate", 19), ("minus", 5), None),
    "::": (17, None, ("dagrangepre", 17), ("dagrange", 17),
           ("dagrangepost", 17)),
    "..": (17, None, ("dagrangepre", 17), ("dagrange", 17),
           ("dagrangepost", 17)),
    ":": (15, "rangeall", ("rangepre", 15), ("range", 15), ("rangepost", 15)),
    "not": (10, None, ("not", 10), None, None),
    "!": (10, None, ("not", 10), None, None),
    "and": (5, None, None, ("and", 5), None),
    "&": (5, None, None, ("and", 5), None),
    "%": (5, None, None, ("only", 5), ("onlypost", 5)),
    "or": (4, None, None, ("or", 4), None),
    "|": (4, None, None, ("or", 4), None),
    "+": (4, None, None, ("or", 4), None),
    "=": (3, None, None, ("keyvalue", 3), None),
    ",": (2, None, None, ("list", 2), None),
    ")": (0, None, None, None, None),
    "symbol": (0, "symbol", None, None, None),
    "string": (0, "string", None, None, None),
    "end": (0, None, None, None, None),
}

keywords = set(['and', 'or', 'not'])
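
# The binding strengths above drive the expression parser in parser.py: a
# higher number binds more tightly. For example, "and"/"&" (5) bind tighter
# than "or"/"|"/"+" (4), so "a or b and c" groups as "a or (b and c)";
# parentheses, which bind strongest, override that grouping.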

# default set of valid characters for the initial letter of symbols
_syminitletters = set(c for c in [chr(i) for i in xrange(256)]
                      if c.isalnum() or c in '._@' or ord(c) > 127)
# default set of valid characters for non-initial letters of symbols
_symletters = set(c for c in [chr(i) for i in xrange(256)]
                  if c.isalnum() or c in '-._/@' or ord(c) > 127)

def tokenize(program, lookup=None, syminitletters=None, symletters=None):
    '''
    Parse a revset statement into a stream of tokens

    ``syminitletters`` is the set of valid characters for the initial
    letter of symbols.

    By default, character ``c`` is recognized as valid for initial
    letter of symbols, if ``c.isalnum() or c in '._@' or ord(c) > 127``.

    ``symletters`` is the set of valid characters for non-initial
    letters of symbols.

    By default, character ``c`` is recognized as valid for non-initial
    letters of symbols, if ``c.isalnum() or c in '-._/@' or ord(c) > 127``.

    Check that @ is a valid unquoted token character (issue3686):
    >>> list(tokenize("@::"))
    [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]

    '''
    if syminitletters is None:
        syminitletters = _syminitletters
    if symletters is None:
        symletters = _symletters

    pos, l = 0, len(program)
    while pos < l:
        c = program[pos]
        if c.isspace(): # skip inter-token whitespace
            pass
        elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
            yield ('::', None, pos)
            pos += 1 # skip ahead
        elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
            yield ('..', None, pos)
            pos += 1 # skip ahead
        elif c == '#' and program[pos:pos + 2] == '##': # look ahead carefully
            yield ('##', None, pos)
            pos += 1 # skip ahead
        elif c in "():=,-|&+!~^%": # handle simple operators
            yield (c, None, pos)
        elif (c in '"\'' or c == 'r' and
              program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
            if c == 'r':
                pos += 1
                c = program[pos]
                decode = lambda x: x
            else:
                decode = lambda x: x.decode('string-escape')
            pos += 1
            s = pos
            while pos < l: # find closing quote
                d = program[pos]
                if d == '\\': # skip over escaped characters
                    pos += 2
                    continue
                if d == c:
                    yield ('string', decode(program[s:pos]), s)
                    break
                pos += 1
            else:
                raise error.ParseError(_("unterminated string"), s)
        # gather up a symbol/keyword
        elif c in syminitletters:
            s = pos
            pos += 1
            while pos < l: # find end of symbol
                d = program[pos]
                if d not in symletters:
                    break
                if d == '.' and program[pos - 1] == '.': # special case for ..
                    pos -= 1
                    break
                pos += 1
            sym = program[s:pos]
            if sym in keywords: # operator keywords
                yield (sym, None, s)
            elif '-' in sym:
                # some jerk gave us foo-bar-baz, try to check if it's a symbol
                if lookup and lookup(sym):
                    # looks like a real symbol
                    yield ('symbol', sym, s)
                else:
                    # looks like an expression
                    parts = sym.split('-')
                    for p in parts[:-1]:
                        if p: # possible consecutive -
                            yield ('symbol', p, s)
                        s += len(p)
                        yield ('-', None, pos)
                        s += 1
                    if parts[-1]: # possible trailing -
                        yield ('symbol', parts[-1], s)
            else:
                yield ('symbol', sym, s)
            pos -= 1
        else:
            raise error.ParseError(_("syntax error in revset '%s'") %
                                   program, pos)
        pos += 1
    yield ('end', None, pos)
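
# For example, a simple expression tokenizes as follows (the third element of
# each token is its byte offset in the input):
#
#   >>> list(tokenize("1 or 2"))
#   [('symbol', '1', 0), ('or', None, 2), ('symbol', '2', 5), ('end', None, 6)]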

def parseerrordetail(inst):
    """Compose error message from specified ParseError object
    """
    if len(inst.args) > 1:
        return _('at %s: %s') % (inst.args[1], inst.args[0])
    else:
        return inst.args[0]

# helpers
def getstring(x, err):
    if x and (x[0] == 'string' or x[0] == 'symbol'):
        return x[1]
    raise error.ParseError(err)

def getlist(x):
    if not x:
        return []
    if x[0] == 'list':
        return getlist(x[1]) + [x[2]]
    return [x]
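
# For example, "a, b, c" parses (left-associatively) into nested 'list' nodes,
# roughly ('list', ('list', ('symbol', 'a'), ('symbol', 'b')), ('symbol', 'c')),
# which getlist() flattens back into the three symbol nodes, in order.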

def getargs(x, min, max, err):
    l = getlist(x)
    if len(l) < min or (max >= 0 and len(l) > max):
        raise error.ParseError(err)
    return l

def getargsdict(x, funcname, keys):
    return parser.buildargsdict(getlist(x), funcname, keys.split(),
                                keyvaluenode='keyvalue', keynode='symbol')

def isvalidsymbol(tree):
    """Examine whether specified ``tree`` is valid ``symbol`` or not
    """
    return tree[0] == 'symbol' and len(tree) > 1

def getsymbol(tree):
    """Get symbol name from valid ``symbol`` in ``tree``

    This assumes that ``tree`` is already examined by ``isvalidsymbol``.
    """
    return tree[1]

def isvalidfunc(tree):
    """Examine whether specified ``tree`` is valid ``func`` or not
    """
    return tree[0] == 'func' and len(tree) > 1 and isvalidsymbol(tree[1])

def getfuncname(tree):
    """Get function name from valid ``func`` in ``tree``

    This assumes that ``tree`` is already examined by ``isvalidfunc``.
    """
    return getsymbol(tree[1])

def getfuncargs(tree):
    """Get list of function arguments from valid ``func`` in ``tree``

    This assumes that ``tree`` is already examined by ``isvalidfunc``.
    """
    if len(tree) > 2:
        return getlist(tree[2])
    else:
        return []

def getset(repo, subset, x):
    if not x:
        raise error.ParseError(_("missing argument"))
    s = methods[x[0]](repo, subset, *x[1:])
    if util.safehasattr(s, 'isascending'):
        return s
    if (repo.ui.configbool('devel', 'all-warnings')
            or repo.ui.configbool('devel', 'old-revset')):
        # else case should not happen, because all non-func are internal,
        # ignoring for now.
        if x[0] == 'func' and x[1][0] == 'symbol' and x[1][1] in symbols:
            repo.ui.develwarn('revset "%s" use list instead of smartset, '
                              '(upgrade your code)' % x[1][1])
    return baseset(s)

def _getrevsource(repo, r):
    extra = repo[r].extra()
    for label in ('source', 'transplant_source', 'rebase_source'):
        if label in extra:
            try:
                return repo[extra[label]].rev()
            except error.RepoLookupError:
                pass
    return None

# operator methods
def stringset(repo, subset, x):
    x = repo[x].rev()
    if (x in subset
        or x == node.nullrev and isinstance(subset, fullreposet)):
        return baseset([x])
    return baseset()

def rangeset(repo, subset, x, y):
    m = getset(repo, fullreposet(repo), x)
    n = getset(repo, fullreposet(repo), y)

    if not m or not n:
        return baseset()
    m, n = m.first(), n.last()

    if m == n:
        r = baseset([m])
    elif n == node.wdirrev:
        r = spanset(repo, m, len(repo)) + baseset([n])
    elif m == node.wdirrev:
        r = baseset([m]) + spanset(repo, len(repo) - 1, n - 1)
    elif m < n:
        r = spanset(repo, m, n + 1)
    else:
        r = spanset(repo, m, n - 1)
    # XXX We should combine with subset first: 'subset & baseset(...)'. This is
    # necessary to ensure we preserve the order in subset.
    #
    # This has performance implication, carrying the sorting over when possible
    # would be more efficient.
    return r & subset

def dagrange(repo, subset, x, y):
    r = fullreposet(repo)
    xs = _revsbetween(repo, getset(repo, r, x), getset(repo, r, y))
    # XXX We should combine with subset first: 'subset & baseset(...)'. This is
    # necessary to ensure we preserve the order in subset.
    return xs & subset

def andset(repo, subset, x, y):
    return getset(repo, getset(repo, subset, x), y)

def orset(repo, subset, *xs):
    rs = [getset(repo, subset, x) for x in xs]
    return _combinesets(rs)

def notset(repo, subset, x):
    return subset - getset(repo, subset, x)

def listset(repo, subset, a, b):
    raise error.ParseError(_("can't use a list in this context"))

def keyvaluepair(repo, subset, k, v):
    raise error.ParseError(_("can't use a key-value pair in this context"))

def func(repo, subset, a, b):
    if a[0] == 'symbol' and a[1] in symbols:
        return symbols[a[1]](repo, subset, b)

    keep = lambda fn: getattr(fn, '__doc__', None) is not None

    syms = [s for (s, fn) in symbols.items() if keep(fn)]
    raise error.UnknownIdentifier(a[1], syms)

# functions
def adds(repo, subset, x):
    """``adds(pattern)``
    Changesets that add a file matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "adds" is a keyword
    pat = getstring(x, _("adds requires a pattern"))
    return checkstatus(repo, subset, pat, 1)

def ancestor(repo, subset, x):
    """``ancestor(*changeset)``
    A greatest common ancestor of the changesets.

    Accepts 0 or more changesets.
    Will return empty list when passed no args.
    Greatest common ancestor of a single changeset is that changeset.
    """
    # i18n: "ancestor" is a keyword
    l = getlist(x)
    rl = fullreposet(repo)
    anc = None

    # (getset(repo, rl, i) for i in l) generates a list of lists
    for revs in (getset(repo, rl, i) for i in l):
        for r in revs:
            if anc is None:
                anc = repo[r]
            else:
                anc = anc.ancestor(repo[r])

    if anc is not None and anc.rev() in subset:
        return baseset([anc.rev()])
    return baseset()

def _ancestors(repo, subset, x, followfirst=False):
    heads = getset(repo, fullreposet(repo), x)
    if not heads:
        return baseset()
    s = _revancestors(repo, heads, followfirst)
    return subset & s

def ancestors(repo, subset, x):
    """``ancestors(set)``
    Changesets that are ancestors of a changeset in set.
    """
    return _ancestors(repo, subset, x)

def _firstancestors(repo, subset, x):
    # ``_firstancestors(set)``
    # Like ``ancestors(set)`` but follows only the first parents.
    return _ancestors(repo, subset, x, followfirst=True)

def ancestorspec(repo, subset, x, n):
    """``set~n``
    Changesets that are the Nth ancestor (first parents only) of a changeset
    in set.
    """
    try:
        n = int(n[1])
    except (TypeError, ValueError):
        raise error.ParseError(_("~ expects a number"))
    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        for i in range(n):
            r = cl.parentrevs(r)[0]
        ps.add(r)
    return subset & ps

def author(repo, subset, x):
    """``author(string)``
    Alias for ``user(string)``.
    """
    # i18n: "author" is a keyword
    n = encoding.lower(getstring(x, _("author requires a string")))
    kind, pattern, matcher = _substringmatcher(n)
    return subset.filter(lambda x: matcher(encoding.lower(repo[x].user())))

def bisect(repo, subset, x):
    """``bisect(string)``
    Changesets marked in the specified bisect status:

    - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
    - ``goods``, ``bads`` : csets topologically good/bad
    - ``range`` : csets taking part in the bisection
    - ``pruned`` : csets that are goods, bads or skipped
    - ``untested`` : csets whose fate is yet unknown
    - ``ignored`` : csets ignored due to DAG topology
    - ``current`` : the cset currently being bisected
    """
    # i18n: "bisect" is a keyword
    status = getstring(x, _("bisect requires a string")).lower()
    state = set(hbisect.get(repo, status))
    return subset & state

# Backward-compatibility
# - no help entry so that we do not advertise it any more
def bisected(repo, subset, x):
    return bisect(repo, subset, x)

def bookmark(repo, subset, x):
    """``bookmark([name])``
    The named bookmark or all bookmarks.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a bookmark that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "bookmark" is a keyword
    args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
    if args:
        bm = getstring(args[0],
                       # i18n: "bookmark" is a keyword
                       _('the argument to bookmark must be a string'))
        kind, pattern, matcher = _stringmatcher(bm)
        bms = set()
        if kind == 'literal':
            bmrev = repo._bookmarks.get(pattern, None)
            if not bmrev:
                raise error.RepoLookupError(_("bookmark '%s' does not exist")
                                            % bm)
            bms.add(repo[bmrev].rev())
        else:
            matchrevs = set()
            for name, bmrev in repo._bookmarks.iteritems():
                if matcher(name):
                    matchrevs.add(bmrev)
            if not matchrevs:
                raise error.RepoLookupError(_("no bookmarks exist"
                                              " that match '%s'") % pattern)
            for bmrev in matchrevs:
                bms.add(repo[bmrev].rev())
    else:
        bms = set([repo[r].rev()
                   for r in repo._bookmarks.values()])
    bms -= set([node.nullrev])
    return subset & bms
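
# For example, bookmark('re:^release-') selects the changesets pointed to by
# every bookmark whose name starts with "release-", while
# bookmark('literal:re:x') matches only a bookmark literally named "re:x".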

def branch(repo, subset, x):
    """``branch(string or set)``
    All changesets belonging to the given branch or the branches of the given
    changesets.

    If `string` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a branch that actually starts with `re:`,
    use the prefix `literal:`.
    """
    getbi = repo.revbranchcache().branchinfo

    try:
        b = getstring(x, '')
    except error.ParseError:
        # not a string, but another revspec, e.g. tip()
        pass
    else:
        kind, pattern, matcher = _stringmatcher(b)
        if kind == 'literal':
            # note: falls through to the revspec case if no branch with
            # this name exists
            if pattern in repo.branchmap():
                return subset.filter(lambda r: matcher(getbi(r)[0]))
        else:
            return subset.filter(lambda r: matcher(getbi(r)[0]))

    s = getset(repo, fullreposet(repo), x)
    b = set()
    for r in s:
        b.add(getbi(r)[0])
    c = s.__contains__
    return subset.filter(lambda r: c(r) or getbi(r)[0] in b)

def bumped(repo, subset, x):
    """``bumped()``
    Mutable changesets marked as successors of public changesets.
    Only non-public and non-obsolete changesets can be `bumped`.
    """
    # i18n: "bumped" is a keyword
    getargs(x, 0, 0, _("bumped takes no arguments"))
    bumped = obsmod.getrevs(repo, 'bumped')
    return subset & bumped

def bundle(repo, subset, x):
    """``bundle()``
    Changesets in the bundle.
    Bundle must be specified by the -R option."""
    try:
        bundlerevs = repo.changelog.bundlerevs
    except AttributeError:
        raise util.Abort(_("no bundle provided - specify with -R"))
    return subset & bundlerevs

def checkstatus(repo, subset, pat, field):
    hasset = matchmod.patkind(pat) == 'set'

    mcache = [None]

    def matches(x):
        c = repo[x]
        if not mcache[0] or hasset:
            mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
        m = mcache[0]
        fname = None
        if not m.anypats() and len(m.files()) == 1:
            fname = m.files()[0]
        if fname is not None:
            if fname not in c.files():
                return False
        else:
            for f in c.files():
                if m(f):
                    break
            else:
                return False
        files = repo.status(c.p1().node(), c.node())[field]
        if fname is not None:
            if fname in files:
                return True
        else:
            for f in files:
                if m(f):
                    return True

    return subset.filter(matches)

def _children(repo, narrow, parentset):
    if not parentset:
        return baseset()
    cs = set()
    pr = repo.changelog.parentrevs
    minrev = parentset.min()
    for r in narrow:
        if r <= minrev:
            continue
        for p in pr(r):
            if p in parentset:
                cs.add(r)
    # XXX using a set to feed the baseset is wrong. Sets are not ordered.
    # This does not break because of other fullreposet misbehavior.
    return baseset(cs)

def children(repo, subset, x):
    """``children(set)``
    Child changesets of changesets in set.
    """
    s = getset(repo, fullreposet(repo), x)
    cs = _children(repo, subset, s)
    return subset & cs

def closed(repo, subset, x):
    """``closed()``
    Changeset is closed.
    """
    # i18n: "closed" is a keyword
    getargs(x, 0, 0, _("closed takes no arguments"))
    return subset.filter(lambda r: repo[r].closesbranch())

def contains(repo, subset, x):
    """``contains(pattern)``
    The revision's manifest contains a file matching pattern (but might not
    modify it). See :hg:`help patterns` for information about file patterns.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.
    """
    # i18n: "contains" is a keyword
    pat = getstring(x, _("contains requires a pattern"))

    def matches(x):
        if not matchmod.patkind(pat):
            pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
            if pats in repo[x]:
                return True
        else:
            c = repo[x]
            m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
            for f in c.manifest():
                if m(f):
                    return True
        return False

    return subset.filter(matches)

def converted(repo, subset, x):
    """``converted([id])``
    Changesets converted from the given identifier in the old repository if
    present, or all converted changesets if no identifier is specified.
    """
    # There is exactly no chance of resolving the revision, so do a simple
    # string compare and hope for the best
    rev = None
    # i18n: "converted" is a keyword
    l = getargs(x, 0, 1, _('converted takes one or no arguments'))
    if l:
        # i18n: "converted" is a keyword
        rev = getstring(l[0], _('converted requires a revision'))

    def _matchvalue(r):
        source = repo[r].extra().get('convert_revision', None)
        return source is not None and (rev is None or source.startswith(rev))

    return subset.filter(lambda r: _matchvalue(r))

def date(repo, subset, x):
    """``date(interval)``
    Changesets within the interval, see :hg:`help dates`.
    """
    # i18n: "date" is a keyword
    ds = getstring(x, _("date requires a string"))
    dm = util.matchdate(ds)
    return subset.filter(lambda x: dm(repo[x].date()[0]))

def desc(repo, subset, x):
    """``desc(string)``
    Search commit message for string. The match is case-insensitive.
    """
    # i18n: "desc" is a keyword
    ds = encoding.lower(getstring(x, _("desc requires a string")))

    def matches(x):
        c = repo[x]
        return ds in encoding.lower(c.description())

    return subset.filter(matches)

def _descendants(repo, subset, x, followfirst=False):
    roots = getset(repo, fullreposet(repo), x)
    if not roots:
        return baseset()
    s = _revdescendants(repo, roots, followfirst)

    # Both sets need to be ascending in order to lazily return the union
    # in the correct order.
    base = subset & roots
    desc = subset & s
    result = base + desc
    if subset.isascending():
        result.sort()
    elif subset.isdescending():
        result.sort(reverse=True)
    else:
        result = subset & result
    return result

def descendants(repo, subset, x):
    """``descendants(set)``
    Changesets which are descendants of changesets in set.
    """
    return _descendants(repo, subset, x)

def _firstdescendants(repo, subset, x):
    # ``_firstdescendants(set)``
    # Like ``descendants(set)`` but follows only the first parents.
    return _descendants(repo, subset, x, followfirst=True)

def destination(repo, subset, x):
    """``destination([set])``
    Changesets that were created by a graft, transplant or rebase operation,
    with the given revisions specified as the source. Omitting the optional set
    is the same as passing all().
    """
    if x is not None:
        sources = getset(repo, fullreposet(repo), x)
    else:
        sources = fullreposet(repo)

    dests = set()
    # subset contains all of the possible destinations that can be returned, so
    # iterate over them and see if their source(s) were provided in the arg set.
    # Even if the immediate src of r is not in the arg set, src's source (or
    # further back) may be. Scanning back further than the immediate src allows
    # transitive transplants and rebases to yield the same results as transitive
    # grafts.
    for r in subset:
        src = _getrevsource(repo, r)
        lineage = None
        while src is not None:
            if lineage is None:
                lineage = list()
            lineage.append(r)
            # The visited lineage is a match if the current source is in the arg
            # set. Since every candidate dest is visited by way of iterating
            # subset, any dests further back in the lineage will be tested by a
            # different iteration over subset. Likewise, if the src was already
            # selected, the current lineage can be selected without going back
            # further.
            if src in sources or src in dests:
                dests.update(lineage)
                break
            r = src
            src = _getrevsource(repo, r)

    return subset.filter(dests.__contains__)

def divergent(repo, subset, x):
    """``divergent()``
    Final successors of changesets with an alternative set of final successors.
    """
    # i18n: "divergent" is a keyword
    getargs(x, 0, 0, _("divergent takes no arguments"))
    divergent = obsmod.getrevs(repo, 'divergent')
    return subset & divergent

def extinct(repo, subset, x):
    """``extinct()``
    Obsolete changesets with obsolete descendants only.
    """
    # i18n: "extinct" is a keyword
    getargs(x, 0, 0, _("extinct takes no arguments"))
    extincts = obsmod.getrevs(repo, 'extinct')
    return subset & extincts

def extra(repo, subset, x):
    """``extra(label, [value])``
    Changesets with the given label in the extra metadata, with the given
    optional value.

    If `value` starts with `re:`, the remainder of the value is treated as
    a regular expression. To match a value that actually starts with `re:`,
    use the prefix `literal:`.
    """
    args = getargsdict(x, 'extra', 'label value')
    if 'label' not in args:
        # i18n: "extra" is a keyword
        raise error.ParseError(_('extra takes at least 1 argument'))
    # i18n: "extra" is a keyword
    label = getstring(args['label'], _('first argument to extra must be '
                                       'a string'))
    value = None

    if 'value' in args:
        # i18n: "extra" is a keyword
        value = getstring(args['value'], _('second argument to extra must be '
                                           'a string'))
        kind, value, matcher = _stringmatcher(value)

    def _matchvalue(r):
        extra = repo[r].extra()
        return label in extra and (value is None or matcher(extra[label]))

    return subset.filter(lambda r: _matchvalue(r))
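
# For example, extra('rebase_source') selects changesets that record a
# rebase_source key at all, and extra('convert_revision', 're:^svn:') narrows
# the match to values satisfying the regular expression (both keys are only
# illustrative; any key stored in a changeset's extra dict can be queried).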

def filelog(repo, subset, x):
    """``filelog(pattern)``
    Changesets connected to the specified filelog.

    For performance reasons, visits only revisions mentioned in the file-level
    filelog, rather than filtering through all changesets (much faster, but
    doesn't include deletes or duplicate changes). For a slower, more accurate
    result, use ``file()``.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.

    If some linkrev points to revisions filtered by the current repoview, we'll
    work around it to return a non-filtered value.
    """

    # i18n: "filelog" is a keyword
    pat = getstring(x, _("filelog requires a pattern"))
    s = set()
    cl = repo.changelog

    if not matchmod.patkind(pat):
        f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
        files = [f]
    else:
        m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
        files = (f for f in repo[None] if m(f))

    for f in files:
        backrevref = {}  # final value for: filerev -> changerev
        lowestchild = {} # lowest known filerev child of a filerev
        delayed = []     # filerev with filtered linkrev, for post-processing
        lowesthead = None # cache for manifest content of all head revisions
        fl = repo.file(f)
        for fr in list(fl):
            rev = fl.linkrev(fr)
            if rev not in cl:
                # changerev pointed in linkrev is filtered
                # record it for post processing.
                delayed.append((fr, rev))
                continue
            for p in fl.parentrevs(fr):
                if 0 <= p and p not in lowestchild:
                    lowestchild[p] = fr
            backrevref[fr] = rev
            s.add(rev)

        # Post-processing of all filerevs we skipped because they were
        # filtered. If such filerevs have known and unfiltered children, this
        # means they have an unfiltered appearance out there. We'll use linkrev
        # adjustment to find one of these appearances. The lowest known child
        # will be used as a starting point because it is the best upper-bound we
        # have.
        #
        # This approach will fail when an unfiltered but linkrev-shadowed
        # appearance exists in a head changeset without unfiltered filerev
        # children anywhere.
        while delayed:
            # must be a descending iteration. To slowly fill lowest child
            # information that is of potential use by the next item.
            fr, rev = delayed.pop()
            lkr = rev
            child = lowestchild.get(fr)
            if child is None:
                # search for existence of this file revision in a head revision.
                # There are three possibilities:
                # - the revision exists in a head and we can find an
                #   introduction from there,
                # - the revision does not exist in a head because it has been
                #   changed since its introduction: we would have found a child
                #   and be in the other 'else' clause,
                # - all versions of the revision are hidden.
                if lowesthead is None:
                    lowesthead = {}
                    for h in repo.heads():
                        fnode = repo[h].manifest().get(f)
                        if fnode is not None:
                            lowesthead[fl.rev(fnode)] = h
                headrev = lowesthead.get(fr)
                if headrev is None:
                    # content is nowhere unfiltered
                    continue
                rev = repo[headrev][f].introrev()
            else:
                # the lowest known child is a good upper bound
                childcrev = backrevref[child]
                # XXX this does not guarantee returning the lowest
                # introduction of this revision, but this gives a
                # result which is a good start and will fit in most
                # cases. We probably need to fix the multiple
                # introductions case properly (report each
                # introduction, even for identical file revisions)
                # once and for all at some point anyway.
                for p in repo[childcrev][f].parents():
                    if p.filerev() == fr:
                        rev = p.rev()
                        break
                if rev == lkr: # no shadowed entry found
                    # XXX This should never happen unless some manifest points
                    # to biggish file revisions (like a revision that uses a
                    # parent that never appears in the manifest ancestors)
                    continue
            # Fill the data for the next iteration.
            for p in fl.parentrevs(fr):
                if 0 <= p and p not in lowestchild:
                    lowestchild[p] = fr
            backrevref[fr] = rev
            s.add(rev)

    return subset & s

def first(repo, subset, x):
    """``first(set, [n])``
    An alias for limit().
    """
    return limit(repo, subset, x)

def _follow(repo, subset, x, name, followfirst=False):
    l = getargs(x, 0, 1, _("%s takes no arguments or a filename") % name)
    c = repo['.']
    if l:
        x = getstring(l[0], _("%s expected a filename") % name)
        if x in c:
            cx = c[x]
            s = set(ctx.rev() for ctx in cx.ancestors(followfirst=followfirst))
            # include the revision responsible for the most recent version
            s.add(cx.introrev())
        else:
            return baseset()
    else:
        s = _revancestors(repo, baseset([c.rev()]), followfirst)

    return subset & s

def follow(repo, subset, x):
    """``follow([file])``
    An alias for ``::.`` (ancestors of the working directory's first parent).
    If a filename is specified, the history of the given file is followed,
    including copies.
    """
    return _follow(repo, subset, x, 'follow')

def _followfirst(repo, subset, x):
    # ``followfirst([file])``
    # Like ``follow([file])`` but follows only the first parent of
    # every revision or file revision.
    return _follow(repo, subset, x, '_followfirst', followfirst=True)

def getall(repo, subset, x):
    """``all()``
    All changesets, the same as ``0:tip``.
    """
    # i18n: "all" is a keyword
    getargs(x, 0, 0, _("all takes no arguments"))
    return subset & spanset(repo)  # drop "null" if any

def grep(repo, subset, x):
    """``grep(regex)``
    Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
    to ensure special escape characters are handled correctly. Unlike
    ``keyword(string)``, the match is case-sensitive.
    """
    try:
        # i18n: "grep" is a keyword
        gr = re.compile(getstring(x, _("grep requires a string")))
    except re.error as e:
        raise error.ParseError(_('invalid match pattern: %s') % e)

    def matches(x):
        c = repo[x]
        for e in c.files() + [c.user(), c.description()]:
            if gr.search(e):
                return True
        return False

    return subset.filter(matches)
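
# For example, grep(r'bug\d+') selects changesets whose description, user name,
# or touched file names match that regular expression, case-sensitively.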

def _matchfiles(repo, subset, x):
    # _matchfiles takes a revset list of prefixed arguments:
    #
    #   [p:foo, i:bar, x:baz]
    #
    # builds a match object from them and filters subset. Allowed
    # prefixes are 'p:' for regular patterns, 'i:' for include
    # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
    # a revision identifier, or the empty string to reference the
    # working directory, from which the match object is
    # initialized. Use 'd:' to set the default matching mode, default
    # to 'glob'. At most one 'r:' and 'd:' argument can be passed.

    # i18n: "_matchfiles" is a keyword
    l = getargs(x, 1, -1, _("_matchfiles requires at least one argument"))
    pats, inc, exc = [], [], []
    rev, default = None, None
    for arg in l:
        # i18n: "_matchfiles" is a keyword
        s = getstring(arg, _("_matchfiles requires string arguments"))
        prefix, value = s[:2], s[2:]
        if prefix == 'p:':
            pats.append(value)
        elif prefix == 'i:':
            inc.append(value)
        elif prefix == 'x:':
            exc.append(value)
        elif prefix == 'r:':
            if rev is not None:
                # i18n: "_matchfiles" is a keyword
                raise error.ParseError(_('_matchfiles expected at most one '
                                         'revision'))
            if value != '': # empty means working directory; leave rev as None
                rev = value
        elif prefix == 'd:':
            if default is not None:
                # i18n: "_matchfiles" is a keyword
                raise error.ParseError(_('_matchfiles expected at most one '
                                         'default mode'))
            default = value
        else:
            # i18n: "_matchfiles" is a keyword
            raise error.ParseError(_('invalid _matchfiles prefix: %s') % prefix)
    if not default:
        default = 'glob'

    m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
                       exclude=exc, ctx=repo[rev], default=default)

    def matches(x):
        for f in repo[x].files():
            if m(f):
                return True
        return False

    return subset.filter(matches)

def hasfile(repo, subset, x):
    """``file(pattern)``
    Changesets affecting files matched by pattern.

    For a faster but less accurate result, consider using ``filelog()``
    instead.

    This predicate uses ``glob:`` as the default kind of pattern.
    """
    # i18n: "file" is a keyword
    pat = getstring(x, _("file requires a pattern"))
    return _matchfiles(repo, subset, ('string', 'p:' + pat))

def head(repo, subset, x):
    """``head()``
    Changeset is a named branch head.
    """
    # i18n: "head" is a keyword
    getargs(x, 0, 0, _("head takes no arguments"))
    hs = set()
    cl = repo.changelog
    for b, ls in repo.branchmap().iteritems():
        hs.update(cl.rev(h) for h in ls)
    # XXX using a set to feed the baseset is wrong. Sets are not ordered.
    # This does not break because of other fullreposet misbehavior.
    # XXX We should combine with subset first: 'subset & baseset(...)'. This is
    # necessary to ensure we preserve the order in subset.
    return baseset(hs) & subset

def heads(repo, subset, x):
    """``heads(set)``
    Members of set with no children in set.
    """
    s = getset(repo, subset, x)
    ps = parents(repo, subset, x)
    return s - ps

def hidden(repo, subset, x):
    """``hidden()``
    Hidden changesets.
    """
    # i18n: "hidden" is a keyword
    getargs(x, 0, 0, _("hidden takes no arguments"))
    hiddenrevs = repoview.filterrevs(repo, 'visible')
    return subset & hiddenrevs

def keyword(repo, subset, x):
    """``keyword(string)``
    Search commit message, user name, and names of changed files for
    string. The match is case-insensitive.
    """
    # i18n: "keyword" is a keyword
    kw = encoding.lower(getstring(x, _("keyword requires a string")))

    def matches(r):
        c = repo[r]
        return any(kw in encoding.lower(t)
                   for t in c.files() + [c.user(), c.description()])

    return subset.filter(matches)

def limit(repo, subset, x):
    """``limit(set, [n])``
    First n members of set, defaulting to 1.
    """
    # i18n: "limit" is a keyword
    l = getargs(x, 1, 2, _("limit requires one or two arguments"))
    try:
        lim = 1
        if len(l) == 2:
            # i18n: "limit" is a keyword
            lim = int(getstring(l[1], _("limit requires a number")))
    except (TypeError, ValueError):
        # i18n: "limit" is a keyword
        raise error.ParseError(_("limit expects a number"))
    ss = subset
    os = getset(repo, fullreposet(repo), l[0])
    result = []
    it = iter(os)
    for x in xrange(lim):
        y = next(it, None)
        if y is None:
            break
        elif y in ss:
            result.append(y)
    return baseset(result)
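
# For example, limit(all(), 3) keeps only the first three members of all(),
# while limit(all()) keeps a single member, since n defaults to 1.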
Idan Kamara
|
r13915 | |||
Matt Mackall
|
r14061 | def last(repo, subset, x): | ||
Matt Mackall
|
r15116 | """``last(set, [n])`` | ||
Last n members of set, defaulting to 1. | ||||
Matt Mackall
|
r14061 | """ | ||
# i18n: "last" is a keyword | ||||
Matt Mackall
|
r15116 | l = getargs(x, 1, 2, _("last requires one or two arguments")) | ||
Matt Mackall
|
r14061 | try: | ||
Matt Mackall
|
r15116 | lim = 1 | ||
if len(l) == 2: | ||||
# i18n: "last" is a keyword | ||||
lim = int(getstring(l[1], _("last requires a number"))) | ||||
Matt Mackall
|
r14851 | except (TypeError, ValueError): | ||
Matt Mackall
|
r14061 | # i18n: "last" is a keyword | ||
raise error.ParseError(_("last expects a number")) | ||||
Pierre-Yves David
|
r22871 | ss = subset | ||
Yuya Nishihara
|
r24115 | os = getset(repo, fullreposet(repo), l[0]) | ||
Lucas Moscovicz
|
r20534 | os.reverse() | ||
Pierre-Yves David
|
r22805 | result = [] | ||
Lucas Moscovicz
|
r20534 | it = iter(os) | ||
for x in xrange(lim): | ||||
Pierre-Yves David
|
r25145 | y = next(it, None) | ||
if y is None: | ||||
Lucas Moscovicz
|
r20534 | break | ||
Pierre-Yves David
|
r25145 | elif y in ss: | ||
result.append(y) | ||||
Pierre-Yves David
|
r22805 | return baseset(result) | ||
Matt Mackall
|
r14061 | |||
Idan Kamara
|
r13915 | def maxrev(repo, subset, x): | ||
"""``max(set)`` | ||||
Changeset with highest revision number in set. | ||||
""" | ||||
Yuya Nishihara
|
r24115 | os = getset(repo, fullreposet(repo), x) | ||
Mads Kiilerich
|
r14153 | if os: | ||
Lucas Moscovicz
|
r20754 | m = os.max() | ||
Idan Kamara
|
r13915 | if m in subset: | ||
Lucas Moscovicz
|
r20364 | return baseset([m]) | ||
Pierre-Yves David
|
r22802 | return baseset() | ||
Idan Kamara
|
r13915 | |||
def merge(repo, subset, x): | ||||
"""``merge()`` | ||||
Changeset is a merge changeset. | ||||
""" | ||||
# i18n: "merge" is a keyword | ||||
getargs(x, 0, 0, _("merge takes no arguments")) | ||||
cl = repo.changelog | ||||
Lucas Moscovicz
|
r20611 | return subset.filter(lambda r: cl.parentrevs(r)[1] != -1) | ||
Idan Kamara
|
r13915 | |||
Ivan Andrus
|
r17753 | def branchpoint(repo, subset, x): | ||
"""``branchpoint()`` | ||||
Changesets with more than one child. | ||||
""" | ||||
# i18n: "branchpoint" is a keyword | ||||
getargs(x, 0, 0, _("branchpoint takes no arguments")) | ||||
cl = repo.changelog | ||||
if not subset: | ||||
Pierre-Yves David
|
r22802 | return baseset() | ||
Pierre-Yves David
|
r25549 | # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset | ||
# (and if it is not, it should.) | ||||
Ivan Andrus
|
r17753 | baserev = min(subset) | ||
parentscount = [0]*(len(repo) - baserev) | ||||
Pierre-Yves David
|
r17785 | for r in cl.revs(start=baserev + 1): | ||
Ivan Andrus
|
r17753 | for p in cl.parentrevs(r): | ||
if p >= baserev: | ||||
parentscount[p - baserev] += 1 | ||||
Lucas Moscovicz
|
r20611 | return subset.filter(lambda r: parentscount[r - baserev] > 1) | ||
Ivan Andrus
|
r17753 | |||
Idan Kamara
|
r13915 | def minrev(repo, subset, x): | ||
"""``min(set)`` | ||||
Changeset with lowest revision number in set. | ||||
""" | ||||
Yuya Nishihara
|
r24115 | os = getset(repo, fullreposet(repo), x) | ||
Mads Kiilerich
|
r14153 | if os: | ||
Lucas Moscovicz
|
r20754 | m = os.min() | ||
Idan Kamara
|
r13915 | if m in subset: | ||
Lucas Moscovicz
|
r20364 | return baseset([m]) | ||
Pierre-Yves David
|
r22802 | return baseset() | ||
Idan Kamara
|
r13915 | |||
def modifies(repo, subset, x): | ||||
"""``modifies(pattern)`` | ||||
Changesets modifying files matched by pattern. | ||||
FUJIWARA Katsunori
|
r20289 | |||
The pattern without explicit kind like ``glob:`` is expected to be | ||||
relative to the current directory and match against a file or a | ||||
directory. | ||||
Idan Kamara
|
r13915 | """ | ||
# i18n: "modifies" is a keyword | ||||
pat = getstring(x, _("modifies requires a pattern")) | ||||
return checkstatus(repo, subset, pat, 0) | ||||
Sean Farley
|
r23836 | def named(repo, subset, x): | ||
"""``named(namespace)`` | ||||
The changesets in a given namespace. | ||||
If `namespace` starts with `re:`, the remainder of the string is treated as | ||||
a regular expression. To match a namespace that actually starts with `re:`, | ||||
use the prefix `literal:`. | ||||
""" | ||||
# i18n: "named" is a keyword | ||||
args = getargs(x, 1, 1, _('named requires a namespace argument')) | ||||
ns = getstring(args[0], | ||||
# i18n: "named" is a keyword | ||||
_('the argument to named must be a string')) | ||||
kind, pattern, matcher = _stringmatcher(ns) | ||||
namespaces = set() | ||||
if kind == 'literal': | ||||
if pattern not in repo.names: | ||||
FUJIWARA Katsunori
|
r23978 | raise error.RepoLookupError(_("namespace '%s' does not exist") | ||
% ns) | ||||
Sean Farley
|
r23836 | namespaces.add(repo.names[pattern]) | ||
else: | ||||
for name, ns in repo.names.iteritems(): | ||||
if matcher(name): | ||||
namespaces.add(ns) | ||||
if not namespaces: | ||||
FUJIWARA Katsunori
|
r23978 | raise error.RepoLookupError(_("no namespace exists" | ||
" that match '%s'") % pattern) | ||||
Sean Farley
|
r23836 | |||
names = set() | ||||
for ns in namespaces: | ||||
for name in ns.listnames(repo): | ||||
FUJIWARA Katsunori
|
r24151 | if name not in ns.deprecated: | ||
names.update(repo[n].rev() for n in ns.nodes(repo, name)) | ||||
Sean Farley
|
r23836 | |||
names -= set([node.nullrev]) | ||||
return subset & names | ||||
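# Illustrative usage (hedged; assumes a repository where the built-in
# "bookmarks" namespace is registered):
#   hg log -r "named('bookmarks')"   # changesets pointed to by any bookmark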
Matt Mackall
|
r16417 | def node_(repo, subset, x): | ||
Patrick Mezard
|
r12821 | """``id(string)`` | ||
Wagner Bruna
|
r12859 | Revision non-ambiguously specified by the given hex string prefix. | ||
Patrick Mezard
|
r12821 | """ | ||
Martin Geisler
|
r12815 | # i18n: "id" is a keyword | ||
Benoit Boissinot
|
r12736 | l = getargs(x, 1, 1, _("id requires one argument")) | ||
Martin Geisler
|
r12815 | # i18n: "id" is a keyword | ||
Benoit Boissinot
|
r12736 | n = getstring(l[0], _("id requires a string")) | ||
Augie Fackler
|
r12716 | if len(n) == 40: | ||
Alexander Drozdov
|
r24904 | try: | ||
rn = repo.changelog.rev(node.bin(n)) | ||||
except (LookupError, TypeError): | ||||
rn = None | ||||
Augie Fackler
|
r12716 | else: | ||
Matt Harbison
|
r16735 | rn = None | ||
pm = repo.changelog._partialmatch(n) | ||||
if pm is not None: | ||||
rn = repo.changelog.rev(pm) | ||||
Pierre-Yves David
|
r23005 | if rn is None: | ||
return baseset() | ||||
result = baseset([rn]) | ||||
return result & subset | ||||
Augie Fackler
|
r12716 | |||
Pierre-Yves David
|
r17170 | def obsolete(repo, subset, x): | ||
"""``obsolete()`` | ||||
Mutable changeset with a newer version.""" | ||||
FUJIWARA Katsunori
|
r17259 | # i18n: "obsolete" is a keyword | ||
Pierre-Yves David
|
r17170 | getargs(x, 0, 0, _("obsolete takes no arguments")) | ||
Pierre-Yves David
|
r17825 | obsoletes = obsmod.getrevs(repo, 'obsolete') | ||
Lucas Moscovicz
|
r20367 | return subset & obsoletes | ||
Pierre-Yves David
|
r17170 | |||
Yuya Nishihara
|
r23466 | def only(repo, subset, x): | ||
"""``only(set, [set])`` | ||||
Changesets that are ancestors of the first set that are not ancestors | ||||
of any other head in the repo. If a second set is specified, the result | ||||
is ancestors of the first set that are not ancestors of the second set | ||||
(i.e. ::<set1> - ::<set2>). | ||||
""" | ||||
cl = repo.changelog | ||||
# i18n: "only" is a keyword | ||||
args = getargs(x, 1, 2, _('only takes one or two arguments')) | ||||
Yuya Nishihara
|
r24115 | include = getset(repo, fullreposet(repo), args[0]) | ||
Yuya Nishihara
|
r23466 | if len(args) == 1: | ||
if not include: | ||||
return baseset() | ||||
descendants = set(_revdescendants(repo, include, False)) | ||||
exclude = [rev for rev in cl.headrevs() | ||||
if rev not in descendants and rev not in include] | ||||
else: | ||||
Yuya Nishihara
|
r24115 | exclude = getset(repo, fullreposet(repo), args[1]) | ||
Yuya Nishihara
|
r23466 | |||
results = set(cl.findmissingrevs(common=exclude, heads=include)) | ||||
Pierre-Yves David
|
r25554 | # XXX we should turn this into a baseset instead of a set; smartset may be | ||
# able to apply some optimisations based on the fact that this is a baseset. | ||||
Yuya Nishihara
|
r23466 | return subset & results | ||
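# Illustrative equivalences (hedged), restating the docstring above:
#   only(A, B)  is the same set as  ::A - ::B
#   only(A)     is the ancestors of A that are not ancestors of any other head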
Matt Harbison
|
r17185 | def origin(repo, subset, x): | ||
"""``origin([set])`` | ||||
Changesets that were specified as a source for the grafts, transplants or | ||||
rebases that created the given revisions. Omitting the optional set is the | ||||
same as passing all(). If a changeset created by these operations is itself | ||||
specified as a source for one of these operations, only the source changeset | ||||
for the first operation is selected. | ||||
""" | ||||
if x is not None: | ||||
Yuya Nishihara
|
r24115 | dests = getset(repo, fullreposet(repo), x) | ||
Matt Harbison
|
r17185 | else: | ||
Yuya Nishihara
|
r24201 | dests = fullreposet(repo) | ||
Matt Harbison
|
r17185 | |||
def _firstsrc(rev): | ||||
src = _getrevsource(repo, rev) | ||||
if src is None: | ||||
return None | ||||
while True: | ||||
prev = _getrevsource(repo, src) | ||||
if prev is None: | ||||
return src | ||||
src = prev | ||||
Mads Kiilerich
|
r22944 | o = set([_firstsrc(r) for r in dests]) | ||
Pierre-Yves David
|
r22498 | o -= set([None]) | ||
Pierre-Yves David
|
r25554 | # XXX we should turn this into a baseset instead of a set; smartset may be | ||
# able to apply some optimisations based on the fact that this is a baseset. | ||||
Pierre-Yves David
|
r22536 | return subset & o | ||
Matt Harbison
|
r17185 | |||
Idan Kamara
|
r13915 | def outgoing(repo, subset, x): | ||
"""``outgoing([path])`` | ||||
Changesets not found in the specified destination repository, or the | ||||
default push location. | ||||
Patrick Mezard
|
r12821 | """ | ||
Gregory Szorc
|
r24722 | # Avoid cycles. | ||
import discovery | ||||
import hg | ||||
Idan Kamara
|
r13915 | # i18n: "outgoing" is a keyword | ||
Mads Kiilerich
|
r14717 | l = getargs(x, 0, 1, _("outgoing takes one or no arguments")) | ||
Idan Kamara
|
r13915 | # i18n: "outgoing" is a keyword | ||
dest = l and getstring(l[0], _("outgoing requires a repository path")) or '' | ||||
dest = repo.ui.expandpath(dest or 'default-push', dest or 'default') | ||||
dest, branches = hg.parseurl(dest) | ||||
revs, checkout = hg.addbranchrevs(repo, repo, branches, []) | ||||
if revs: | ||||
revs = [repo.lookup(rev) for rev in revs] | ||||
Matt Mackall
|
r14556 | other = hg.peer(repo, {}, dest) | ||
Idan Kamara
|
r13915 | repo.ui.pushbuffer() | ||
Pierre-Yves David
|
r15837 | outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs) | ||
Idan Kamara
|
r13915 | repo.ui.popbuffer() | ||
cl = repo.changelog | ||||
Pierre-Yves David
|
r15837 | o = set([cl.rev(r) for r in outgoing.missing]) | ||
Pierre-Yves David
|
r22529 | return subset & o | ||
Augie Fackler
|
r12716 | |||
Matt Mackall
|
r11275 | def p1(repo, subset, x): | ||
Kevin Bullock
|
r12928 | """``p1([set])`` | ||
First parent of changesets in set, or the working directory. | ||||
Patrick Mezard
|
r12821 | """ | ||
Kevin Bullock
|
r12928 | if x is None: | ||
Matt Mackall
|
r13878 | p = repo[x].p1().rev() | ||
Pierre-Yves David
|
r22538 | if p >= 0: | ||
return subset & baseset([p]) | ||||
Pierre-Yves David
|
r22802 | return baseset() | ||
Kevin Bullock
|
r12928 | |||
Matt Mackall
|
r11275 | ps = set() | ||
cl = repo.changelog | ||||
Yuya Nishihara
|
r24115 | for r in getset(repo, fullreposet(repo), x): | ||
Matt Mackall
|
r11275 | ps.add(cl.parentrevs(r)[0]) | ||
Pierre-Yves David
|
r22495 | ps -= set([node.nullrev]) | ||
Pierre-Yves David
|
r25554 | # XXX we should turn this into a baseset instead of a set; smartset may be | ||
# able to apply some optimisations based on the fact that this is a baseset. | ||||
Lucas Moscovicz
|
r20367 | return subset & ps | ||
Matt Mackall
|
r11275 | |||
def p2(repo, subset, x): | ||||
Kevin Bullock
|
r12928 | """``p2([set])`` | ||
Second parent of changesets in set, or the working directory. | ||||
Patrick Mezard
|
r12821 | """ | ||
Kevin Bullock
|
r12928 | if x is None: | ||
ps = repo[x].parents() | ||||
try: | ||||
Patrick Mezard
|
r12935 | p = ps[1].rev() | ||
Pierre-Yves David
|
r22539 | if p >= 0: | ||
return subset & baseset([p]) | ||||
Pierre-Yves David
|
r22802 | return baseset() | ||
Kevin Bullock
|
r12928 | except IndexError: | ||
Pierre-Yves David
|
r22802 | return baseset() | ||
Kevin Bullock
|
r12928 | |||
Matt Mackall
|
r11275 | ps = set() | ||
cl = repo.changelog | ||||
Yuya Nishihara
|
r24115 | for r in getset(repo, fullreposet(repo), x): | ||
Matt Mackall
|
r11275 | ps.add(cl.parentrevs(r)[1]) | ||
Pierre-Yves David
|
r22495 | ps -= set([node.nullrev]) | ||
Pierre-Yves David
|
r25554 | # XXX we should turn this into a baseset instead of a set; smartset may be | ||
# able to apply some optimisations based on the fact that this is a baseset. | ||||
Lucas Moscovicz
|
r20367 | return subset & ps | ||
Matt Mackall
|
r11275 | |||
def parents(repo, subset, x): | ||||
Kevin Bullock
|
r12929 | """``parents([set])`` | ||
The set of all parents for all changesets in set, or the working directory. | ||||
Patrick Mezard
|
r12821 | """ | ||
Kevin Bullock
|
r12929 | if x is None: | ||
Pierre-Yves David
|
r22496 | ps = set(p.rev() for p in repo[x].parents()) | ||
else: | ||||
ps = set() | ||||
cl = repo.changelog | ||||
Pierre-Yves David
|
r25716 | up = ps.update | ||
parentrevs = cl.parentrevs | ||||
Yuya Nishihara
|
r24115 | for r in getset(repo, fullreposet(repo), x): | ||
Yuya Nishihara
|
r25765 | if r == node.wdirrev: | ||
Pierre-Yves David
|
r25716 | up(p.rev() for p in repo[r].parents()) | ||
Matt Harbison
|
r25689 | else: | ||
Pierre-Yves David
|
r25716 | up(parentrevs(r)) | ||
Pierre-Yves David
|
r22497 | ps -= set([node.nullrev]) | ||
Pierre-Yves David
|
r22712 | return subset & ps | ||
Matt Mackall
|
r11275 | |||
Pierre-Yves David
|
r25621 | def _phase(repo, subset, target): | ||
"""helper to select all rev in phase <target>""" | ||||
Pierre-Yves David
|
r25622 | repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded | ||
if repo._phasecache._phasesets: | ||||
s = repo._phasecache._phasesets[target] - repo.changelog.filteredrevs | ||||
s = baseset(s) | ||||
s.sort() # sets are unordered, so we enforce ascending order | ||||
return subset & s | ||||
else: | ||||
phase = repo._phasecache.phase | ||||
condition = lambda r: phase(repo, r) == target | ||||
return subset.filter(condition, cache=False) | ||||
Pierre-Yves David
|
r25621 | |||
def draft(repo, subset, x): | ||||
"""``draft()`` | ||||
Changeset in draft phase.""" | ||||
# i18n: "draft" is a keyword | ||||
getargs(x, 0, 0, _("draft takes no arguments")) | ||||
target = phases.draft | ||||
return _phase(repo, subset, target) | ||||
def secret(repo, subset, x): | ||||
"""``secret()`` | ||||
Changeset in secret phase.""" | ||||
# i18n: "secret" is a keyword | ||||
getargs(x, 0, 0, _("secret takes no arguments")) | ||||
target = phases.secret | ||||
return _phase(repo, subset, target) | ||||
Kevin Gessner
|
r14070 | def parentspec(repo, subset, x, n): | ||
"""``set^0`` | ||||
The set. | ||||
``set^1`` (or ``set^``), ``set^2`` | ||||
First or second parent, respectively, of all changesets in set. | ||||
Patrick Mezard
|
r12821 | """ | ||
Brodie Rao
|
r12320 | try: | ||
Kevin Gessner
|
r14070 | n = int(n[1]) | ||
Kevin Gessner
|
r14072 | if n not in (0, 1, 2): | ||
Kevin Gessner
|
r14070 | raise ValueError | ||
Matt Mackall
|
r14851 | except (TypeError, ValueError): | ||
Kevin Gessner
|
r14070 | raise error.ParseError(_("^ expects a number 0, 1, or 2")) | ||
ps = set() | ||||
Matt Mackall
|
r11275 | cl = repo.changelog | ||
Pierre-Yves David
|
r23165 | for r in getset(repo, fullreposet(repo), x): | ||
Kevin Gessner
|
r14070 | if n == 0: | ||
ps.add(r) | ||||
elif n == 1: | ||||
ps.add(cl.parentrevs(r)[0]) | ||||
elif n == 2: | ||||
parents = cl.parentrevs(r) | ||||
if len(parents) > 1: | ||||
ps.add(parents[1]) | ||||
Lucas Moscovicz
|
r20367 | return subset & ps | ||
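# Illustrative usage (hedged): "tip^" selects the first parent of tip, while
# "tip^2" selects its second parent (effectively empty when tip is not a merge).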
Matt Mackall
|
r11275 | |||
Wagner Bruna
|
r11944 | def present(repo, subset, x): | ||
Patrick Mezard
|
r12821 | """``present(set)`` | ||
An empty set, if any revision in set isn't found; otherwise, | ||||
all revisions in set. | ||||
FUJIWARA Katsunori
|
r16748 | |||
If any of the specified revisions is not present in the local repository, | ||||
the query is normally aborted. But this predicate allows the query | ||||
to continue even in such cases. | ||||
Patrick Mezard
|
r12821 | """ | ||
Wagner Bruna
|
r11944 | try: | ||
return getset(repo, subset, x) | ||||
except error.RepoLookupError: | ||||
Pierre-Yves David
|
r22802 | return baseset() | ||
Wagner Bruna
|
r11944 | |||
Yuya Nishihara
|
r25224 | # for internal use | ||
Laurent Charignon
|
r25191 | def _notpublic(repo, subset, x): | ||
Yuya Nishihara
|
r25225 | getargs(x, 0, 0, "_notpublic takes no arguments") | ||
Pierre-Yves David
|
r25612 | repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded | ||
Laurent Charignon
|
r25191 | if repo._phasecache._phasesets: | ||
s = set() | ||||
for u in repo._phasecache._phasesets[1:]: | ||||
s.update(u) | ||||
Pierre-Yves David
|
r25619 | s = baseset(s - repo.changelog.filteredrevs) | ||
s.sort() | ||||
Laurent Charignon
|
r25191 | return subset & s | ||
else: | ||||
phase = repo._phasecache.phase | ||||
target = phases.public | ||||
condition = lambda r: phase(repo, r) != target | ||||
return subset.filter(condition, cache=False) | ||||
Pierre-Yves David
|
r15819 | def public(repo, subset, x): | ||
"""``public()`` | ||||
Changeset in public phase.""" | ||||
FUJIWARA Katsunori
|
r17259 | # i18n: "public" is a keyword | ||
Pierre-Yves David
|
r15819 | getargs(x, 0, 0, _("public takes no arguments")) | ||
Pierre-Yves David
|
r23019 | phase = repo._phasecache.phase | ||
target = phases.public | ||||
condition = lambda r: phase(repo, r) == target | ||||
Pierre-Yves David
|
r23018 | return subset.filter(condition, cache=False) | ||
Pierre-Yves David
|
r15819 | |||
Matt Mackall
|
r15936 | def remote(repo, subset, x): | ||
FUJIWARA Katsunori
|
r16007 | """``remote([id [,path]])`` | ||
Matt Mackall
|
r15936 | Local revision that corresponds to the given identifier in a | ||
remote repository, if present. Here, the '.' identifier is a | ||||
synonym for the current local branch. | ||||
""" | ||||
import hg # avoid start-up nasties | ||||
# i18n: "remote" is a keyword | ||||
FUJIWARA Katsunori
|
r16007 | l = getargs(x, 0, 2, _("remote takes one, two or no arguments")) | ||
Matt Mackall
|
r15936 | |||
q = '.' | ||||
if len(l) > 0: | ||||
# i18n: "remote" is a keyword | ||||
q = getstring(l[0], _("remote requires a string id")) | ||||
if q == '.': | ||||
q = repo['.'].branch() | ||||
dest = '' | ||||
if len(l) > 1: | ||||
# i18n: "remote" is a keyword | ||||
dest = getstring(l[1], _("remote requires a repository path")) | ||||
dest = repo.ui.expandpath(dest or 'default') | ||||
dest, branches = hg.parseurl(dest) | ||||
revs, checkout = hg.addbranchrevs(repo, repo, branches, []) | ||||
if revs: | ||||
revs = [repo.lookup(rev) for rev in revs] | ||||
other = hg.peer(repo, {}, dest) | ||||
n = other.lookup(q) | ||||
if n in repo: | ||||
r = repo[n].rev() | ||||
FUJIWARA Katsunori
|
r16006 | if r in subset: | ||
Lucas Moscovicz
|
r20364 | return baseset([r]) | ||
Pierre-Yves David
|
r22802 | return baseset() | ||
Matt Mackall
|
r15936 | |||
Matt Mackall
|
r11275 | def removes(repo, subset, x): | ||
Patrick Mezard
|
r12821 | """``removes(pattern)`` | ||
Changesets which remove files matching pattern. | ||||
FUJIWARA Katsunori
|
r20289 | |||
The pattern without explicit kind like ``glob:`` is expected to be | ||||
relative to the current directory and match against a file or a | ||||
directory. | ||||
Patrick Mezard
|
r12821 | """ | ||
Martin Geisler
|
r12815 | # i18n: "removes" is a keyword | ||
Benoit Boissinot
|
r12736 | pat = getstring(x, _("removes requires a pattern")) | ||
Matt Mackall
|
r11275 | return checkstatus(repo, subset, pat, 2) | ||
Idan Kamara
|
r13915 | def rev(repo, subset, x): | ||
"""``rev(number)`` | ||||
Revision with the given numeric identifier. | ||||
Patrick Mezard
|
r12821 | """ | ||
Idan Kamara
|
r13915 | # i18n: "rev" is a keyword | ||
l = getargs(x, 1, 1, _("rev requires one argument")) | ||||
try: | ||||
# i18n: "rev" is a keyword | ||||
l = int(getstring(l[0], _("rev requires a number"))) | ||||
Matt Mackall
|
r14851 | except (TypeError, ValueError): | ||
Idan Kamara
|
r13915 | # i18n: "rev" is a keyword | ||
raise error.ParseError(_("rev expects a number")) | ||||
Yuya Nishihara
|
r24031 | if l not in repo.changelog and l != node.nullrev: | ||
Yuya Nishihara
|
r23062 | return baseset() | ||
Pierre-Yves David
|
r22537 | return subset & baseset([l]) | ||
Matt Mackall
|
r11275 | |||
Angel Ezquerra
|
r16402 | def matching(repo, subset, x): | ||
"""``matching(revision [, field])`` | ||||
Changesets in which a given set of fields match the set of fields in the | ||||
selected revision or set. | ||||
FUJIWARA Katsunori
|
r16528 | |||
Angel Ezquerra
|
r16402 | To match more than one field, pass the list of fields to match separated | ||
FUJIWARA Katsunori
|
r16528 | by spaces (e.g. ``author description``). | ||
Valid fields are most regular revision fields and some special fields. | ||||
Regular revision fields are ``description``, ``author``, ``branch``, | ||||
Angel Ezquerra
|
r17102 | ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user`` | ||
and ``diff``. | ||||
Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the | ||||
contents of the revision. Two revisions matching their ``diff`` will | ||||
also match their ``files``. | ||||
FUJIWARA Katsunori
|
r16528 | |||
Special fields are ``summary`` and ``metadata``: | ||||
``summary`` matches the first line of the description. | ||||
Jesse Glick
|
r16639 | ``metadata`` is equivalent to matching ``description user date`` | ||
FUJIWARA Katsunori
|
r16528 | (i.e. it matches the main metadata fields). | ||
``metadata`` is the default field which is used when no fields are | ||||
specified. You can match more than one field at a time. | ||||
Angel Ezquerra
|
r16402 | """ | ||
FUJIWARA Katsunori
|
r17259 | # i18n: "matching" is a keyword | ||
Angel Ezquerra
|
r16402 | l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments")) | ||
Pierre-Yves David
|
r23166 | revs = getset(repo, fullreposet(repo), l[0]) | ||
Angel Ezquerra
|
r16402 | |||
fieldlist = ['metadata'] | ||||
if len(l) > 1: | ||||
fieldlist = getstring(l[1], | ||||
FUJIWARA Katsunori
|
r17259 | # i18n: "matching" is a keyword | ||
Angel Ezquerra
|
r16402 | _("matching requires a string " | ||
"as its second argument")).split() | ||||
Angel Ezquerra
|
r17102 | # Make sure that there are no repeated fields, | ||
# expand the 'special' 'metadata' field type | ||||
# and check the 'files' whenever we check the 'diff' | ||||
Angel Ezquerra
|
r16402 | fields = [] | ||
for field in fieldlist: | ||||
if field == 'metadata': | ||||
fields += ['user', 'description', 'date'] | ||||
Angel Ezquerra
|
r17102 | elif field == 'diff': | ||
# a revision matching the diff must also match the files | ||||
# since matching the diff is very costly, make sure to | ||||
# also match the files first | ||||
fields += ['files', 'diff'] | ||||
Angel Ezquerra
|
r16402 | else: | ||
if field == 'author': | ||||
field = 'user' | ||||
fields.append(field) | ||||
fields = set(fields) | ||||
Angel Ezquerra
|
r16444 | if 'summary' in fields and 'description' in fields: | ||
# If a revision matches its description it also matches its summary | ||||
fields.discard('summary') | ||||
Angel Ezquerra
|
r16402 | |||
# We may want to match more than one field | ||||
Angel Ezquerra
|
r16446 | # Not all fields take the same amount of time to be matched | ||
# Sort the selected fields in order of increasing matching cost | ||||
Patrick Mezard
|
r16453 | fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary', | ||
Angel Ezquerra
|
r17102 | 'files', 'description', 'substate', 'diff'] | ||
Angel Ezquerra
|
r16446 | def fieldkeyfunc(f): | ||
try: | ||||
return fieldorder.index(f) | ||||
except ValueError: | ||||
# assume an unknown field is very costly | ||||
return len(fieldorder) | ||||
fields = list(fields) | ||||
fields.sort(key=fieldkeyfunc) | ||||
Angel Ezquerra
|
r16402 | # Each field will be matched with its own "getfield" function | ||
# which will be added to the getfieldfuncs array of functions | ||||
getfieldfuncs = [] | ||||
_funcs = { | ||||
'user': lambda r: repo[r].user(), | ||||
'branch': lambda r: repo[r].branch(), | ||||
'date': lambda r: repo[r].date(), | ||||
'description': lambda r: repo[r].description(), | ||||
'files': lambda r: repo[r].files(), | ||||
'parents': lambda r: repo[r].parents(), | ||||
'phase': lambda r: repo[r].phase(), | ||||
'substate': lambda r: repo[r].substate, | ||||
'summary': lambda r: repo[r].description().splitlines()[0], | ||||
Angel Ezquerra
|
r17102 | 'diff': lambda r: list(repo[r].diff(git=True)), | ||
Angel Ezquerra
|
r16402 | } | ||
for info in fields: | ||||
getfield = _funcs.get(info, None) | ||||
if getfield is None: | ||||
raise error.ParseError( | ||||
FUJIWARA Katsunori
|
r17259 | # i18n: "matching" is a keyword | ||
Angel Ezquerra
|
r16402 | _("unexpected field name passed to matching: %s") % info) | ||
getfieldfuncs.append(getfield) | ||||
# convert the getfield array of functions into a "getinfo" function | ||||
# which returns an array of field values (or a single value if there | ||||
# is only one field to match) | ||||
Angel Ezquerra
|
r16445 | getinfo = lambda r: [f(r) for f in getfieldfuncs] | ||
Angel Ezquerra
|
r16402 | |||
Lucas Moscovicz
|
r20459 | def matches(x): | ||
for rev in revs: | ||||
target = getinfo(rev) | ||||
Angel Ezquerra
|
r16445 | match = True | ||
for n, f in enumerate(getfieldfuncs): | ||||
Lucas Moscovicz
|
r20459 | if target[n] != f(x): | ||
Angel Ezquerra
|
r16445 | match = False | ||
if match: | ||||
Lucas Moscovicz
|
r20459 | return True | ||
return False | ||||
Lucas Moscovicz
|
r20611 | return subset.filter(matches) | ||
Angel Ezquerra
|
r16402 | |||
Matt Mackall
|
r11275 | def reverse(repo, subset, x): | ||
Patrick Mezard
|
r12821 | """``reverse(set)`` | ||
Reverse order of set. | ||||
""" | ||||
Matt Mackall
|
r11275 | l = getset(repo, subset, x) | ||
l.reverse() | ||||
return l | ||||
Idan Kamara
|
r13915 | def roots(repo, subset, x): | ||
"""``roots(set)`` | ||||
Patrick Mezard
|
r16394 | Changesets in set with no parent changeset in set. | ||
Patrick Mezard
|
r12821 | """ | ||
Yuya Nishihara
|
r24115 | s = getset(repo, fullreposet(repo), x) | ||
Pierre-Yves David
|
r25647 | parents = repo.changelog.parentrevs | ||
def filter(r): | ||||
for p in parents(r): | ||||
if 0 <= p and p in s: | ||||
return False | ||||
return True | ||||
return subset & s.filter(filter) | ||||
Wagner Bruna
|
r11944 | |||
Matt Mackall
|
r11275 | def sort(repo, subset, x): | ||
Patrick Mezard
|
r12821 | """``sort(set[, [-]key...])`` | ||
Sort set by keys. The default sort order is ascending, specify a key | ||||
as ``-key`` to sort in descending order. | ||||
The keys can be: | ||||
- ``rev`` for the revision number, | ||||
- ``branch`` for the branch name, | ||||
- ``desc`` for the commit message (description), | ||||
- ``user`` for user name (``author`` can be used as an alias), | ||||
- ``date`` for the commit date | ||||
""" | ||||
Martin Geisler
|
r12815 | # i18n: "sort" is a keyword | ||
Benoit Boissinot
|
r12736 | l = getargs(x, 1, 2, _("sort requires one or two arguments")) | ||
Matt Mackall
|
r11275 | keys = "rev" | ||
if len(l) == 2: | ||||
FUJIWARA Katsunori
|
r17259 | # i18n: "sort" is a keyword | ||
Martin Geisler
|
r11383 | keys = getstring(l[1], _("sort spec must be a string")) | ||
Matt Mackall
|
r11275 | |||
s = l[0] | ||||
keys = keys.split() | ||||
l = [] | ||||
def invert(s): | ||||
return "".join(chr(255 - ord(c)) for c in s) | ||||
Lucas Moscovicz
|
r20719 | revs = getset(repo, subset, s) | ||
if keys == ["rev"]: | ||||
revs.sort() | ||||
return revs | ||||
elif keys == ["-rev"]: | ||||
revs.sort(reverse=True) | ||||
return revs | ||||
for r in revs: | ||||
Matt Mackall
|
r11275 | c = repo[r] | ||
e = [] | ||||
for k in keys: | ||||
if k == 'rev': | ||||
e.append(r) | ||||
elif k == '-rev': | ||||
e.append(-r) | ||||
elif k == 'branch': | ||||
e.append(c.branch()) | ||||
elif k == '-branch': | ||||
e.append(invert(c.branch())) | ||||
elif k == 'desc': | ||||
e.append(c.description()) | ||||
elif k == '-desc': | ||||
e.append(invert(c.description())) | ||||
elif k in 'user author': | ||||
e.append(c.user()) | ||||
elif k in '-user -author': | ||||
e.append(invert(c.user())) | ||||
elif k == 'date': | ||||
e.append(c.date()[0]) | ||||
elif k == '-date': | ||||
e.append(-c.date()[0]) | ||||
else: | ||||
Martin Geisler
|
r11383 | raise error.ParseError(_("unknown sort key %r") % k) | ||
Matt Mackall
|
r11275 | e.append(r) | ||
l.append(e) | ||||
l.sort() | ||||
Lucas Moscovicz
|
r20364 | return baseset([e[-1] for e in l]) | ||
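# Standalone sketch (not part of revset.py) of the byte-inversion trick used by
# invert() above: sorting ascending on the inverted key yields a descending
# sort on the original strings.
def _invert_sketch(s):
    return "".join(chr(255 - ord(c)) for c in s)

# sorted(['default', 'stable'], key=_invert_sketch) == ['stable', 'default']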
Matt Mackall
|
r11275 | |||
Matt Harbison
|
r24446 | def subrepo(repo, subset, x): | ||
"""``subrepo([pattern])`` | ||||
Changesets that add, modify or remove the given subrepo. If no subrepo | ||||
pattern is named, any subrepo changes are returned. | ||||
""" | ||||
# i18n: "subrepo" is a keyword | ||||
args = getargs(x, 0, 1, _('subrepo takes at most one argument')) | ||||
if len(args) != 0: | ||||
pat = getstring(args[0], _("subrepo requires a pattern")) | ||||
m = matchmod.exact(repo.root, repo.root, ['.hgsubstate']) | ||||
def submatches(names): | ||||
k, p, m = _stringmatcher(pat) | ||||
for name in names: | ||||
if m(name): | ||||
yield name | ||||
def matches(x): | ||||
c = repo[x] | ||||
s = repo.status(c.p1().node(), c.node(), match=m) | ||||
if len(args) == 0: | ||||
return s.added or s.modified or s.removed | ||||
if s.added: | ||||
Augie Fackler
|
r25149 | return any(submatches(c.substate.keys())) | ||
Matt Harbison
|
r24446 | |||
if s.modified: | ||||
subs = set(c.p1().substate.keys()) | ||||
subs.update(c.substate.keys()) | ||||
for path in submatches(subs): | ||||
if c.p1().substate.get(path) != c.substate.get(path): | ||||
return True | ||||
if s.removed: | ||||
Augie Fackler
|
r25149 | return any(submatches(c.p1().substate.keys())) | ||
Matt Harbison
|
r24446 | |||
return False | ||||
return subset.filter(matches) | ||||
Simon King
|
r16819 | def _stringmatcher(pattern): | ||
""" | ||||
accepts a string, possibly starting with 're:' or 'literal:' prefix. | ||||
returns the matcher name, pattern, and matcher function. | ||||
missing or unknown prefixes are treated as literal matches. | ||||
helper for tests: | ||||
>>> def test(pattern, *tests): | ||||
... kind, pattern, matcher = _stringmatcher(pattern) | ||||
... return (kind, pattern, [bool(matcher(t)) for t in tests]) | ||||
exact matching (no prefix): | ||||
>>> test('abcdefg', 'abc', 'def', 'abcdefg') | ||||
('literal', 'abcdefg', [False, False, True]) | ||||
regex matching ('re:' prefix) | ||||
>>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar') | ||||
('re', 'a.+b', [False, False, True]) | ||||
force exact matches ('literal:' prefix) | ||||
>>> test('literal:re:foobar', 'foobar', 're:foobar') | ||||
('literal', 're:foobar', [False, True]) | ||||
unknown prefixes are ignored and treated as literals | ||||
>>> test('foo:bar', 'foo', 'bar', 'foo:bar') | ||||
('literal', 'foo:bar', [False, False, True]) | ||||
""" | ||||
if pattern.startswith('re:'): | ||||
pattern = pattern[3:] | ||||
try: | ||||
regex = re.compile(pattern) | ||||
Gregory Szorc
|
r25660 | except re.error as e: | ||
Simon King
|
r16819 | raise error.ParseError(_('invalid regular expression: %s') | ||
% e) | ||||
return 're', pattern, regex.search | ||||
elif pattern.startswith('literal:'): | ||||
pattern = pattern[8:] | ||||
return 'literal', pattern, pattern.__eq__ | ||||
Simon King
|
r16823 | def _substringmatcher(pattern): | ||
kind, pattern, matcher = _stringmatcher(pattern) | ||||
if kind == 'literal': | ||||
matcher = lambda s: pattern in s | ||||
return kind, pattern, matcher | ||||
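# Small self-check sketch (an addition for illustration; call it manually, it
# is not run at import time) showing how this differs from _stringmatcher:
# literal patterns match as substrings here.
def _substringmatcher_examples():
    assert _substringmatcher('ob')[2]('foobar')          # substring match
    assert not _substringmatcher('re:^ob')[2]('foobar')  # regex stays anchored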
Simon King
|
r16819 | |||
Augie Fackler
|
r12715 | def tag(repo, subset, x): | ||
Martin Geisler
|
r14356 | """``tag([name])`` | ||
Patrick Mezard
|
r12821 | The specified tag by name, or all tagged revisions if no name is given. | ||
Matt Harbison
|
r20824 | |||
If `name` starts with `re:`, the remainder of the name is treated as | ||||
a regular expression. To match a tag that actually starts with `re:`, | ||||
use the prefix `literal:`. | ||||
Patrick Mezard
|
r12821 | """ | ||
Martin Geisler
|
r12815 | # i18n: "tag" is a keyword | ||
Augie Fackler
|
r12715 | args = getargs(x, 0, 1, _("tag takes one or no arguments")) | ||
Matt Mackall
|
r11280 | cl = repo.changelog | ||
Augie Fackler
|
r12715 | if args: | ||
Simon King
|
r16820 | pattern = getstring(args[0], | ||
# i18n: "tag" is a keyword | ||||
_('the argument to tag must be a string')) | ||||
kind, pattern, matcher = _stringmatcher(pattern) | ||||
if kind == 'literal': | ||||
Matt Mackall
|
r16825 | # avoid resolving all tags | ||
tn = repo._tagscache.tags.get(pattern, None) | ||||
if tn is None: | ||||
FUJIWARA Katsunori
|
r23978 | raise error.RepoLookupError(_("tag '%s' does not exist") | ||
% pattern) | ||||
Matt Mackall
|
r16825 | s = set([repo[tn].rev()]) | ||
Simon King
|
r16820 | else: | ||
s = set([cl.rev(n) for t, n in repo.tagslist() if matcher(t)]) | ||||
Augie Fackler
|
r12715 | else: | ||
s = set([cl.rev(n) for t, n in repo.tagslist() if t != 'tip']) | ||||
Lucas Moscovicz
|
r20367 | return subset & s | ||
Matt Mackall
|
r11280 | |||
Patrick Mezard
|
r12821 | def tagged(repo, subset, x): | ||
return tag(repo, subset, x) | ||||
Pierre-Yves David
|
r17171 | def unstable(repo, subset, x): | ||
"""``unstable()`` | ||||
Patrick Mezard
|
r17291 | Non-obsolete changesets with obsolete ancestors. | ||
""" | ||||
FUJIWARA Katsunori
|
r17259 | # i18n: "unstable" is a keyword | ||
FUJIWARA Katsunori
|
r17258 | getargs(x, 0, 0, _("unstable takes no arguments")) | ||
Pierre-Yves David
|
r17825 | unstables = obsmod.getrevs(repo, 'unstable') | ||
Lucas Moscovicz
|
r20367 | return subset & unstables | ||
Pierre-Yves David
|
r17171 | |||
Idan Kamara
|
r13915 | def user(repo, subset, x): | ||
"""``user(string)`` | ||||
Martin Geisler
|
r14357 | User name contains string. The match is case-insensitive. | ||
Simon King
|
r16823 | |||
If `string` starts with `re:`, the remainder of the string is treated as | ||||
a regular expression. To match a user that actually contains `re:`, use | ||||
the prefix `literal:`. | ||||
Matt Mackall
|
r13359 | """ | ||
Idan Kamara
|
r13915 | return author(repo, subset, x) | ||
Matt Mackall
|
r13359 | |||
Yuya Nishihara
|
r24777 | # experimental | ||
Yuya Nishihara
|
r24419 | def wdir(repo, subset, x): | ||
# i18n: "wdir" is a keyword | ||||
getargs(x, 0, 0, _("wdir takes no arguments")) | ||||
Yuya Nishihara
|
r25765 | if node.wdirrev in subset or isinstance(subset, fullreposet): | ||
return baseset([node.wdirrev]) | ||||
Yuya Nishihara
|
r24419 | return baseset() | ||
Matt Mackall
|
r15898 | # for internal use | ||
def _list(repo, subset, x): | ||||
s = getstring(x, "internal error") | ||||
if not s: | ||||
Pierre-Yves David
|
r22802 | return baseset() | ||
Yuya Nishihara
|
r25341 | # remove duplicates here. it's difficult for caller to deduplicate sets | ||
# because different symbols can point to the same rev. | ||||
Yuya Nishihara
|
r25344 | cl = repo.changelog | ||
Yuya Nishihara
|
r25341 | ls = [] | ||
seen = set() | ||||
for t in s.split('\0'): | ||||
Yuya Nishihara
|
r25344 | try: | ||
# fast path for integer revision | ||||
r = int(t) | ||||
if str(r) != t or r not in cl: | ||||
raise ValueError | ||||
except ValueError: | ||||
r = repo[t].rev() | ||||
Yuya Nishihara
|
r25341 | if r in seen: | ||
continue | ||||
Yuya Nishihara
|
r25342 | if (r in subset | ||
or r == node.nullrev and isinstance(subset, fullreposet)): | ||||
Yuya Nishihara
|
r25341 | ls.append(r) | ||
seen.add(r) | ||||
return baseset(ls) | ||||
Matt Mackall
|
r15898 | |||
Lucas Moscovicz
|
r20566 | # for internal use | ||
def _intlist(repo, subset, x): | ||||
s = getstring(x, "internal error") | ||||
if not s: | ||||
Pierre-Yves David
|
r22802 | return baseset() | ||
Lucas Moscovicz
|
r20566 | ls = [int(r) for r in s.split('\0')] | ||
Pierre-Yves David
|
r22876 | s = subset | ||
Lucas Moscovicz
|
r20566 | return baseset([r for r in ls if r in s]) | ||
Lucas Moscovicz
|
r20569 | # for internal use | ||
def _hexlist(repo, subset, x): | ||||
s = getstring(x, "internal error") | ||||
if not s: | ||||
Pierre-Yves David
|
r22802 | return baseset() | ||
Lucas Moscovicz
|
r20569 | cl = repo.changelog | ||
ls = [cl.rev(node.bin(r)) for r in s.split('\0')] | ||||
Pierre-Yves David
|
r22877 | s = subset | ||
Lucas Moscovicz
|
r20569 | return baseset([r for r in ls if r in s]) | ||
Matt Mackall
|
r15898 | |||
Matt Mackall
|
r11275 | symbols = { | ||
Matt Mackall
|
r11284 | "adds": adds, | ||
"all": getall, | ||||
Matt Mackall
|
r11275 | "ancestor": ancestor, | ||
"ancestors": ancestors, | ||||
Patrick Mezard
|
r16409 | "_firstancestors": _firstancestors, | ||
Matt Mackall
|
r11284 | "author": author, | ||
"Yann E. MORIN"
|
r15134 | "bisect": bisect, | ||
Benoit Boissinot
|
r13602 | "bisected": bisected, | ||
Matt Mackall
|
r13359 | "bookmark": bookmark, | ||
Matt Mackall
|
r11275 | "branch": branch, | ||
Ivan Andrus
|
r17753 | "branchpoint": branchpoint, | ||
Pierre-Yves David
|
r17829 | "bumped": bumped, | ||
Tomasz Kleczek
|
r17913 | "bundle": bundle, | ||
Matt Mackall
|
r11284 | "children": children, | ||
"closed": closed, | ||||
"contains": contains, | ||||
Matt Harbison
|
r17002 | "converted": converted, | ||
Matt Mackall
|
r11284 | "date": date, | ||
Thomas Arendsen Hein
|
r14650 | "desc": desc, | ||
Matt Mackall
|
r11284 | "descendants": descendants, | ||
Patrick Mezard
|
r16409 | "_firstdescendants": _firstdescendants, | ||
Matt Harbison
|
r17186 | "destination": destination, | ||
Pierre-Yves David
|
r18071 | "divergent": divergent, | ||
Pierre-Yves David
|
r15819 | "draft": draft, | ||
Pierre-Yves David
|
r17173 | "extinct": extinct, | ||
Henrik Stuart
|
r16661 | "extra": extra, | ||
Matt Mackall
|
r11284 | "file": hasfile, | ||
Matt Mackall
|
r14342 | "filelog": filelog, | ||
Matt Mackall
|
r15117 | "first": first, | ||
Matt Mackall
|
r11284 | "follow": follow, | ||
Patrick Mezard
|
r16174 | "_followfirst": _followfirst, | ||
Matt Mackall
|
r11284 | "grep": grep, | ||
"head": head, | ||||
"heads": heads, | ||||
Patrick Mezard
|
r17390 | "hidden": hidden, | ||
Matt Mackall
|
r16417 | "id": node_, | ||
Matt Mackall
|
r11275 | "keyword": keyword, | ||
Matt Mackall
|
r14061 | "last": last, | ||
Matt Mackall
|
r11284 | "limit": limit, | ||
Patrick Mezard
|
r16161 | "_matchfiles": _matchfiles, | ||
Matt Mackall
|
r11284 | "max": maxrev, | ||
Thomas Arendsen Hein
|
r14649 | "merge": merge, | ||
Nicolas Dumazet
|
r11708 | "min": minrev, | ||
Matt Mackall
|
r11284 | "modifies": modifies, | ||
Sean Farley
|
r23836 | "named": named, | ||
Pierre-Yves David
|
r17170 | "obsolete": obsolete, | ||
Yuya Nishihara
|
r23466 | "only": only, | ||
Matt Harbison
|
r17185 | "origin": origin, | ||
Matt Mackall
|
r11284 | "outgoing": outgoing, | ||
Matt Mackall
|
r11275 | "p1": p1, | ||
"p2": p2, | ||||
"parents": parents, | ||||
Wagner Bruna
|
r11944 | "present": present, | ||
Pierre-Yves David
|
r15819 | "public": public, | ||
Laurent Charignon
|
r25191 | "_notpublic": _notpublic, | ||
Matt Mackall
|
r15936 | "remote": remote, | ||
Matt Mackall
|
r11284 | "removes": removes, | ||
Thomas Arendsen Hein
|
r14649 | "rev": rev, | ||
Matt Mackall
|
r11284 | "reverse": reverse, | ||
Matt Mackall
|
r11275 | "roots": roots, | ||
Matt Mackall
|
r11284 | "sort": sort, | ||
Pierre-Yves David
|
r15819 | "secret": secret, | ||
Matt Harbison
|
r24446 | "subrepo": subrepo, | ||
Angel Ezquerra
|
r16402 | "matching": matching, | ||
Augie Fackler
|
r12715 | "tag": tag, | ||
Patrick Mezard
|
r12821 | "tagged": tagged, | ||
"user": user, | ||||
Pierre-Yves David
|
r17171 | "unstable": unstable, | ||
Yuya Nishihara
|
r24419 | "wdir": wdir, | ||
Matt Mackall
|
r15898 | "_list": _list, | ||
Lucas Moscovicz
|
r20566 | "_intlist": _intlist, | ||
Lucas Moscovicz
|
r20569 | "_hexlist": _hexlist, | ||
Matt Mackall
|
r11275 | } | ||
Alexander Plavin
|
r19721 | # symbols which can't be used for a DoS attack for any given input | ||
# (e.g. those which accept regexes as plain strings shouldn't be included) | ||||
# functions that just return a lot of changesets (like all) don't count here | ||||
safesymbols = set([ | ||||
"adds", | ||||
"all", | ||||
"ancestor", | ||||
"ancestors", | ||||
"_firstancestors", | ||||
"author", | ||||
"bisect", | ||||
"bisected", | ||||
"bookmark", | ||||
"branch", | ||||
"branchpoint", | ||||
"bumped", | ||||
"bundle", | ||||
"children", | ||||
"closed", | ||||
"converted", | ||||
"date", | ||||
"desc", | ||||
"descendants", | ||||
"_firstdescendants", | ||||
"destination", | ||||
"divergent", | ||||
"draft", | ||||
"extinct", | ||||
"extra", | ||||
"file", | ||||
"filelog", | ||||
"first", | ||||
"follow", | ||||
"_followfirst", | ||||
"head", | ||||
"heads", | ||||
"hidden", | ||||
"id", | ||||
"keyword", | ||||
"last", | ||||
"limit", | ||||
"_matchfiles", | ||||
"max", | ||||
"merge", | ||||
"min", | ||||
"modifies", | ||||
"obsolete", | ||||
Yuya Nishihara
|
r23467 | "only", | ||
Alexander Plavin
|
r19721 | "origin", | ||
"outgoing", | ||||
"p1", | ||||
"p2", | ||||
"parents", | ||||
"present", | ||||
"public", | ||||
Laurent Charignon
|
r25191 | "_notpublic", | ||
Alexander Plavin
|
r19721 | "remote", | ||
"removes", | ||||
"rev", | ||||
"reverse", | ||||
"roots", | ||||
"sort", | ||||
"secret", | ||||
"matching", | ||||
"tag", | ||||
"tagged", | ||||
"user", | ||||
"unstable", | ||||
Yuya Nishihara
|
r24419 | "wdir", | ||
Alexander Plavin
|
r19721 | "_list", | ||
Lucas Moscovicz
|
r20566 | "_intlist", | ||
Lucas Moscovicz
|
r20569 | "_hexlist", | ||
Alexander Plavin
|
r19721 | ]) | ||
Matt Mackall
|
r11275 | methods = { | ||
"range": rangeset, | ||||
Bryan O'Sullivan
|
r16860 | "dagrange": dagrange, | ||
Matt Mackall
|
r11275 | "string": stringset, | ||
Jordi Gutiérrez Hermoso
|
r24932 | "symbol": stringset, | ||
Matt Mackall
|
r11275 | "and": andset, | ||
"or": orset, | ||||
"not": notset, | ||||
"list": listset, | ||||
Yuya Nishihara
|
r25704 | "keyvalue": keyvaluepair, | ||
Matt Mackall
|
r11275 | "func": func, | ||
Kevin Gessner
|
r14070 | "ancestor": ancestorspec, | ||
"parent": parentspec, | ||||
"parentpost": p1, | ||||
Matt Mackall
|
r11275 | } | ||
Matt Mackall
|
r11279 | def optimize(x, small): | ||
Martin Geisler
|
r13031 | if x is None: | ||
Matt Mackall
|
r11279 | return 0, x | ||
Matt Mackall
|
r11275 | smallbonus = 1 | ||
if small: | ||||
smallbonus = .5 | ||||
op = x[0] | ||||
Matt Mackall
|
r11283 | if op == 'minus': | ||
Matt Mackall
|
r11279 | return optimize(('and', x[1], ('not', x[2])), small) | ||
Sean Farley
|
r23765 | elif op == 'only': | ||
return optimize(('func', ('symbol', 'only'), | ||||
('list', x[1], x[2])), small) | ||||
Yuya Nishihara
|
r25094 | elif op == 'onlypost': | ||
return optimize(('func', ('symbol', 'only'), x[1]), small) | ||||
Matt Mackall
|
r11279 | elif op == 'dagrangepre': | ||
return optimize(('func', ('symbol', 'ancestors'), x[1]), small) | ||||
elif op == 'dagrangepost': | ||||
return optimize(('func', ('symbol', 'descendants'), x[1]), small) | ||||
Yuya Nishihara
|
r25819 | elif op == 'rangeall': | ||
return optimize(('range', ('string', '0'), ('string', 'tip')), small) | ||||
Matt Mackall
|
r11279 | elif op == 'rangepre': | ||
return optimize(('range', ('string', '0'), x[1]), small) | ||||
elif op == 'rangepost': | ||||
return optimize(('range', x[1], ('string', 'tip')), small) | ||||
Matt Mackall
|
r11467 | elif op == 'negate': | ||
return optimize(('string', | ||||
'-' + getstring(x[1], _("can't negate that"))), small) | ||||
Matt Mackall
|
r11279 | elif op in 'string symbol negate': | ||
return smallbonus, x # single revisions are small | ||||
Bryan O'Sullivan
|
r16859 | elif op == 'and': | ||
Matt Mackall
|
r11279 | wa, ta = optimize(x[1], True) | ||
wb, tb = optimize(x[2], True) | ||||
Siddharth Agarwal
|
r20499 | |||
# (::x and not ::y)/(not ::y and ::x) have a fast path | ||||
Siddharth Agarwal
|
r21893 | def isonly(revs, bases): | ||
Siddharth Agarwal
|
r20499 | return ( | ||
revs[0] == 'func' | ||||
and getstring(revs[1], _('not a symbol')) == 'ancestors' | ||||
and bases[0] == 'not' | ||||
and bases[1][0] == 'func' | ||||
and getstring(bases[1][1], _('not a symbol')) == 'ancestors') | ||||
Matt Mackall
|
r11279 | w = min(wa, wb) | ||
Siddharth Agarwal
|
r21893 | if isonly(ta, tb): | ||
return w, ('func', ('symbol', 'only'), ('list', ta[2], tb[1][2])) | ||||
if isonly(tb, ta): | ||||
return w, ('func', ('symbol', 'only'), ('list', tb[2], ta[1][2])) | ||||
Siddharth Agarwal
|
r20499 | |||
Matt Mackall
|
r11279 | if wa > wb: | ||
return w, (op, tb, ta) | ||||
return w, (op, ta, tb) | ||||
elif op == 'or': | ||||
Yuya Nishihara
|
r25343 | # fast path for machine-generated expressions that are likely to have | ||
# lots of trivial revisions: 'a + b + c()' to '_list(a b) + c()' | ||||
ws, ts, ss = [], [], [] | ||||
def flushss(): | ||||
if not ss: | ||||
return | ||||
if len(ss) == 1: | ||||
w, t = ss[0] | ||||
else: | ||||
s = '\0'.join(t[1] for w, t in ss) | ||||
y = ('func', ('symbol', '_list'), ('string', s)) | ||||
w, t = optimize(y, False) | ||||
ws.append(w) | ||||
ts.append(t) | ||||
del ss[:] | ||||
for y in x[1:]: | ||||
w, t = optimize(y, False) | ||||
if t[0] == 'string' or t[0] == 'symbol': | ||||
ss.append((w, t)) | ||||
continue | ||||
flushss() | ||||
ws.append(w) | ||||
ts.append(t) | ||||
flushss() | ||||
if len(ts) == 1: | ||||
return ws[0], ts[0] # 'or' operation is fully optimized out | ||||
Yuya Nishihara
|
r25307 | # we can't reorder trees by weight because it would change the order. | ||
# ("sort(a + b)" == "sort(b + a)", but "a + b" != "b + a") | ||||
Yuya Nishihara
|
r25309 | # ts = tuple(t for w, t in sorted(zip(ws, ts), key=lambda wt: wt[0])) | ||
Yuya Nishihara
|
r25343 | return max(ws), (op,) + tuple(ts) | ||
Matt Mackall
|
r11275 | elif op == 'not': | ||
Laurent Charignon
|
r25191 | # Optimize not public() to _notpublic() because we have a fast version | ||
if x[1] == ('func', ('symbol', 'public'), None): | ||||
newsym = ('func', ('symbol', '_notpublic'), None) | ||||
o = optimize(newsym, not small) | ||||
return o[0], o[1] | ||||
else: | ||||
o = optimize(x[1], not small) | ||||
return o[0], (op, o[1]) | ||||
Kevin Gessner
|
r14070 | elif op == 'parentpost': | ||
o = optimize(x[1], small) | ||||
return o[0], (op, o[1]) | ||||
Matt Mackall
|
r11275 | elif op == 'group': | ||
Matt Mackall
|
r11279 | return optimize(x[1], small) | ||
Bryan O'Sullivan
|
r16860 | elif op in 'dagrange range list parent ancestorspec': | ||
Matt Mackall
|
r14842 | if op == 'parent': | ||
# x^:y means (x^) : y, not x ^ (:y) | ||||
post = ('parentpost', x[1]) | ||||
if x[2][0] == 'dagrangepre': | ||||
return optimize(('dagrange', post, x[2][1]), small) | ||||
elif x[2][0] == 'rangepre': | ||||
return optimize(('range', post, x[2][1]), small) | ||||
Matt Mackall
|
r11279 | wa, ta = optimize(x[1], small) | ||
wb, tb = optimize(x[2], small) | ||||
return wa + wb, (op, ta, tb) | ||||
Matt Mackall
|
r11275 | elif op == 'func': | ||
Martin Geisler
|
r11383 | f = getstring(x[1], _("not a symbol")) | ||
Matt Mackall
|
r11279 | wa, ta = optimize(x[2], small) | ||
Thomas Arendsen Hein
|
r14650 | if f in ("author branch closed date desc file grep keyword " | ||
"outgoing user"): | ||||
Matt Mackall
|
r11279 | w = 10 # slow | ||
Matt Mackall
|
r12351 | elif f in "modifies adds removes": | ||
Matt Mackall
|
r11279 | w = 30 # slower | ||
Matt Mackall
|
r11275 | elif f == "contains": | ||
Matt Mackall
|
r11279 | w = 100 # very slow | ||
Matt Mackall
|
r11275 | elif f == "ancestor": | ||
Matt Mackall
|
r11279 | w = 1 * smallbonus | ||
Durham Goode
|
r22451 | elif f in "reverse limit first _intlist": | ||
Matt Mackall
|
r11279 | w = 0 | ||
Matt Mackall
|
r11275 | elif f in "sort": | ||
Matt Mackall
|
r11279 | w = 10 # assume most sorts look at changelog | ||
Matt Mackall
|
r11275 | else: | ||
Matt Mackall
|
r11279 | w = 1 | ||
return w + wa, (op, x[1], ta) | ||||
return 1, x | ||||
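# Hedged illustration of the isonly() fast path in optimize() above: a query
# such as "::feature and not ::default" reaches the 'and' branch with
#   ta = ('func', ('symbol', 'ancestors'), A)
#   tb = ('not', ('func', ('symbol', 'ancestors'), B))
# and is rewritten into the equivalent but cheaper
#   ('func', ('symbol', 'only'), ('list', A, B))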
Matt Mackall
|
r11275 | |||
Patrick Mezard
|
r16771 | _aliasarg = ('func', ('symbol', '_aliasarg')) | ||
def _getaliasarg(tree): | ||||
"""If tree matches ('func', ('symbol', '_aliasarg'), ('string', X)) | ||||
return X; otherwise return None. | ||||
""" | ||||
if (len(tree) == 3 and tree[:2] == _aliasarg | ||||
and tree[2][0] == 'string'): | ||||
return tree[2][1] | ||||
return None | ||||
def _checkaliasarg(tree, known=None): | ||||
"""Check tree contains no _aliasarg construct or only ones which | ||||
value is in known. Used to avoid alias placeholders injection. | ||||
""" | ||||
if isinstance(tree, tuple): | ||||
arg = _getaliasarg(tree) | ||||
if arg is not None and (not known or arg not in known): | ||||
Augie Fackler
|
r24219 | raise error.UnknownIdentifier('_aliasarg', []) | ||
Patrick Mezard
|
r16771 | for t in tree: | ||
_checkaliasarg(t, known) | ||||
FUJIWARA Katsunori
|
r23845 | # the set of valid characters for the initial letter of symbols in | ||
# alias declarations and definitions | ||||
_aliassyminitletters = set(c for c in [chr(i) for i in xrange(256)] | ||||
if c.isalnum() or c in '._@$' or ord(c) > 127) | ||||
def _tokenizealias(program, lookup=None): | ||||
"""Parse alias declaration/definition into a stream of tokens | ||||
This allows symbol names to use also ``$`` as an initial letter | ||||
(for backward compatibility), and callers of this function should | ||||
examine whether ``$`` is used also for unexpected symbols or not. | ||||
""" | ||||
return tokenize(program, lookup=lookup, | ||||
syminitletters=_aliassyminitletters) | ||||
def _parsealiasdecl(decl): | ||||
"""Parse alias declaration ``decl`` | ||||
This returns ``(name, tree, args, errorstr)`` tuple: | ||||
- ``name``: of declared alias (may be ``decl`` itself at error) | ||||
- ``tree``: parse result (or ``None`` at error) | ||||
- ``args``: list of alias argument names (or None for symbol declaration) | ||||
- ``errorstr``: detail about detected error (or None) | ||||
>>> _parsealiasdecl('foo') | ||||
('foo', ('symbol', 'foo'), None, None) | ||||
>>> _parsealiasdecl('$foo') | ||||
('$foo', None, None, "'$' not for alias arguments") | ||||
>>> _parsealiasdecl('foo::bar') | ||||
('foo::bar', None, None, 'invalid format') | ||||
>>> _parsealiasdecl('foo bar') | ||||
('foo bar', None, None, 'at 4: invalid token') | ||||
>>> _parsealiasdecl('foo()') | ||||
('foo', ('func', ('symbol', 'foo')), [], None) | ||||
>>> _parsealiasdecl('$foo()') | ||||
('$foo()', None, None, "'$' not for alias arguments") | ||||
>>> _parsealiasdecl('foo($1, $2)') | ||||
('foo', ('func', ('symbol', 'foo')), ['$1', '$2'], None) | ||||
>>> _parsealiasdecl('foo(bar_bar, baz.baz)') | ||||
('foo', ('func', ('symbol', 'foo')), ['bar_bar', 'baz.baz'], None) | ||||
>>> _parsealiasdecl('foo($1, $2, nested($1, $2))') | ||||
('foo($1, $2, nested($1, $2))', None, None, 'invalid argument list') | ||||
>>> _parsealiasdecl('foo(bar($1, $2))') | ||||
('foo(bar($1, $2))', None, None, 'invalid argument list') | ||||
>>> _parsealiasdecl('foo("string")') | ||||
('foo("string")', None, None, 'invalid argument list') | ||||
>>> _parsealiasdecl('foo($1, $2') | ||||
('foo($1, $2', None, None, 'at 10: unexpected token: end') | ||||
>>> _parsealiasdecl('foo("string') | ||||
('foo("string', None, None, 'at 5: unterminated string') | ||||
FUJIWARA Katsunori
|
r23847 | >>> _parsealiasdecl('foo($1, $2, $1)') | ||
('foo', None, None, 'argument names collide with each other') | ||||
FUJIWARA Katsunori
|
r23845 | """ | ||
Yuya Nishihara
|
r25654 | p = parser.parser(elements) | ||
FUJIWARA Katsunori
|
r23845 | try: | ||
Yuya Nishihara
|
r25654 | tree, pos = p.parse(_tokenizealias(decl)) | ||
FUJIWARA Katsunori
|
r23845 | if (pos != len(decl)): | ||
raise error.ParseError(_('invalid token'), pos) | ||||
if isvalidsymbol(tree): | ||||
# "name = ...." style | ||||
name = getsymbol(tree) | ||||
if name.startswith('$'): | ||||
return (decl, None, None, _("'$' not for alias arguments")) | ||||
return (name, ('symbol', name), None, None) | ||||
if isvalidfunc(tree): | ||||
# "name(arg, ....) = ...." style | ||||
name = getfuncname(tree) | ||||
if name.startswith('$'): | ||||
return (decl, None, None, _("'$' not for alias arguments")) | ||||
args = [] | ||||
for arg in getfuncargs(tree): | ||||
if not isvalidsymbol(arg): | ||||
return (decl, None, None, _("invalid argument list")) | ||||
args.append(getsymbol(arg)) | ||||
FUJIWARA Katsunori
|
r23847 | if len(args) != len(set(args)): | ||
return (name, None, None, | ||||
_("argument names collide with each other")) | ||||
FUJIWARA Katsunori
|
r23845 | return (name, ('func', ('symbol', name)), args, None) | ||
return (decl, None, None, _("invalid format")) | ||||
Gregory Szorc
|
r25660 | except error.ParseError as inst: | ||
FUJIWARA Katsunori
|
r23845 | return (decl, None, None, parseerrordetail(inst)) | ||
FUJIWARA Katsunori
|
r23993 | def _parsealiasdefn(defn, args): | ||
"""Parse alias definition ``defn`` | ||||
This function also replaces alias argument references in the | ||||
specified definition by ``_aliasarg(ARGNAME)``. | ||||
``args`` is a list of alias argument names, or None if the alias | ||||
is declared as a symbol. | ||||
This returns "tree" as parsing result. | ||||
>>> args = ['$1', '$2', 'foo'] | ||||
>>> print prettyformat(_parsealiasdefn('$1 or foo', args)) | ||||
(or | ||||
(func | ||||
('symbol', '_aliasarg') | ||||
('string', '$1')) | ||||
(func | ||||
('symbol', '_aliasarg') | ||||
('string', 'foo'))) | ||||
>>> try: | ||||
... _parsealiasdefn('$1 or $bar', args) | ||||
... except error.ParseError, inst: | ||||
... print parseerrordetail(inst) | ||||
at 6: '$' not for alias arguments | ||||
>>> args = ['$1', '$10', 'foo'] | ||||
>>> print prettyformat(_parsealiasdefn('$10 or foobar', args)) | ||||
(or | ||||
(func | ||||
('symbol', '_aliasarg') | ||||
('string', '$10')) | ||||
('symbol', 'foobar')) | ||||
>>> print prettyformat(_parsealiasdefn('"$1" or "foo"', args)) | ||||
(or | ||||
('string', '$1') | ||||
('string', 'foo')) | ||||
""" | ||||
def tokenizedefn(program, lookup=None): | ||||
if args: | ||||
argset = set(args) | ||||
else: | ||||
argset = set() | ||||
for t, value, pos in _tokenizealias(program, lookup=lookup): | ||||
if t == 'symbol': | ||||
if value in argset: | ||||
# emulate tokenization of "_aliasarg('ARGNAME')": | ||||
# "_aliasarg()" is an unknown symbol only used separate | ||||
# alias argument placeholders from regular strings. | ||||
yield ('symbol', '_aliasarg', pos) | ||||
yield ('(', None, pos) | ||||
yield ('string', value, pos) | ||||
yield (')', None, pos) | ||||
continue | ||||
elif value.startswith('$'): | ||||
raise error.ParseError(_("'$' not for alias arguments"), | ||||
pos) | ||||
yield (t, value, pos) | ||||
Yuya Nishihara
|
r25654 | p = parser.parser(elements) | ||
tree, pos = p.parse(tokenizedefn(defn)) | ||||
FUJIWARA Katsunori
|
r23993 | if pos != len(defn): | ||
raise error.ParseError(_('invalid token'), pos) | ||||
Yuya Nishihara
|
r25309 | return parser.simplifyinfixops(tree, ('or',)) | ||
FUJIWARA Katsunori
|
r23993 | |||
Alexander Solovyov
|
r14098 | class revsetalias(object): | ||
FUJIWARA Katsunori
|
r23725 | # whether the alias's own `error` information has already been shown. | ||
# this avoids showing the same warning multiple times at each `findaliases`. | ||||
warned = False | ||||
Mads Kiilerich
|
r14723 | def __init__(self, name, value): | ||
Alexander Solovyov
|
r14098 | '''Aliases like: | ||
h = heads(default) | ||||
b($1) = ancestors($1) - ancestors(default) | ||||
''' | ||||
FUJIWARA Katsunori
|
r23846 | self.name, self.tree, self.args, self.error = _parsealiasdecl(name) | ||
if self.error: | ||||
self.error = _('failed to parse the declaration of revset alias' | ||||
' "%s": %s') % (self.name, self.error) | ||||
return | ||||
FUJIWARA Katsunori
|
r23725 | try: | ||
FUJIWARA Katsunori
|
r23994 | self.replacement = _parsealiasdefn(value, self.args) | ||
FUJIWARA Katsunori
|
r23725 | # Check for placeholder injection | ||
_checkaliasarg(self.replacement, self.args) | ||||
Gregory Szorc
|
r25660 | except error.ParseError as inst: | ||
FUJIWARA Katsunori
|
r23844 | self.error = _('failed to parse the definition of revset alias' | ||
' "%s": %s') % (self.name, parseerrordetail(inst)) | ||||
Alexander Solovyov
|
r14098 | |||
Patrick Mezard
|
r16096 | def _getalias(aliases, tree): | ||
"""If tree looks like an unexpanded alias, return it. Return None | ||||
otherwise. | ||||
""" | ||||
if isinstance(tree, tuple) and tree: | ||||
if tree[0] == 'symbol' and len(tree) == 2: | ||||
name = tree[1] | ||||
alias = aliases.get(name) | ||||
if alias and alias.args is None and alias.tree == tree: | ||||
return alias | ||||
if tree[0] == 'func' and len(tree) > 1: | ||||
if tree[1][0] == 'symbol' and len(tree[1]) == 2: | ||||
name = tree[1][1] | ||||
alias = aliases.get(name) | ||||
if alias and alias.args is not None and alias.tree == tree[:2]: | ||||
return alias | ||||
return None | ||||
Alexander Solovyov
|
r14098 | |||
Patrick Mezard
|
r16096 | def _expandargs(tree, args): | ||
Patrick Mezard
|
r16771 | """Replace _aliasarg instances with the substitution value of the | ||
same name in args, recursively. | ||||
Patrick Mezard
|
r16096 | """ | ||
Patrick Mezard
|
r16771 | if not tree or not isinstance(tree, tuple): | ||
Patrick Mezard
|
r16096 | return tree | ||
Patrick Mezard
|
r16771 | arg = _getaliasarg(tree) | ||
if arg is not None: | ||||
return args[arg] | ||||
Patrick Mezard
|
r16096 | return tuple(_expandargs(t, args) for t in tree) | ||
Patrick Mezard
|
r16838 | def _expandaliases(aliases, tree, expanding, cache): | ||
Patrick Mezard
|
r16096 | """Expand aliases in tree, recursively. | ||
'aliases' is a dictionary mapping user defined aliases to | ||||
revsetalias objects. | ||||
""" | ||||
if not isinstance(tree, tuple): | ||||
# Do not expand raw strings | ||||
Alexander Solovyov
|
r14098 | return tree | ||
Patrick Mezard
|
r16096 | alias = _getalias(aliases, tree) | ||
if alias is not None: | ||||
FUJIWARA Katsunori
|
r23725 | if alias.error: | ||
FUJIWARA Katsunori
|
r23844 | raise util.Abort(alias.error) | ||
Patrick Mezard
|
r16096 | if alias in expanding: | ||
raise error.ParseError(_('infinite expansion of revset alias "%s" ' | ||||
'detected') % alias.name) | ||||
expanding.append(alias) | ||||
Patrick Mezard
|
r16838 | if alias.name not in cache: | ||
cache[alias.name] = _expandaliases(aliases, alias.replacement, | ||||
expanding, cache) | ||||
result = cache[alias.name] | ||||
Patrick Mezard
|
r16772 | expanding.pop() | ||
Patrick Mezard
|
r16096 | if alias.args is not None: | ||
l = getlist(tree[2]) | ||||
if len(l) != len(alias.args): | ||||
raise error.ParseError( | ||||
_('invalid number of arguments: %s') % len(l)) | ||||
Patrick Mezard
|
r16838 | l = [_expandaliases(aliases, a, [], cache) for a in l] | ||
Patrick Mezard
|
r16096 | result = _expandargs(result, dict(zip(alias.args, l))) | ||
else: | ||||
Patrick Mezard
|
r16838 | result = tuple(_expandaliases(aliases, t, expanding, cache) | ||
Patrick Mezard
|
r16096 | for t in tree) | ||
return result | ||||
Alexander Solovyov
|
r14098 | |||
FUJIWARA Katsunori
|
r23725 | def findaliases(ui, tree, showwarning=None): | ||
Patrick Mezard
|
r16771 | _checkaliasarg(tree) | ||
Patrick Mezard
|
r16096 | aliases = {} | ||
Alexander Solovyov
|
r14098 | for k, v in ui.configitems('revsetalias'): | ||
alias = revsetalias(k, v) | ||||
Patrick Mezard
|
r16096 | aliases[alias.name] = alias | ||
FUJIWARA Katsunori
|
r23725 | tree = _expandaliases(aliases, tree, [], {}) | ||
if showwarning: | ||||
# warn about problematic (but not referred) aliases | ||||
for name, alias in sorted(aliases.iteritems()): | ||||
if alias.error and not alias.warned: | ||||
FUJIWARA Katsunori
|
r23844 | showwarning(_('warning: %s\n') % (alias.error)) | ||
FUJIWARA Katsunori
|
r23725 | alias.warned = True | ||
return tree | ||||
Alexander Solovyov
|
r14098 | |||
FUJIWARA Katsunori
|
r23742 | def foldconcat(tree): | ||
"""Fold elements to be concatenated by `##` | ||||
""" | ||||
if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'): | ||||
return tree | ||||
if tree[0] == '_concat': | ||||
pending = [tree] | ||||
l = [] | ||||
while pending: | ||||
e = pending.pop() | ||||
if e[0] == '_concat': | ||||
pending.extend(reversed(e[1:])) | ||||
elif e[0] in ('string', 'symbol'): | ||||
l.append(e[1]) | ||||
else: | ||||
msg = _("\"##\" can't concatenate \"%s\" element") % (e[0]) | ||||
raise error.ParseError(msg) | ||||
return ('string', ''.join(l)) | ||||
else: | ||||
return tuple(foldconcat(t) for t in tree) | ||||
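# Illustrative sketch (the '_concat' node shape is an assumption based on the
# parser): folding "rev ## '-tip'" turns
#   ('_concat', ('symbol', 'rev'), ('string', '-tip'))
# into the single node ('string', 'rev-tip'), which is what foldconcat()
# returns for any chain of string/symbol operands.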
Alexander Solovyov
|
r14098 | |||
Matt Mackall
|
r20779 | def parse(spec, lookup=None): | ||
Yuya Nishihara
|
r25654 | p = parser.parser(elements) | ||
tree, pos = p.parse(tokenize(spec, lookup=lookup)) | ||||
Yuya Nishihara
|
r25251 | if pos != len(spec): | ||
raise error.ParseError(_("invalid token"), pos) | ||||
Yuya Nishihara
|
r25309 | return parser.simplifyinfixops(tree, ('or',)) | ||
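# Hedged example of what parse() is expected to return (node names are
# assumptions based on the grammar defined earlier in this file):
#   parse("::tip and not merge()")
#   -> ('and',
#       ('dagrangepre', ('symbol', 'tip')),
#       ('not', ('func', ('symbol', 'merge'), None)))
# prettyformat() below can be used to render such trees for debugging.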
Matt Mackall
|
r20779 | |||
Laurent Charignon
|
r24518 | def posttreebuilthook(tree, repo): | ||
# hook for extensions to execute code on the optimized tree | ||||
pass | ||||
Matt Mackall
|
r20779 | def match(ui, spec, repo=None): | ||
Matt Mackall
|
r11385 | if not spec: | ||
raise error.ParseError(_("empty query")) | ||||
Matt Mackall
|
r20779 | lookup = None | ||
if repo: | ||||
lookup = repo.__contains__ | ||||
Yuya Nishihara
|
r25251 | tree = parse(spec, lookup) | ||
Matt Mackall
|
r14900 | if ui: | ||
FUJIWARA Katsunori
|
r23725 | tree = findaliases(ui, tree, showwarning=ui.warn) | ||
FUJIWARA Katsunori
|
r23742 | tree = foldconcat(tree) | ||
Matt Mackall
|
r11279 | weight, tree = optimize(tree, True) | ||
Laurent Charignon
|
r24518 | posttreebuilthook(tree, repo) | ||
Yuya Nishihara
|
r24114 | def mfunc(repo, subset=None): | ||
if subset is None: | ||||
Yuya Nishihara
|
r24115 | subset = fullreposet(repo) | ||
Pierre-Yves David
|
r22885 | if util.safehasattr(subset, 'isascending'): | ||
Pierre-Yves David
|
r22686 | result = getset(repo, subset, tree) | ||
else: | ||||
result = getset(repo, baseset(subset), tree) | ||||
return result | ||||
Matt Mackall
|
r11275 | return mfunc | ||
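# Hedged usage sketch (requires a real repository, so shown as comments only):
#   m = match(repo.ui, 'heads(default)', repo)   # compile the revset once
#   for r in m(repo):                            # evaluate it to a smartset
#       ...                                      # r is a revision number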
Patrick Mezard
|
r12821 | |||
Matt Mackall
|
r14901 | def formatspec(expr, *args): | ||
''' | ||||
This is a convenience function for using revsets internally, and | ||||
escapes arguments appropriately. Aliases are intentionally ignored | ||||
so that intended expression behavior isn't accidentally subverted. | ||||
Supported arguments: | ||||
Matt Mackall
|
r15266 | %r = revset expression, parenthesized | ||
Matt Mackall
|
r14901 | %d = int(arg), no quoting | ||
%s = string(arg), escaped and single-quoted | ||||
%b = arg.branch(), escaped and single-quoted | ||||
%n = hex(arg), single-quoted | ||||
%% = a literal '%' | ||||
Matt Mackall
|
r15266 | Prefixing the type with 'l' specifies a parenthesized list of that type. | ||
Matt Mackall
|
r15140 | |||
Matt Mackall
|
r15268 | >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()")) | ||
'(10 or 11):: and ((this()) or (that()))' | ||||
Matt Mackall
|
r14901 | >>> formatspec('%d:: and not %d::', 10, 20) | ||
'10:: and not 20::' | ||||
Matt Mackall
|
r15325 | >>> formatspec('%ld or %ld', [], [1]) | ||
Matt Mackall
|
r15898 | "_list('') or 1" | ||
Matt Mackall
|
r14901 | >>> formatspec('keyword(%s)', 'foo\\xe9') | ||
"keyword('foo\\\\xe9')" | ||||
>>> b = lambda: 'default' | ||||
>>> b.branch = b | ||||
>>> formatspec('branch(%b)', b) | ||||
"branch('default')" | ||||
Matt Mackall
|
r15140 | >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd']) | ||
Matt Mackall
|
r15898 | "root(_list('a\\x00b\\x00c\\x00d'))" | ||
Matt Mackall
|
r14901 | ''' | ||
def quote(s): | ||||
return repr(str(s)) | ||||
Matt Mackall
|
r15140 | def argtype(c, arg): | ||
if c == 'd': | ||||
return str(int(arg)) | ||||
elif c == 's': | ||||
return quote(arg) | ||||
Matt Mackall
|
r15266 | elif c == 'r': | ||
parse(arg) # make sure syntax errors are confined | ||||
return '(%s)' % arg | ||||
Matt Mackall
|
r15140 | elif c == 'n': | ||
Matt Mackall
|
r16417 | return quote(node.hex(arg)) | ||
Matt Mackall
|
r15140 | elif c == 'b': | ||
return quote(arg.branch()) | ||||
Matt Mackall
|
r15595 | def listexp(s, t): | ||
l = len(s) | ||||
if l == 0: | ||||
Matt Mackall
|
r15898 | return "_list('')" | ||
elif l == 1: | ||||
Matt Mackall
|
r15595 | return argtype(t, s[0]) | ||
Matt Mackall
|
r15898 | elif t == 'd': | ||
Lucas Moscovicz
|
r20566 | return "_intlist('%s')" % "\0".join(str(int(a)) for a in s) | ||
Matt Mackall
|
r15898 | elif t == 's': | ||
return "_list('%s')" % "\0".join(s) | ||||
elif t == 'n': | ||||
Lucas Moscovicz
|
r20569 | return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s) | ||
Matt Mackall
|
r15898 | elif t == 'b': | ||
return "_list('%s')" % "\0".join(a.branch() for a in s) | ||||
Martin Geisler
|
r15791 | m = l // 2 | ||
Matt Mackall
|
r15595 | return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t)) | ||
Matt Mackall
|
r14901 | ret = '' | ||
pos = 0 | ||||
arg = 0 | ||||
while pos < len(expr): | ||||
c = expr[pos] | ||||
if c == '%': | ||||
pos += 1 | ||||
d = expr[pos] | ||||
if d == '%': | ||||
ret += d | ||||
Matt Mackall
|
r15268 | elif d in 'dsnbr': | ||
Matt Mackall
|
r15140 | ret += argtype(d, args[arg]) | ||
Matt Mackall
|
r14901 | arg += 1 | ||
Matt Mackall
|
r15140 | elif d == 'l': | ||
# a list of some type | ||||
pos += 1 | ||||
d = expr[pos] | ||||
Matt Mackall
|
r15596 | ret += listexp(list(args[arg]), d) | ||
Matt Mackall
|
r14901 | arg += 1 | ||
else: | ||||
raise util.Abort('unexpected revspec format character %s' % d) | ||||
else: | ||||
ret += c | ||||
pos += 1 | ||||
return ret | ||||
Patrick Mezard
|
r16218 | def prettyformat(tree): | ||
Yuya Nishihara
|
r25253 | return parser.prettyformat(tree, ('string', 'symbol')) | ||
Patrick Mezard
|
r16218 | |||
Alexander Plavin
|
r19719 | def depth(tree): | ||
if isinstance(tree, tuple): | ||||
return max(map(depth, tree)) + 1 | ||||
else: | ||||
return 0 | ||||
Alexander Plavin
|
r19720 | def funcsused(tree): | ||
if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'): | ||||
return set() | ||||
else: | ||||
funcs = set() | ||||
for s in tree[1:]: | ||||
funcs |= funcsused(s) | ||||
if tree[0] == 'func': | ||||
funcs.add(tree[1][1]) | ||||
return funcs | ||||
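# Illustrative sketch: funcsused() collects the names of 'func' nodes, so for
# a tree corresponding to "grep(foo) and head()" (tree shape assumed from the
# parser) it returns set(['grep', 'head']):
#   ('and', ('func', ('symbol', 'grep'), ('symbol', 'foo')),
#           ('func', ('symbol', 'head'), None))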
Pierre-Yves David
|
r22692 | class abstractsmartset(object): | ||
def __nonzero__(self): | ||||
"""True if the smartset is not empty""" | ||||
raise NotImplementedError() | ||||
def __contains__(self, rev): | ||||
"""provide fast membership testing""" | ||||
raise NotImplementedError() | ||||
def __iter__(self): | ||||
"""iterate the set in the order it is supposed to be iterated""" | ||||
raise NotImplementedError() | ||||
Pierre-Yves David
|
r22716 | # Attributes containing a function to perform a fast iteration in a given | ||
# direction. A smartset can have none, one, or both defined. | ||||
# | ||||
# Default value is None instead of a function returning None to avoid | ||||
# initializing an iterator just for testing if a fast method exists. | ||||
fastasc = None | ||||
fastdesc = None | ||||
Pierre-Yves David
|
r22692 | def isascending(self): | ||
"""True if the set will iterate in ascending order""" | ||||
raise NotImplementedError() | ||||
def isdescending(self): | ||||
"""True if the set will iterate in descending order""" | ||||
raise NotImplementedError() | ||||
def min(self): | ||||
"""return the minimum element in the set""" | ||||
Pierre-Yves David
|
r22722 | if self.fastasc is not None: | ||
for r in self.fastasc(): | ||||
return r | ||||
raise ValueError('arg is an empty sequence') | ||||
return min(self) | ||||
Pierre-Yves David
|
r22692 | |||
def max(self): | ||||
"""return the maximum element in the set""" | ||||
Pierre-Yves David
|
r22722 | if self.fastdesc is not None: | ||
for r in self.fastdesc(): | ||||
return r | ||||
raise ValueError('arg is an empty sequence') | ||||
return max(self) | ||||
Pierre-Yves David
|
r22692 | |||
Pierre-Yves David
|
r22808 | def first(self): | ||
"""return the first element in the set (user iteration perspective) | ||||
Return None if the set is empty""" | ||||
raise NotImplementedError() | ||||
def last(self): | ||||
"""return the last element in the set (user iteration perspective) | ||||
Return None if the set is empty""" | ||||
raise NotImplementedError() | ||||
Pierre-Yves David
|
r22995 | def __len__(self): | ||
"""return the length of the smartsets | ||||
This can be expensive on smartset that could be lazy otherwise.""" | ||||
raise NotImplementedError() | ||||
Pierre-Yves David
|
r22692 | def reverse(self): | ||
"""reverse the expected iteration order""" | ||||
raise NotImplementedError() | ||||
def sort(self, reverse=True): | ||||
"""get the set to iterate in an ascending or descending order""" | ||||
raise NotImplementedError() | ||||
def __and__(self, other): | ||||
"""Returns a new object with the intersection of the two collections. | ||||
This is part of the mandatory API for smartset.""" | ||||
Yuya Nishihara
|
r24459 | if isinstance(other, fullreposet): | ||
return self | ||||
Pierre-Yves David
|
r22864 | return self.filter(other.__contains__, cache=False) | ||
Pierre-Yves David
|
r22692 | |||
def __add__(self, other): | ||||
"""Returns a new object with the union of the two collections. | ||||
This is part of the mandatory API for smartset.""" | ||||
Pierre-Yves David
|
r22861 | return addset(self, other) | ||
Pierre-Yves David
|
r22692 | |||
def __sub__(self, other): | ||||
"""Returns a new object with the substraction of the two collections. | ||||
This is part of the mandatory API for smartset.""" | ||||
Pierre-Yves David
|
r22730 | c = other.__contains__ | ||
Pierre-Yves David
|
r22864 | return self.filter(lambda r: not c(r), cache=False) | ||
def filter(self, condition, cache=True): | ||||
Pierre-Yves David
|
r22692 | """Returns this smartset filtered by condition as a new smartset. | ||
`condition` is a callable which takes a revision number and returns a | ||||
boolean. | ||||
This is part of the mandatory API for smartset.""" | ||||
Pierre-Yves David
|
r22864 | # builtin functions cannot be cached, but they do not need to be | ||
if cache and util.safehasattr(condition, 'func_code'): | ||||
condition = util.cachefunc(condition) | ||||
Pierre-Yves David
|
r22862 | return filteredset(self, condition) | ||
Pierre-Yves David
|
r22692 | |||
Pierre-Yves David
|
r22825 | class baseset(abstractsmartset): | ||
Lucas Moscovicz
|
r20416 | """Basic data structure that represents a revset and contains the basic | ||
operation that it should be able to perform. | ||||
Lucas Moscovicz
|
r20727 | |||
Every method in this class should be implemented by any smartset class. | ||||
Lucas Moscovicz
|
r20416 | """ | ||
Pierre-Yves David
|
r20752 | def __init__(self, data=()): | ||
Pierre-Yves David
|
r22825 | if not isinstance(data, list): | ||
data = list(data) | ||||
self._list = data | ||||
Pierre-Yves David
|
r22827 | self._ascending = None | ||
Lucas Moscovicz
|
r20365 | |||
Pierre-Yves David
|
r22826 | @util.propertycache | ||
Pierre-Yves David
|
r22879 | def _set(self): | ||
return set(self._list) | ||||
@util.propertycache | ||||
Pierre-Yves David
|
r22826 | def _asclist(self): | ||
asclist = self._list[:] | ||||
asclist.sort() | ||||
return asclist | ||||
Pierre-Yves David
|
r22827 | def __iter__(self): | ||
if self._ascending is None: | ||||
return iter(self._list) | ||||
elif self._ascending: | ||||
return iter(self._asclist) | ||||
else: | ||||
return reversed(self._asclist) | ||||
Pierre-Yves David
|
r22826 | def fastasc(self): | ||
return iter(self._asclist) | ||||
def fastdesc(self): | ||||
return reversed(self._asclist) | ||||
Pierre-Yves David
|
r22503 | @util.propertycache | ||
def __contains__(self): | ||||
Pierre-Yves David
|
r22880 | return self._set.__contains__ | ||
Pierre-Yves David
|
r22503 | |||
Pierre-Yves David
|
r22691 | def __nonzero__(self): | ||
Pierre-Yves David
|
r22825 | return bool(self._list) | ||
def sort(self, reverse=False): | ||||
Pierre-Yves David
|
r22829 | self._ascending = not bool(reverse) | ||
Pierre-Yves David
|
r22825 | |||
def reverse(self): | ||||
Pierre-Yves David
|
r22829 | if self._ascending is None: | ||
self._list.reverse() | ||||
else: | ||||
self._ascending = not self._ascending | ||||
Pierre-Yves David
|
r22825 | |||
def __len__(self): | ||||
return len(self._list) | ||||
Pierre-Yves David
|
r22691 | |||
Lucas Moscovicz
|
r20725 | def isascending(self): | ||
Lucas Moscovicz
|
r20727 | """Returns True if the collection is ascending order, False if not. | ||
This is part of the mandatory API for smartset.""" | ||||
Pierre-Yves David
|
r22863 | if len(self) <= 1: | ||
return True | ||||
Pierre-Yves David
|
r22828 | return self._ascending is not None and self._ascending | ||
Lucas Moscovicz
|
r20725 | |||
def isdescending(self): | ||||
Lucas Moscovicz
|
r20727 | """Returns True if the collection is descending order, False if not. | ||
This is part of the mandatory API for smartset.""" | ||||
Pierre-Yves David
|
r22863 | if len(self) <= 1: | ||
return True | ||||
Pierre-Yves David
|
r22828 | return self._ascending is not None and not self._ascending | ||
Lucas Moscovicz
|
r20725 | |||
Pierre-Yves David
|
r22812 | def first(self): | ||
if self: | ||||
Pierre-Yves David
|
r22829 | if self._ascending is None: | ||
return self._list[0] | ||||
elif self._ascending: | ||||
return self._asclist[0] | ||||
else: | ||||
return self._asclist[-1] | ||||
Pierre-Yves David
|
r22812 | return None | ||
def last(self): | ||||
if self: | ||||
Pierre-Yves David
|
r22829 | if self._ascending is None: | ||
return self._list[-1] | ||||
elif self._ascending: | ||||
return self._asclist[-1] | ||||
else: | ||||
return self._asclist[0] | ||||
Pierre-Yves David
|
r22812 | return None | ||
Yuya Nishihara
|
r24457 | def __repr__(self): | ||
d = {None: '', False: '-', True: '+'}[self._ascending] | ||||
return '<%s%s %r>' % (type(self).__name__, d, self._list) | ||||
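# Hedged behaviour sketch, derived from the tri-state _ascending flag above
# (not a doctest from the original module):
#   s = baseset([3, 1, 2])
#   list(s)             # [3, 1, 2]  -- insertion order while _ascending is None
#   s.sort()
#   list(s)             # [1, 2, 3]
#   s.reverse()
#   list(s), s.first()  # ([3, 2, 1], 3)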
Pierre-Yves David
|
r22726 | class filteredset(abstractsmartset): | ||
Lucas Moscovicz
|
r20427 | """Duck type for baseset class which iterates lazily over the revisions in | ||
the subset and contains a function which tests for membership in the | ||||
revset | ||||
""" | ||||
Pierre-Yves David
|
r22862 | def __init__(self, subset, condition=lambda x: True): | ||
Pierre-Yves David
|
r20738 | """ | ||
condition: a function that decides whether a revision in the subset | ||||
belongs to the revset or not. | ||||
""" | ||||
Lucas Moscovicz
|
r20427 | self._subset = subset | ||
self._condition = condition | ||||
Lucas Moscovicz
|
r20512 | self._cache = {} | ||
Lucas Moscovicz
|
r20427 | |||
def __contains__(self, x): | ||||
Lucas Moscovicz
|
r20512 | c = self._cache | ||
if x not in c: | ||||
Pierre-Yves David
|
r22527 | v = c[x] = x in self._subset and self._condition(x) | ||
return v | ||||
Lucas Moscovicz
|
r20512 | return c[x] | ||
Lucas Moscovicz
|
r20427 | |||
def __iter__(self): | ||||
Pierre-Yves David
|
r22719 | return self._iterfilter(self._subset) | ||
def _iterfilter(self, it): | ||||
Lucas Moscovicz
|
r20427 | cond = self._condition | ||
Pierre-Yves David
|
r22719 | for x in it: | ||
Lucas Moscovicz
|
r20427 | if cond(x): | ||
yield x | ||||
Pierre-Yves David
|
r22720 | @property | ||
def fastasc(self): | ||||
it = self._subset.fastasc | ||||
if it is None: | ||||
return None | ||||
return lambda: self._iterfilter(it()) | ||||
@property | ||||
def fastdesc(self): | ||||
it = self._subset.fastdesc | ||||
if it is None: | ||||
return None | ||||
return lambda: self._iterfilter(it()) | ||||
Lucas Moscovicz
|
r20552 | def __nonzero__(self): | ||
for r in self: | ||||
return True | ||||
return False | ||||
Lucas Moscovicz
|
r20429 | def __len__(self): | ||
# Basic implementation to be changed in future patches. | ||||
l = baseset([r for r in self]) | ||||
return len(l) | ||||
def sort(self, reverse=False): | ||||
Pierre-Yves David
|
r22862 | self._subset.sort(reverse=reverse) | ||
Lucas Moscovicz
|
r20429 | |||
def reverse(self): | ||||
self._subset.reverse() | ||||
Lucas Moscovicz
|
r20725 | def isascending(self): | ||
Pierre-Yves David
|
r22862 | return self._subset.isascending() | ||
Lucas Moscovicz
|
r20725 | |||
def isdescending(self): | ||||
Pierre-Yves David
|
r22862 | return self._subset.isdescending() | ||
Lucas Moscovicz
|
r20725 | |||
Pierre-Yves David
|
r22813 | def first(self): | ||
for x in self: | ||||
return x | ||||
return None | ||||
def last(self): | ||||
it = None | ||||
Pierre-Yves David
|
r25648 | if self.isascending(): | ||
Pierre-Yves David
|
r22862 | it = self.fastdesc | ||
Pierre-Yves David
|
r25648 | elif self.isdescending(): | ||
it = self.fastasc | ||||
if it is not None: | ||||
for x in it(): | ||||
return x | ||||
return None #empty case | ||||
else: | ||||
x = None | ||||
for x in self: | ||||
pass | ||||
Pierre-Yves David
|
r22813 | return x | ||
Yuya Nishihara
|
r24457 | def __repr__(self): | ||
return '<%s %r>' % (type(self).__name__, self._subset) | ||||
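# Hedged behaviour sketch (not a doctest from the original module):
#   fs = filteredset(baseset([1, 2, 3, 4]), lambda r: r % 2 == 0)
#   list(fs), 2 in fs, len(fs)   # ([2, 4], True, 2)
# Membership results are memoized in _cache, so repeated containment tests of
# the same revision do not re-run the condition.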
Yuya Nishihara
|
r25308 | # this function will be removed, or merged to addset or orset, when | ||
# - scmutil.revrange() can be rewritten to not combine calculated smartsets | ||||
# - or addset can handle more than two sets without balanced tree | ||||
def _combinesets(subsets): | ||||
"""Create balanced tree of addsets representing union of given sets""" | ||||
if not subsets: | ||||
return baseset() | ||||
if len(subsets) == 1: | ||||
return subsets[0] | ||||
p = len(subsets) // 2 | ||||
xs = _combinesets(subsets[:p]) | ||||
ys = _combinesets(subsets[p:]) | ||||
return addset(xs, ys) | ||||
Yuya Nishihara
|
r25131 | def _iterordered(ascending, iter1, iter2): | ||
"""produce an ordered iteration from two iterators with the same order | ||||
The 'ascending' argument is used to indicate the iteration direction. | ||||
""" | ||||
choice = max | ||||
if ascending: | ||||
choice = min | ||||
val1 = None | ||||
val2 = None | ||||
try: | ||||
# Consume both iterators in an ordered way until one is empty | ||||
while True: | ||||
if val1 is None: | ||||
val1 = iter1.next() | ||||
if val2 is None: | ||||
val2 = iter2.next() | ||||
next = choice(val1, val2) | ||||
yield next | ||||
if val1 == next: | ||||
val1 = None | ||||
if val2 == next: | ||||
val2 = None | ||||
except StopIteration: | ||||
# Flush any remaining values and consume the other one | ||||
it = iter2 | ||||
if val1 is not None: | ||||
yield val1 | ||||
it = iter1 | ||||
elif val2 is not None: | ||||
# might have been equality and both are empty | ||||
yield val2 | ||||
for val in it: | ||||
yield val | ||||
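# Hedged behaviour sketch (not a doctest from the original module): given two
# iterators that are already sorted the same way, _iterordered() merges them
# and collapses duplicates:
#   list(_iterordered(True, iter([1, 3, 5]), iter([2, 3, 6])))    # [1, 2, 3, 5, 6]
#   list(_iterordered(False, iter([5, 3, 1]), iter([6, 3, 2])))   # [6, 5, 3, 2, 1]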
Pierre-Yves David
|
r22793 | class addset(abstractsmartset): | ||
Lucas Moscovicz
|
r20708 | """Represent the addition of two sets | ||
Wrapper structure for lazily adding two structures without losing much | ||||
Lucas Moscovicz
|
r20694 | performance on the __contains__ method | ||
Lucas Moscovicz
|
r20708 | |||
Lucas Moscovicz
|
r20712 | If the ascending attribute is set, that means the two structures are | ||
ordered in either an ascending or descending way. Therefore, we can add | ||||
Mads Kiilerich
|
r21024 | them maintaining the order by iterating over both at the same time | ||
Yuya Nishihara
|
r25024 | |||
>>> xs = baseset([0, 3, 2]) | ||||
>>> ys = baseset([5, 2, 4]) | ||||
>>> rs = addset(xs, ys) | ||||
>>> bool(rs), 0 in rs, 1 in rs, 5 in rs, rs.first(), rs.last() | ||||
(True, True, False, True, 0, 4) | ||||
>>> rs = addset(xs, baseset([])) | ||||
>>> bool(rs), 0 in rs, 1 in rs, rs.first(), rs.last() | ||||
(True, True, False, 0, 2) | ||||
>>> rs = addset(baseset([]), baseset([])) | ||||
>>> bool(rs), 0 in rs, rs.first(), rs.last() | ||||
(False, False, None, None) | ||||
iterate unsorted: | ||||
>>> rs = addset(xs, ys) | ||||
>>> [x for x in rs] # without _genlist | ||||
[0, 3, 2, 5, 4] | ||||
>>> assert not rs._genlist | ||||
>>> len(rs) | ||||
5 | ||||
>>> [x for x in rs] # with _genlist | ||||
[0, 3, 2, 5, 4] | ||||
>>> assert rs._genlist | ||||
iterate ascending: | ||||
>>> rs = addset(xs, ys, ascending=True) | ||||
>>> [x for x in rs], [x for x in rs.fastasc()] # without _asclist | ||||
([0, 2, 3, 4, 5], [0, 2, 3, 4, 5]) | ||||
>>> assert not rs._asclist | ||||
Pierre-Yves David
|
r25115 | >>> len(rs) | ||
5 | ||||
>>> [x for x in rs], [x for x in rs.fastasc()] | ||||
([0, 2, 3, 4, 5], [0, 2, 3, 4, 5]) | ||||
Yuya Nishihara
|
r25024 | >>> assert rs._asclist | ||
iterate descending: | ||||
>>> rs = addset(xs, ys, ascending=False) | ||||
>>> [x for x in rs], [x for x in rs.fastdesc()] # without _asclist | ||||
([5, 4, 3, 2, 0], [5, 4, 3, 2, 0]) | ||||
>>> assert not rs._asclist | ||||
Pierre-Yves David
|
r25115 | >>> len(rs) | ||
5 | ||||
>>> [x for x in rs], [x for x in rs.fastdesc()] | ||||
([5, 4, 3, 2, 0], [5, 4, 3, 2, 0]) | ||||
Yuya Nishihara
|
r25024 | >>> assert rs._asclist | ||
iterate ascending without fastasc: | ||||
>>> rs = addset(xs, generatorset(ys), ascending=True) | ||||
>>> assert rs.fastasc is None | ||||
Pierre-Yves David
|
r25115 | >>> [x for x in rs] | ||
[0, 2, 3, 4, 5] | ||||
Yuya Nishihara
|
r25024 | |||
iterate descending without fastdesc: | ||||
>>> rs = addset(generatorset(xs), ys, ascending=False) | ||||
>>> assert rs.fastdesc is None | ||||
Pierre-Yves David
|
r25115 | >>> [x for x in rs] | ||
[5, 4, 3, 2, 0] | ||||
Lucas Moscovicz
|
r20694 | """ | ||
Lucas Moscovicz
|
r20712 | def __init__(self, revs1, revs2, ascending=None): | ||
Lucas Moscovicz
|
r20694 | self._r1 = revs1 | ||
self._r2 = revs2 | ||||
self._iter = None | ||||
Lucas Moscovicz
|
r20712 | self._ascending = ascending | ||
Lucas Moscovicz
|
r20720 | self._genlist = None | ||
Pierre-Yves David
|
r22859 | self._asclist = None | ||
Lucas Moscovicz
|
r20720 | |||
Pierre-Yves David
|
r20845 | def __len__(self): | ||
return len(self._list) | ||||
Pierre-Yves David
|
r22743 | def __nonzero__(self): | ||
Durham Goode
|
r23100 | return bool(self._r1) or bool(self._r2) | ||
Pierre-Yves David
|
r22743 | |||
Lucas Moscovicz
|
r20720 | @util.propertycache | ||
def _list(self): | ||||
if not self._genlist: | ||||
Pierre-Yves David
|
r25115 | self._genlist = baseset(iter(self)) | ||
Lucas Moscovicz
|
r20720 | return self._genlist | ||
Lucas Moscovicz
|
r20694 | |||
Pierre-Yves David
|
r25115 | def __iter__(self): | ||
Lucas Moscovicz
|
r20722 | """Iterate over both collections without repeating elements | ||
If the ascending attribute is not set, iterate over the first one and | ||||
then over the second one checking for membership in the first one so we | ||||
don't yield any duplicates. | ||||
If the ascending attribute is set, iterate over both collections at the | ||||
same time, yielding only one value at a time in the given order. | ||||
""" | ||||
Pierre-Yves David
|
r22799 | if self._ascending is None: | ||
Pierre-Yves David
|
r25115 | if self._genlist: | ||
return iter(self._genlist) | ||||
def arbitraryordergen(): | ||||
Pierre-Yves David
|
r22799 | for r in self._r1: | ||
yield r | ||||
Pierre-Yves David
|
r22881 | inr1 = self._r1.__contains__ | ||
Pierre-Yves David
|
r22799 | for r in self._r2: | ||
Pierre-Yves David
|
r22881 | if not inr1(r): | ||
Lucas Moscovicz
|
r20694 | yield r | ||
Pierre-Yves David
|
r25115 | return arbitraryordergen() | ||
# try to use our own fast iterator if it exists | ||||
Pierre-Yves David
|
r22859 | self._trysetasclist() | ||
if self._ascending: | ||||
Yuya Nishihara
|
r25130 | attr = 'fastasc' | ||
Pierre-Yves David
|
r22859 | else: | ||
Yuya Nishihara
|
r25130 | attr = 'fastdesc' | ||
it = getattr(self, attr) | ||||
Pierre-Yves David
|
r25115 | if it is not None: | ||
return it() | ||||
# maybe half of the component supports fast | ||||
# get iterator for _r1 | ||||
iter1 = getattr(self._r1, attr) | ||||
if iter1 is None: | ||||
# let's avoid side effect (not sure it matters) | ||||
iter1 = iter(sorted(self._r1, reverse=not self._ascending)) | ||||
else: | ||||
iter1 = iter1() | ||||
# get iterator for _r2 | ||||
iter2 = getattr(self._r2, attr) | ||||
if iter2 is None: | ||||
# let's avoid side effect (not sure it matters) | ||||
iter2 = iter(sorted(self._r2, reverse=not self._ascending)) | ||||
else: | ||||
iter2 = iter2() | ||||
Yuya Nishihara
|
r25131 | return _iterordered(self._ascending, iter1, iter2) | ||
Pierre-Yves David
|
r22859 | |||
def _trysetasclist(self): | ||||
Mads Kiilerich
|
r23139 | """populate the _asclist attribute if possible and necessary""" | ||
Pierre-Yves David
|
r22859 | if self._genlist is not None and self._asclist is None: | ||
self._asclist = sorted(self._genlist) | ||||
Lucas Moscovicz
|
r20694 | |||
Pierre-Yves David
|
r22742 | @property | ||
def fastasc(self): | ||||
Pierre-Yves David
|
r22859 | self._trysetasclist() | ||
if self._asclist is not None: | ||||
return self._asclist.__iter__ | ||||
Pierre-Yves David
|
r22742 | iter1 = self._r1.fastasc | ||
iter2 = self._r2.fastasc | ||||
if None in (iter1, iter2): | ||||
return None | ||||
Yuya Nishihara
|
r25131 | return lambda: _iterordered(True, iter1(), iter2()) | ||
Pierre-Yves David
|
r22742 | |||
@property | ||||
def fastdesc(self): | ||||
Pierre-Yves David
|
r22859 | self._trysetasclist() | ||
if self._asclist is not None: | ||||
return self._asclist.__reversed__ | ||||
Pierre-Yves David
|
r22742 | iter1 = self._r1.fastdesc | ||
iter2 = self._r2.fastdesc | ||||
if None in (iter1, iter2): | ||||
return None | ||||
Yuya Nishihara
|
r25131 | return lambda: _iterordered(False, iter1(), iter2()) | ||
Pierre-Yves David
|
r22741 | |||
Lucas Moscovicz
|
r20694 | def __contains__(self, x): | ||
return x in self._r1 or x in self._r2 | ||||
Lucas Moscovicz
|
r20724 | def sort(self, reverse=False): | ||
"""Sort the added set | ||||
For this we use the cached list with all the generated values and if we | ||||
know they are ascending or descending we can sort them in a smart way. | ||||
""" | ||||
Pierre-Yves David
|
r22859 | self._ascending = not reverse | ||
Lucas Moscovicz
|
r20724 | |||
Lucas Moscovicz
|
r20733 | def isascending(self): | ||
return self._ascending is not None and self._ascending | ||||
def isdescending(self): | ||||
return self._ascending is not None and not self._ascending | ||||
Lucas Moscovicz
|
r20723 | def reverse(self): | ||
Pierre-Yves David
|
r22859 | if self._ascending is None: | ||
self._list.reverse() | ||||
else: | ||||
Lucas Moscovicz
|
r20723 | self._ascending = not self._ascending | ||
Pierre-Yves David
|
r22810 | def first(self): | ||
Pierre-Yves David
|
r23127 | for x in self: | ||
return x | ||||
Pierre-Yves David
|
r22810 | return None | ||
def last(self): | ||||
Pierre-Yves David
|
r23127 | self.reverse() | ||
val = self.first() | ||||
self.reverse() | ||||
return val | ||||
Pierre-Yves David
|
r22810 | |||
Yuya Nishihara
|
r24457 | def __repr__(self): | ||
d = {None: '', False: '-', True: '+'}[self._ascending] | ||||
return '<%s%s %r, %r>' % (type(self).__name__, d, self._r1, self._r2) | ||||
Pierre-Yves David
|
r22795 | class generatorset(abstractsmartset): | ||
Lucas Moscovicz
|
r20705 | """Wrap a generator for lazy iteration | ||
Wrapper structure for generators that provides lazy membership and can | ||||
Lucas Moscovicz
|
r20540 | be iterated more than once. | ||
When asked for membership it generates values until either it finds the | ||||
requested one or has gone through all the elements in the generator | ||||
""" | ||||
Pierre-Yves David
|
r22755 | def __init__(self, gen, iterasc=None): | ||
Pierre-Yves David
|
r20739 | """ | ||
gen: a generator producing the values for the generatorset. | ||||
""" | ||||
Lucas Moscovicz
|
r20536 | self._gen = gen | ||
Pierre-Yves David
|
r22798 | self._asclist = None | ||
Lucas Moscovicz
|
r20536 | self._cache = {} | ||
Pierre-Yves David
|
r22796 | self._genlist = [] | ||
Lucas Moscovicz
|
r20703 | self._finished = False | ||
Pierre-Yves David
|
r22800 | self._ascending = True | ||
Pierre-Yves David
|
r22755 | if iterasc is not None: | ||
if iterasc: | ||||
Pierre-Yves David
|
r22797 | self.fastasc = self._iterator | ||
Pierre-Yves David
|
r22757 | self.__contains__ = self._asccontains | ||
Pierre-Yves David
|
r22755 | else: | ||
Pierre-Yves David
|
r22797 | self.fastdesc = self._iterator | ||
Pierre-Yves David
|
r22757 | self.__contains__ = self._desccontains | ||
Lucas Moscovicz
|
r20540 | |||
Pierre-Yves David
|
r22739 | def __nonzero__(self): | ||
Pierre-Yves David
|
r24936 | # Do not use 'for r in self' because it will enforce the iteration | ||
# order (default ascending), possibly unrolling a whole descending | ||||
# iterator. | ||||
if self._genlist: | ||||
return True | ||||
for r in self._consumegen(): | ||||
Pierre-Yves David
|
r22739 | return True | ||
return False | ||||
Lucas Moscovicz
|
r20536 | def __contains__(self, x): | ||
if x in self._cache: | ||||
return self._cache[x] | ||||
Gregory Szorc
|
r20828 | # Use new values only, as existing values would be cached. | ||
for l in self._consumegen(): | ||||
Lucas Moscovicz
|
r20634 | if l == x: | ||
return True | ||||
Lucas Moscovicz
|
r20536 | |||
self._cache[x] = False | ||||
return False | ||||
Pierre-Yves David
|
r22757 | def _asccontains(self, x): | ||
"""version of contains optimised for ascending generator""" | ||||
if x in self._cache: | ||||
return self._cache[x] | ||||
# Use new values only, as existing values would be cached. | ||||
for l in self._consumegen(): | ||||
if l == x: | ||||
return True | ||||
if l > x: | ||||
break | ||||
self._cache[x] = False | ||||
return False | ||||
def _desccontains(self, x): | ||||
"""version of contains optimised for descending generator""" | ||||
if x in self._cache: | ||||
return self._cache[x] | ||||
# Use new values only, as existing values would be cached. | ||||
for l in self._consumegen(): | ||||
if l == x: | ||||
return True | ||||
if l < x: | ||||
break | ||||
self._cache[x] = False | ||||
return False | ||||
Lucas Moscovicz
|
r20536 | def __iter__(self): | ||
Pierre-Yves David
|
r22800 | if self._ascending: | ||
it = self.fastasc | ||||
else: | ||||
it = self.fastdesc | ||||
if it is not None: | ||||
return it() | ||||
# we need to consume the iterator | ||||
for x in self._consumegen(): | ||||
pass | ||||
# recall the same code | ||||
return iter(self) | ||||
Pierre-Yves David
|
r22797 | |||
def _iterator(self): | ||||
Durham Goode
|
r20833 | if self._finished: | ||
Pierre-Yves David
|
r22670 | return iter(self._genlist) | ||
Durham Goode
|
r20833 | |||
Pierre-Yves David
|
r22494 | # We have to use this complex iteration strategy to allow multiple | ||
# iterations at the same time. We need to be able to catch revisions | ||||
Mads Kiilerich
|
r23139 | # removed from _consumegen and added to genlist in another instance. | ||
Pierre-Yves David
|
r22494 | # | ||
# Getting rid of it would provide about a 15% speed up on this | ||||
# iteration. | ||||
Durham Goode
|
r20833 | genlist = self._genlist | ||
Pierre-Yves David
|
r22669 | nextrev = self._consumegen().next | ||
_len = len # cache global lookup | ||||
Pierre-Yves David
|
r22670 | def gen(): | ||
i = 0 | ||||
while True: | ||||
if i < _len(genlist): | ||||
yield genlist[i] | ||||
else: | ||||
yield nextrev() | ||||
i += 1 | ||||
return gen() | ||||
Gregory Szorc
|
r20828 | |||
def _consumegen(self): | ||||
Pierre-Yves David
|
r22528 | cache = self._cache | ||
genlist = self._genlist.append | ||||
Lucas Moscovicz
|
r20634 | for item in self._gen: | ||
Pierre-Yves David
|
r22528 | cache[item] = True | ||
genlist(item) | ||||
Lucas Moscovicz
|
r20634 | yield item | ||
Pierre-Yves David
|
r22798 | if not self._finished: | ||
self._finished = True | ||||
asc = self._genlist[:] | ||||
asc.sort() | ||||
self._asclist = asc | ||||
self.fastasc = asc.__iter__ | ||||
self.fastdesc = asc.__reversed__ | ||||
Lucas Moscovicz
|
r20703 | |||
Pierre-Yves David
|
r22996 | def __len__(self): | ||
for x in self._consumegen(): | ||||
pass | ||||
return len(self._genlist) | ||||
Lucas Moscovicz
|
r20703 | def sort(self, reverse=False): | ||
Pierre-Yves David
|
r22800 | self._ascending = not reverse | ||
def reverse(self): | ||||
self._ascending = not self._ascending | ||||
Lucas Moscovicz
|
r20703 | |||
Pierre-Yves David
|
r22801 | def isascending(self): | ||
return self._ascending | ||||
def isdescending(self): | ||||
return not self._ascending | ||||
Pierre-Yves David
|
r22811 | def first(self): | ||
if self._ascending: | ||||
it = self.fastasc | ||||
else: | ||||
it = self.fastdesc | ||||
if it is None: | ||||
# we need to consume all and try again | ||||
for x in self._consumegen(): | ||||
pass | ||||
return self.first() | ||||
Pierre-Yves David
|
r25146 | return next(it(), None) | ||
Pierre-Yves David
|
r22811 | |||
def last(self): | ||||
if self._ascending: | ||||
it = self.fastdesc | ||||
else: | ||||
it = self.fastasc | ||||
if it is None: | ||||
# we need to consume all and try again | ||||
for x in self._consumegen(): | ||||
pass | ||||
return self.first() | ||||
Pierre-Yves David
|
r25146 | return next(it(), None) | ||
Pierre-Yves David
|
r22811 | |||
Yuya Nishihara
|
r24457 | def __repr__(self): | ||
d = {False: '-', True: '+'}[self._ascending] | ||||
return '<%s%s>' % (type(self).__name__, d) | ||||
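# Hedged behaviour sketch (not a doctest from the original module), assuming
# an ascending generator so the iterasc=True fast paths apply:
#   gs = generatorset(iter([0, 2, 5, 9]), iterasc=True)
#   3 in gs     # False -- stops consuming as soon as a value > 3 is seen
#   list(gs)    # [0, 2, 5, 9] -- previously consumed values are replayed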
Yuya Nishihara
|
r24116 | class spanset(abstractsmartset): | ||
Lucas Moscovicz
|
r20482 | """Duck type for baseset class which represents a range of revisions and | ||
can work lazily and without having all the range in memory | ||||
Lucas Moscovicz
|
r20737 | |||
Note that spanset(x, y) behaves almost like xrange(x, y) except for two | ||||
notable points: | ||||
- when x > y it will be automatically descending, | ||||
- revisions filtered out by this repoview will be skipped. | ||||
Lucas Moscovicz
|
r20482 | """ | ||
Lucas Moscovicz
|
r20525 | def __init__(self, repo, start=0, end=None): | ||
Lucas Moscovicz
|
r20737 | """ | ||
start: first revision included in the set | ||||
(defaults to 0) | ||||
end: first revision excluded (last+1) | ||||
(defaults to len(repo)) | ||||
Spanset will be descending if `end` < `start`. | ||||
""" | ||||
Pierre-Yves David
|
r22717 | if end is None: | ||
end = len(repo) | ||||
self._ascending = start <= end | ||||
if not self._ascending: | ||||
start, end = end + 1, start + 1 | ||||
Lucas Moscovicz
|
r20482 | self._start = start | ||
Pierre-Yves David
|
r22717 | self._end = end | ||
Lucas Moscovicz
|
r20525 | self._hiddenrevs = repo.changelog.filteredrevs | ||
Lucas Moscovicz
|
r20521 | |||
Pierre-Yves David
|
r22717 | def sort(self, reverse=False): | ||
self._ascending = not reverse | ||||
def reverse(self): | ||||
self._ascending = not self._ascending | ||||
def _iterfilter(self, iterrange): | ||||
s = self._hiddenrevs | ||||
for r in iterrange: | ||||
if r not in s: | ||||
yield r | ||||
Lucas Moscovicz
|
r20482 | def __iter__(self): | ||
Pierre-Yves David
|
r22717 | if self._ascending: | ||
return self.fastasc() | ||||
Lucas Moscovicz
|
r20482 | else: | ||
Pierre-Yves David
|
r22717 | return self.fastdesc() | ||
def fastasc(self): | ||||
iterrange = xrange(self._start, self._end) | ||||
Lucas Moscovicz
|
r20521 | if self._hiddenrevs: | ||
Pierre-Yves David
|
r22717 | return self._iterfilter(iterrange) | ||
return iter(iterrange) | ||||
def fastdesc(self): | ||||
iterrange = xrange(self._end - 1, self._start - 1, -1) | ||||
if self._hiddenrevs: | ||||
return self._iterfilter(iterrange) | ||||
return iter(iterrange) | ||||
Lucas Moscovicz
|
r20482 | |||
Pierre-Yves David
|
r21201 | def __contains__(self, rev): | ||
Pierre-Yves David
|
r22526 | hidden = self._hiddenrevs | ||
Pierre-Yves David
|
r22718 | return ((self._start <= rev < self._end) | ||
Pierre-Yves David
|
r22526 | and not (hidden and rev in hidden)) | ||
Lucas Moscovicz
|
r20482 | |||
Lucas Moscovicz
|
r20716 | def __nonzero__(self): | ||
for r in self: | ||||
return True | ||||
return False | ||||
Lucas Moscovicz
|
r20484 | def __len__(self): | ||
Lucas Moscovicz
|
r20521 | if not self._hiddenrevs: | ||
return abs(self._end - self._start) | ||||
else: | ||||
count = 0 | ||||
Pierre-Yves David
|
r21205 | start = self._start | ||
end = self._end | ||||
Lucas Moscovicz
|
r20521 | for rev in self._hiddenrevs: | ||
Pierre-Yves David
|
r21284 | if (end < rev <= start) or (start <= rev < end): | ||
Lucas Moscovicz
|
r20521 | count += 1 | ||
return abs(self._end - self._start) - count | ||||
Lucas Moscovicz
|
r20484 | |||
Lucas Moscovicz
|
r20725 | def isascending(self): | ||
Yuya Nishihara
|
r23826 | return self._ascending | ||
Lucas Moscovicz
|
r20725 | |||
def isdescending(self): | ||||
Yuya Nishihara
|
r23826 | return not self._ascending | ||
Lucas Moscovicz
|
r20725 | |||
Pierre-Yves David
|
r22809 | def first(self): | ||
if self._ascending: | ||||
it = self.fastasc | ||||
else: | ||||
it = self.fastdesc | ||||
for x in it(): | ||||
return x | ||||
return None | ||||
def last(self): | ||||
if self._ascending: | ||||
it = self.fastdesc | ||||
else: | ||||
it = self.fastasc | ||||
for x in it(): | ||||
return x | ||||
return None | ||||
Yuya Nishihara
|
r24457 | def __repr__(self): | ||
d = {False: '-', True: '+'}[self._ascending] | ||||
return '<%s%s %d:%d>' % (type(self).__name__, d, | ||||
self._start, self._end - 1) | ||||
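# Hedged behaviour sketch (requires a real repo object, so shown as comments
# only): spanset(repo, 2, 5) iterates 2, 3, 4 ascending, while
# spanset(repo, 5, 2) iterates 5, 4, 3 descending; in both cases revisions
# hidden by the current repoview filter are skipped.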
Yuya Nishihara
|
r24116 | class fullreposet(spanset): | ||
Pierre-Yves David
|
r22508 | """a set containing all revisions in the repo | ||
Yuya Nishihara
|
r24204 | This class exists to host special optimization and magic to handle virtual | ||
revisions such as "null". | ||||
Pierre-Yves David
|
r22508 | """ | ||
def __init__(self, repo): | ||||
super(fullreposet, self).__init__(repo) | ||||
Pierre-Yves David
|
r22510 | def __and__(self, other): | ||
Mads Kiilerich
|
r23139 | """As self contains the whole repo, all of the other set should also be | ||
in self. Therefore `self & other = other`. | ||||
Pierre-Yves David
|
r22510 | |||
This boldly assumes the other contains valid revs only. | ||||
""" | ||||
# other is not a smartset, make it so | ||||
Pierre-Yves David
|
r22883 | if not util.safehasattr(other, 'isascending'): | ||
Pierre-Yves David
|
r22510 | # filter out hidden revisions | ||
# (this boldly assumes all smartsets are pure) | ||||
# | ||||
# `other` was used with "&", let's assume this is a set-like | ||||
# object. | ||||
other = baseset(other - self._hiddenrevs) | ||||
Pierre-Yves David
|
r25547 | # XXX As fullreposet is also used as bootstrap, this is wrong. | ||
# | ||||
# With a giveme312() revset returning [3,1,2], this makes | ||||
# 'hg log -r "giveme312()"' -> 1, 2, 3 (wrong) | ||||
# We cannot just drop it because other usage still need to sort it: | ||||
# 'hg log -r "all() and giveme312()"' -> 1, 2, 3 (right) | ||||
# | ||||
# There is also some faulty revset implementations that rely on it | ||||
# (eg: children as of its state in e8075329c5fb) | ||||
# | ||||
# When we fix the two points above we can move this into the if clause | ||||
Yuya Nishihara
|
r23827 | other.sort(reverse=self.isdescending()) | ||
Pierre-Yves David
|
r22510 | return other | ||
Yuya Nishihara
|
r24458 | def prettyformatset(revs): | ||
lines = [] | ||||
rs = repr(revs) | ||||
p = 0 | ||||
while p < len(rs): | ||||
q = rs.find('<', p + 1) | ||||
if q < 0: | ||||
q = len(rs) | ||||
l = rs.count('<', 0, p) - rs.count('>', 0, p) | ||||
assert l >= 0 | ||||
lines.append((l, rs[p:q].rstrip())) | ||||
p = q | ||||
return '\n'.join(' ' * l + s for l, s in lines) | ||||
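# Illustrative sketch: prettyformatset() indents each nested smartset repr by
# its '<' nesting depth, so an unsorted addset of two basesets renders roughly
# as:
#   <addset
#    <baseset [0, 1, 2]>,
#    <baseset [5, 6]>>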
Patrick Mezard
|
r12823 | # tell hggettext to extract docstrings from these functions: | ||
i18nfunctions = symbols.values() | ||||