revset: add function to build dict of positional and keyword arguments...
Yuya Nishihara
r25705:48919d24 default
mercurial/parser.py
@@ -1,184 +1,216 @@
1 # parser.py - simple top-down operator precedence parser for mercurial
1 # parser.py - simple top-down operator precedence parser for mercurial
2 #
2 #
3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 # see http://effbot.org/zone/simple-top-down-parsing.htm and
8 # see http://effbot.org/zone/simple-top-down-parsing.htm and
9 # http://eli.thegreenplace.net/2010/01/02/top-down-operator-precedence-parsing/
9 # http://eli.thegreenplace.net/2010/01/02/top-down-operator-precedence-parsing/
10 # for background
10 # for background
11
11
12 # takes a tokenizer and elements
12 # takes a tokenizer and elements
13 # tokenizer is an iterator that returns (type, value, pos) tuples
13 # tokenizer is an iterator that returns (type, value, pos) tuples
14 # elements is a mapping of types to binding strength, prefix, infix and
14 # elements is a mapping of types to binding strength, prefix, infix and
15 # optional suffix actions
15 # optional suffix actions
16 # an action is a tree node name, a tree label, and an optional match
16 # an action is a tree node name, a tree label, and an optional match
17 # __call__(program) parses program into a labeled tree
17 # __call__(program) parses program into a labeled tree
18
18
19 import error
19 import error
20 from i18n import _
20 from i18n import _
21
21
22 class parser(object):
22 class parser(object):
23 def __init__(self, elements, methods=None):
23 def __init__(self, elements, methods=None):
24 self._elements = elements
24 self._elements = elements
25 self._methods = methods
25 self._methods = methods
26 self.current = None
26 self.current = None
27 def _advance(self):
27 def _advance(self):
28 'advance the tokenizer'
28 'advance the tokenizer'
29 t = self.current
29 t = self.current
30 self.current = next(self._iter, None)
30 self.current = next(self._iter, None)
31 return t
31 return t
32 def _match(self, m, pos):
32 def _match(self, m, pos):
33 'make sure the tokenizer matches an end condition'
33 'make sure the tokenizer matches an end condition'
34 if self.current[0] != m:
34 if self.current[0] != m:
35 raise error.ParseError(_("unexpected token: %s") % self.current[0],
35 raise error.ParseError(_("unexpected token: %s") % self.current[0],
36 self.current[2])
36 self.current[2])
37 self._advance()
37 self._advance()
38 def _parse(self, bind=0):
38 def _parse(self, bind=0):
39 token, value, pos = self._advance()
39 token, value, pos = self._advance()
40 # handle prefix rules on current token
40 # handle prefix rules on current token
41 prefix = self._elements[token][1]
41 prefix = self._elements[token][1]
42 if not prefix:
42 if not prefix:
43 raise error.ParseError(_("not a prefix: %s") % token, pos)
43 raise error.ParseError(_("not a prefix: %s") % token, pos)
44 if len(prefix) == 1:
44 if len(prefix) == 1:
45 expr = (prefix[0], value)
45 expr = (prefix[0], value)
46 else:
46 else:
47 if len(prefix) > 2 and prefix[2] == self.current[0]:
47 if len(prefix) > 2 and prefix[2] == self.current[0]:
48 self._match(prefix[2], pos)
48 self._match(prefix[2], pos)
49 expr = (prefix[0], None)
49 expr = (prefix[0], None)
50 else:
50 else:
51 expr = (prefix[0], self._parse(prefix[1]))
51 expr = (prefix[0], self._parse(prefix[1]))
52 if len(prefix) > 2:
52 if len(prefix) > 2:
53 self._match(prefix[2], pos)
53 self._match(prefix[2], pos)
54 # gather tokens until we meet a lower binding strength
54 # gather tokens until we meet a lower binding strength
55 while bind < self._elements[self.current[0]][0]:
55 while bind < self._elements[self.current[0]][0]:
56 token, value, pos = self._advance()
56 token, value, pos = self._advance()
57 e = self._elements[token]
57 e = self._elements[token]
58 # check for suffix - next token isn't a valid prefix
58 # check for suffix - next token isn't a valid prefix
59 if len(e) == 4 and not self._elements[self.current[0]][1]:
59 if len(e) == 4 and not self._elements[self.current[0]][1]:
60 suffix = e[3]
60 suffix = e[3]
61 expr = (suffix[0], expr)
61 expr = (suffix[0], expr)
62 else:
62 else:
63 # handle infix rules
63 # handle infix rules
64 if len(e) < 3 or not e[2]:
64 if len(e) < 3 or not e[2]:
65 raise error.ParseError(_("not an infix: %s") % token, pos)
65 raise error.ParseError(_("not an infix: %s") % token, pos)
66 infix = e[2]
66 infix = e[2]
67 if len(infix) == 3 and infix[2] == self.current[0]:
67 if len(infix) == 3 and infix[2] == self.current[0]:
68 self._match(infix[2], pos)
68 self._match(infix[2], pos)
69 expr = (infix[0], expr, (None))
69 expr = (infix[0], expr, (None))
70 else:
70 else:
71 expr = (infix[0], expr, self._parse(infix[1]))
71 expr = (infix[0], expr, self._parse(infix[1]))
72 if len(infix) == 3:
72 if len(infix) == 3:
73 self._match(infix[2], pos)
73 self._match(infix[2], pos)
74 return expr
74 return expr
75 def parse(self, tokeniter):
75 def parse(self, tokeniter):
76 'generate a parse tree from tokens'
76 'generate a parse tree from tokens'
77 self._iter = tokeniter
77 self._iter = tokeniter
78 self._advance()
78 self._advance()
79 res = self._parse()
79 res = self._parse()
80 token, value, pos = self.current
80 token, value, pos = self.current
81 return res, pos
81 return res, pos
82 def eval(self, tree):
82 def eval(self, tree):
83 'recursively evaluate a parse tree using node methods'
83 'recursively evaluate a parse tree using node methods'
84 if not isinstance(tree, tuple):
84 if not isinstance(tree, tuple):
85 return tree
85 return tree
86 return self._methods[tree[0]](*[self.eval(t) for t in tree[1:]])
86 return self._methods[tree[0]](*[self.eval(t) for t in tree[1:]])
87 def __call__(self, tokeniter):
87 def __call__(self, tokeniter):
88 'parse tokens into a parse tree and evaluate if methods given'
88 'parse tokens into a parse tree and evaluate if methods given'
89 t = self.parse(tokeniter)
89 t = self.parse(tokeniter)
90 if self._methods:
90 if self._methods:
91 return self.eval(t)
91 return self.eval(t)
92 return t
92 return t
93
93
94 def buildargsdict(trees, funcname, keys, keyvaluenode, keynode):
95 """Build dict from list containing positional and keyword arguments
96
97 Invalid keywords or too many positional arguments are rejected, but
98 missing arguments are just omitted.
99 """
100 if len(trees) > len(keys):
101 raise error.ParseError(_("%(func)s takes at most %(nargs)d arguments")
102 % {'func': funcname, 'nargs': len(keys)})
103 args = {}
104 # consume positional arguments
105 for k, x in zip(keys, trees):
106 if x[0] == keyvaluenode:
107 break
108 args[k] = x
109 # remainder should be keyword arguments
110 for x in trees[len(args):]:
111 if x[0] != keyvaluenode or x[1][0] != keynode:
112 raise error.ParseError(_("%(func)s got an invalid argument")
113 % {'func': funcname})
114 k = x[1][1]
115 if k not in keys:
116 raise error.ParseError(_("%(func)s got an unexpected keyword "
117 "argument '%(key)s'")
118 % {'func': funcname, 'key': k})
119 if k in args:
120 raise error.ParseError(_("%(func)s got multiple values for keyword "
121 "argument '%(key)s'")
122 % {'func': funcname, 'key': k})
123 args[k] = x[2]
124 return args
125
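Reviewer note: a quick sketch of how the new buildargsdict() behaves. The argument trees below are the shape the revset parser produces for a hypothetical keyword-style call such as limit(10, offset=3); the doctest-style session is illustrative and not part of the patch.

>>> trees = [('symbol', '10'),
...          ('keyvalue', ('symbol', 'offset'), ('symbol', '3'))]
>>> args = buildargsdict(trees, 'limit', ['limit', 'offset'],
...                      keyvaluenode='keyvalue', keynode='symbol')
>>> sorted(args.items())
[('limit', ('symbol', '10')), ('offset', ('symbol', '3'))]

Positional arguments are consumed until the first 'keyvalue' node; anything after that must be a valid keyword argument, otherwise ParseError is raised.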
94 def _prettyformat(tree, leafnodes, level, lines):
126 def _prettyformat(tree, leafnodes, level, lines):
95 if not isinstance(tree, tuple) or tree[0] in leafnodes:
127 if not isinstance(tree, tuple) or tree[0] in leafnodes:
96 lines.append((level, str(tree)))
128 lines.append((level, str(tree)))
97 else:
129 else:
98 lines.append((level, '(%s' % tree[0]))
130 lines.append((level, '(%s' % tree[0]))
99 for s in tree[1:]:
131 for s in tree[1:]:
100 _prettyformat(s, leafnodes, level + 1, lines)
132 _prettyformat(s, leafnodes, level + 1, lines)
101 lines[-1:] = [(lines[-1][0], lines[-1][1] + ')')]
133 lines[-1:] = [(lines[-1][0], lines[-1][1] + ')')]
102
134
103 def prettyformat(tree, leafnodes):
135 def prettyformat(tree, leafnodes):
104 lines = []
136 lines = []
105 _prettyformat(tree, leafnodes, 0, lines)
137 _prettyformat(tree, leafnodes, 0, lines)
106 output = '\n'.join((' ' * l + s) for l, s in lines)
138 output = '\n'.join((' ' * l + s) for l, s in lines)
107 return output
139 return output
108
140
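Reviewer note: prettyformat() has no doctest of its own; a minimal illustration of its output (same tuple shapes as in the simplifyinfixops doctests below):

>>> print prettyformat(('func', ('symbol', 'p1'), ('symbol', '2')), ('symbol',))
(func
  ('symbol', 'p1')
  ('symbol', '2'))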
109 def simplifyinfixops(tree, targetnodes):
141 def simplifyinfixops(tree, targetnodes):
110 """Flatten chained infix operations to reduce usage of Python stack
142 """Flatten chained infix operations to reduce usage of Python stack
111
143
112 >>> def f(tree):
144 >>> def f(tree):
113 ... print prettyformat(simplifyinfixops(tree, ('or',)), ('symbol',))
145 ... print prettyformat(simplifyinfixops(tree, ('or',)), ('symbol',))
114 >>> f(('or',
146 >>> f(('or',
115 ... ('or',
147 ... ('or',
116 ... ('symbol', '1'),
148 ... ('symbol', '1'),
117 ... ('symbol', '2')),
149 ... ('symbol', '2')),
118 ... ('symbol', '3')))
150 ... ('symbol', '3')))
119 (or
151 (or
120 ('symbol', '1')
152 ('symbol', '1')
121 ('symbol', '2')
153 ('symbol', '2')
122 ('symbol', '3'))
154 ('symbol', '3'))
123 >>> f(('func',
155 >>> f(('func',
124 ... ('symbol', 'p1'),
156 ... ('symbol', 'p1'),
125 ... ('or',
157 ... ('or',
126 ... ('or',
158 ... ('or',
127 ... ('func',
159 ... ('func',
128 ... ('symbol', 'sort'),
160 ... ('symbol', 'sort'),
129 ... ('list',
161 ... ('list',
130 ... ('or',
162 ... ('or',
131 ... ('or',
163 ... ('or',
132 ... ('symbol', '1'),
164 ... ('symbol', '1'),
133 ... ('symbol', '2')),
165 ... ('symbol', '2')),
134 ... ('symbol', '3')),
166 ... ('symbol', '3')),
135 ... ('negate',
167 ... ('negate',
136 ... ('symbol', 'rev')))),
168 ... ('symbol', 'rev')))),
137 ... ('and',
169 ... ('and',
138 ... ('symbol', '4'),
170 ... ('symbol', '4'),
139 ... ('group',
171 ... ('group',
140 ... ('or',
172 ... ('or',
141 ... ('or',
173 ... ('or',
142 ... ('symbol', '5'),
174 ... ('symbol', '5'),
143 ... ('symbol', '6')),
175 ... ('symbol', '6')),
144 ... ('symbol', '7'))))),
176 ... ('symbol', '7'))))),
145 ... ('symbol', '8'))))
177 ... ('symbol', '8'))))
146 (func
178 (func
147 ('symbol', 'p1')
179 ('symbol', 'p1')
148 (or
180 (or
149 (func
181 (func
150 ('symbol', 'sort')
182 ('symbol', 'sort')
151 (list
183 (list
152 (or
184 (or
153 ('symbol', '1')
185 ('symbol', '1')
154 ('symbol', '2')
186 ('symbol', '2')
155 ('symbol', '3'))
187 ('symbol', '3'))
156 (negate
188 (negate
157 ('symbol', 'rev'))))
189 ('symbol', 'rev'))))
158 (and
190 (and
159 ('symbol', '4')
191 ('symbol', '4')
160 (group
192 (group
161 (or
193 (or
162 ('symbol', '5')
194 ('symbol', '5')
163 ('symbol', '6')
195 ('symbol', '6')
164 ('symbol', '7'))))
196 ('symbol', '7'))))
165 ('symbol', '8')))
197 ('symbol', '8')))
166 """
198 """
167 if not isinstance(tree, tuple):
199 if not isinstance(tree, tuple):
168 return tree
200 return tree
169 op = tree[0]
201 op = tree[0]
170 if op not in targetnodes:
202 if op not in targetnodes:
171 return (op,) + tuple(simplifyinfixops(x, targetnodes) for x in tree[1:])
203 return (op,) + tuple(simplifyinfixops(x, targetnodes) for x in tree[1:])
172
204
173 # walk down left nodes taking each right node. no recursion to left nodes
205 # walk down left nodes taking each right node. no recursion to left nodes
174 # because infix operators are left-associative, i.e. left tree is deep.
206 # because infix operators are left-associative, i.e. left tree is deep.
175 # e.g. '1 + 2 + 3' -> (+ (+ 1 2) 3) -> (+ 1 2 3)
207 # e.g. '1 + 2 + 3' -> (+ (+ 1 2) 3) -> (+ 1 2 3)
176 simplified = []
208 simplified = []
177 x = tree
209 x = tree
178 while x[0] == op:
210 while x[0] == op:
179 l, r = x[1:]
211 l, r = x[1:]
180 simplified.append(simplifyinfixops(r, targetnodes))
212 simplified.append(simplifyinfixops(r, targetnodes))
181 x = l
213 x = l
182 simplified.append(simplifyinfixops(x, targetnodes))
214 simplified.append(simplifyinfixops(x, targetnodes))
183 simplified.append(op)
215 simplified.append(op)
184 return tuple(reversed(simplified))
216 return tuple(reversed(simplified))
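For reviewers unfamiliar with this parser: a self-contained toy grammar showing how the parser class above is driven. The elements table and token stream here are hypothetical, not mercurial's; the real revset grammar appears in revset.py below.

    toyelements = {
        "symbol": (0, ("symbol",), None),    # leaf: (binding, prefix, infix)
        "+": (4, None, ("add", 4)),          # left-associative infix "add"
        "end": (0, None, None),              # end marker stops the loop
    }
    def toytokens():
        # what a tokenizer would yield for the input "a+b"
        for t in [('symbol', 'a', 0), ('+', None, 1),
                  ('symbol', 'b', 2), ('end', None, 3)]:
            yield t
    p = parser(toyelements)
    tree, pos = p.parse(toytokens())
    # tree == ('add', ('symbol', 'a'), ('symbol', 'b')); pos == 3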
mercurial/revset.py
@@ -1,3643 +1,3647 @@
1 # revset.py - revision set queries for mercurial
1 # revset.py - revision set queries for mercurial
2 #
2 #
3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 import re
8 import re
9 import parser, util, error, hbisect, phases
9 import parser, util, error, hbisect, phases
10 import node
10 import node
11 import heapq
11 import heapq
12 import match as matchmod
12 import match as matchmod
13 from i18n import _
13 from i18n import _
14 import encoding
14 import encoding
15 import obsolete as obsmod
15 import obsolete as obsmod
16 import pathutil
16 import pathutil
17 import repoview
17 import repoview
18
18
19 def _revancestors(repo, revs, followfirst):
19 def _revancestors(repo, revs, followfirst):
20 """Like revlog.ancestors(), but supports followfirst."""
20 """Like revlog.ancestors(), but supports followfirst."""
21 if followfirst:
21 if followfirst:
22 cut = 1
22 cut = 1
23 else:
23 else:
24 cut = None
24 cut = None
25 cl = repo.changelog
25 cl = repo.changelog
26
26
27 def iterate():
27 def iterate():
28 revs.sort(reverse=True)
28 revs.sort(reverse=True)
29 irevs = iter(revs)
29 irevs = iter(revs)
30 h = []
30 h = []
31
31
32 inputrev = next(irevs, None)
32 inputrev = next(irevs, None)
33 if inputrev is not None:
33 if inputrev is not None:
34 heapq.heappush(h, -inputrev)
34 heapq.heappush(h, -inputrev)
35
35
36 seen = set()
36 seen = set()
37 while h:
37 while h:
38 current = -heapq.heappop(h)
38 current = -heapq.heappop(h)
39 if current == inputrev:
39 if current == inputrev:
40 inputrev = next(irevs, None)
40 inputrev = next(irevs, None)
41 if inputrev is not None:
41 if inputrev is not None:
42 heapq.heappush(h, -inputrev)
42 heapq.heappush(h, -inputrev)
43 if current not in seen:
43 if current not in seen:
44 seen.add(current)
44 seen.add(current)
45 yield current
45 yield current
46 for parent in cl.parentrevs(current)[:cut]:
46 for parent in cl.parentrevs(current)[:cut]:
47 if parent != node.nullrev:
47 if parent != node.nullrev:
48 heapq.heappush(h, -parent)
48 heapq.heappush(h, -parent)
49
49
50 return generatorset(iterate(), iterasc=False)
50 return generatorset(iterate(), iterasc=False)
51
51
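Reviewer note on the heap trick above (context code, unchanged by this patch): heapq implements a min-heap, so revisions are pushed negated in order to pop them in descending revision order. A tiny standalone illustration:

    import heapq
    h = []
    for r in [5, 3, 7]:
        heapq.heappush(h, -r)
    assert [-heapq.heappop(h) for _ in range(3)] == [7, 5, 3]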
52 def _revdescendants(repo, revs, followfirst):
52 def _revdescendants(repo, revs, followfirst):
53 """Like revlog.descendants() but supports followfirst."""
53 """Like revlog.descendants() but supports followfirst."""
54 if followfirst:
54 if followfirst:
55 cut = 1
55 cut = 1
56 else:
56 else:
57 cut = None
57 cut = None
58
58
59 def iterate():
59 def iterate():
60 cl = repo.changelog
60 cl = repo.changelog
61 # XXX this should be 'parentset.min()' assuming 'parentset' is a
61 # XXX this should be 'parentset.min()' assuming 'parentset' is a
62 # smartset (and if it is not, it should.)
62 # smartset (and if it is not, it should.)
63 first = min(revs)
63 first = min(revs)
64 nullrev = node.nullrev
64 nullrev = node.nullrev
65 if first == nullrev:
65 if first == nullrev:
66 # Are there nodes with a null first parent and a non-null
66 # Are there nodes with a null first parent and a non-null
67 # second one? Maybe. Do we care? Probably not.
67 # second one? Maybe. Do we care? Probably not.
68 for i in cl:
68 for i in cl:
69 yield i
69 yield i
70 else:
70 else:
71 seen = set(revs)
71 seen = set(revs)
72 for i in cl.revs(first + 1):
72 for i in cl.revs(first + 1):
73 for x in cl.parentrevs(i)[:cut]:
73 for x in cl.parentrevs(i)[:cut]:
74 if x != nullrev and x in seen:
74 if x != nullrev and x in seen:
75 seen.add(i)
75 seen.add(i)
76 yield i
76 yield i
77 break
77 break
78
78
79 return generatorset(iterate(), iterasc=True)
79 return generatorset(iterate(), iterasc=True)
80
80
81 def _revsbetween(repo, roots, heads):
81 def _revsbetween(repo, roots, heads):
82 """Return all paths between roots and heads, inclusive of both endpoint
82 """Return all paths between roots and heads, inclusive of both endpoint
83 sets."""
83 sets."""
84 if not roots:
84 if not roots:
85 return baseset()
85 return baseset()
86 parentrevs = repo.changelog.parentrevs
86 parentrevs = repo.changelog.parentrevs
87 visit = list(heads)
87 visit = list(heads)
88 reachable = set()
88 reachable = set()
89 seen = {}
89 seen = {}
90 # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
90 # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
91 # (and if it is not, it should.)
91 # (and if it is not, it should.)
92 minroot = min(roots)
92 minroot = min(roots)
93 roots = set(roots)
93 roots = set(roots)
94 # prefetch all the things! (because python is slow)
94 # prefetch all the things! (because python is slow)
95 reached = reachable.add
95 reached = reachable.add
96 dovisit = visit.append
96 dovisit = visit.append
97 nextvisit = visit.pop
97 nextvisit = visit.pop
98 # open-code the post-order traversal due to the tiny size of
98 # open-code the post-order traversal due to the tiny size of
99 # sys.getrecursionlimit()
99 # sys.getrecursionlimit()
100 while visit:
100 while visit:
101 rev = nextvisit()
101 rev = nextvisit()
102 if rev in roots:
102 if rev in roots:
103 reached(rev)
103 reached(rev)
104 parents = parentrevs(rev)
104 parents = parentrevs(rev)
105 seen[rev] = parents
105 seen[rev] = parents
106 for parent in parents:
106 for parent in parents:
107 if parent >= minroot and parent not in seen:
107 if parent >= minroot and parent not in seen:
108 dovisit(parent)
108 dovisit(parent)
109 if not reachable:
109 if not reachable:
110 return baseset()
110 return baseset()
111 for rev in sorted(seen):
111 for rev in sorted(seen):
112 for parent in seen[rev]:
112 for parent in seen[rev]:
113 if parent in reachable:
113 if parent in reachable:
114 reached(rev)
114 reached(rev)
115 return baseset(sorted(reachable))
115 return baseset(sorted(reachable))
116
116
117 elements = {
117 elements = {
118 "(": (21, ("group", 1, ")"), ("func", 1, ")")),
118 "(": (21, ("group", 1, ")"), ("func", 1, ")")),
119 "##": (20, None, ("_concat", 20)),
119 "##": (20, None, ("_concat", 20)),
120 "~": (18, None, ("ancestor", 18)),
120 "~": (18, None, ("ancestor", 18)),
121 "^": (18, None, ("parent", 18), ("parentpost", 18)),
121 "^": (18, None, ("parent", 18), ("parentpost", 18)),
122 "-": (5, ("negate", 19), ("minus", 5)),
122 "-": (5, ("negate", 19), ("minus", 5)),
123 "::": (17, ("dagrangepre", 17), ("dagrange", 17),
123 "::": (17, ("dagrangepre", 17), ("dagrange", 17),
124 ("dagrangepost", 17)),
124 ("dagrangepost", 17)),
125 "..": (17, ("dagrangepre", 17), ("dagrange", 17),
125 "..": (17, ("dagrangepre", 17), ("dagrange", 17),
126 ("dagrangepost", 17)),
126 ("dagrangepost", 17)),
127 ":": (15, ("rangepre", 15), ("range", 15), ("rangepost", 15)),
127 ":": (15, ("rangepre", 15), ("range", 15), ("rangepost", 15)),
128 "not": (10, ("not", 10)),
128 "not": (10, ("not", 10)),
129 "!": (10, ("not", 10)),
129 "!": (10, ("not", 10)),
130 "and": (5, None, ("and", 5)),
130 "and": (5, None, ("and", 5)),
131 "&": (5, None, ("and", 5)),
131 "&": (5, None, ("and", 5)),
132 "%": (5, None, ("only", 5), ("onlypost", 5)),
132 "%": (5, None, ("only", 5), ("onlypost", 5)),
133 "or": (4, None, ("or", 4)),
133 "or": (4, None, ("or", 4)),
134 "|": (4, None, ("or", 4)),
134 "|": (4, None, ("or", 4)),
135 "+": (4, None, ("or", 4)),
135 "+": (4, None, ("or", 4)),
136 "=": (3, None, ("keyvalue", 3)),
136 "=": (3, None, ("keyvalue", 3)),
137 ",": (2, None, ("list", 2)),
137 ",": (2, None, ("list", 2)),
138 ")": (0, None, None),
138 ")": (0, None, None),
139 "symbol": (0, ("symbol",), None),
139 "symbol": (0, ("symbol",), None),
140 "string": (0, ("string",), None),
140 "string": (0, ("string",), None),
141 "end": (0, None, None),
141 "end": (0, None, None),
142 }
142 }
143
143
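Reviewer note: with these binding strengths, '=' (3) binds tighter than ',' (2), so an expression like f(x, k=v) parses into a 'list' whose trailing element is a 'keyvalue' node, which is exactly the shape the new parser.buildargsdict() consumes. For a hypothetical limit(10, offset=3) the tree looks like:

    ('func', ('symbol', 'limit'),
        ('list', ('symbol', '10'),
            ('keyvalue', ('symbol', 'offset'), ('symbol', '3'))))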
144 keywords = set(['and', 'or', 'not'])
144 keywords = set(['and', 'or', 'not'])
145
145
146 # default set of valid characters for the initial letter of symbols
146 # default set of valid characters for the initial letter of symbols
147 _syminitletters = set(c for c in [chr(i) for i in xrange(256)]
147 _syminitletters = set(c for c in [chr(i) for i in xrange(256)]
148 if c.isalnum() or c in '._@' or ord(c) > 127)
148 if c.isalnum() or c in '._@' or ord(c) > 127)
149
149
150 # default set of valid characters for non-initial letters of symbols
150 # default set of valid characters for non-initial letters of symbols
151 _symletters = set(c for c in [chr(i) for i in xrange(256)]
151 _symletters = set(c for c in [chr(i) for i in xrange(256)]
152 if c.isalnum() or c in '-._/@' or ord(c) > 127)
152 if c.isalnum() or c in '-._/@' or ord(c) > 127)
153
153
154 def tokenize(program, lookup=None, syminitletters=None, symletters=None):
154 def tokenize(program, lookup=None, syminitletters=None, symletters=None):
155 '''
155 '''
156 Parse a revset statement into a stream of tokens
156 Parse a revset statement into a stream of tokens
157
157
158 ``syminitletters`` is the set of valid characters for the initial
158 ``syminitletters`` is the set of valid characters for the initial
159 letter of symbols.
159 letter of symbols.
160
160
161 By default, character ``c`` is recognized as valid for initial
161 By default, character ``c`` is recognized as valid for initial
162 letter of symbols, if ``c.isalnum() or c in '._@' or ord(c) > 127``.
162 letter of symbols, if ``c.isalnum() or c in '._@' or ord(c) > 127``.
163
163
164 ``symletters`` is the set of valid characters for non-initial
164 ``symletters`` is the set of valid characters for non-initial
165 letters of symbols.
165 letters of symbols.
166
166
167 By default, character ``c`` is recognized as valid for non-initial
167 By default, character ``c`` is recognized as valid for non-initial
168 letters of symbols, if ``c.isalnum() or c in '-._/@' or ord(c) > 127``.
168 letters of symbols, if ``c.isalnum() or c in '-._/@' or ord(c) > 127``.
169
169
170 Check that @ is a valid unquoted token character (issue3686):
170 Check that @ is a valid unquoted token character (issue3686):
171 >>> list(tokenize("@::"))
171 >>> list(tokenize("@::"))
172 [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]
172 [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]
173
173
174 '''
174 '''
175 if syminitletters is None:
175 if syminitletters is None:
176 syminitletters = _syminitletters
176 syminitletters = _syminitletters
177 if symletters is None:
177 if symletters is None:
178 symletters = _symletters
178 symletters = _symletters
179
179
180 pos, l = 0, len(program)
180 pos, l = 0, len(program)
181 while pos < l:
181 while pos < l:
182 c = program[pos]
182 c = program[pos]
183 if c.isspace(): # skip inter-token whitespace
183 if c.isspace(): # skip inter-token whitespace
184 pass
184 pass
185 elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
185 elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
186 yield ('::', None, pos)
186 yield ('::', None, pos)
187 pos += 1 # skip ahead
187 pos += 1 # skip ahead
188 elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
188 elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
189 yield ('..', None, pos)
189 yield ('..', None, pos)
190 pos += 1 # skip ahead
190 pos += 1 # skip ahead
191 elif c == '#' and program[pos:pos + 2] == '##': # look ahead carefully
191 elif c == '#' and program[pos:pos + 2] == '##': # look ahead carefully
192 yield ('##', None, pos)
192 yield ('##', None, pos)
193 pos += 1 # skip ahead
193 pos += 1 # skip ahead
194 elif c in "():=,-|&+!~^%": # handle simple operators
194 elif c in "():=,-|&+!~^%": # handle simple operators
195 yield (c, None, pos)
195 yield (c, None, pos)
196 elif (c in '"\'' or c == 'r' and
196 elif (c in '"\'' or c == 'r' and
197 program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
197 program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
198 if c == 'r':
198 if c == 'r':
199 pos += 1
199 pos += 1
200 c = program[pos]
200 c = program[pos]
201 decode = lambda x: x
201 decode = lambda x: x
202 else:
202 else:
203 decode = lambda x: x.decode('string-escape')
203 decode = lambda x: x.decode('string-escape')
204 pos += 1
204 pos += 1
205 s = pos
205 s = pos
206 while pos < l: # find closing quote
206 while pos < l: # find closing quote
207 d = program[pos]
207 d = program[pos]
208 if d == '\\': # skip over escaped characters
208 if d == '\\': # skip over escaped characters
209 pos += 2
209 pos += 2
210 continue
210 continue
211 if d == c:
211 if d == c:
212 yield ('string', decode(program[s:pos]), s)
212 yield ('string', decode(program[s:pos]), s)
213 break
213 break
214 pos += 1
214 pos += 1
215 else:
215 else:
216 raise error.ParseError(_("unterminated string"), s)
216 raise error.ParseError(_("unterminated string"), s)
217 # gather up a symbol/keyword
217 # gather up a symbol/keyword
218 elif c in syminitletters:
218 elif c in syminitletters:
219 s = pos
219 s = pos
220 pos += 1
220 pos += 1
221 while pos < l: # find end of symbol
221 while pos < l: # find end of symbol
222 d = program[pos]
222 d = program[pos]
223 if d not in symletters:
223 if d not in symletters:
224 break
224 break
225 if d == '.' and program[pos - 1] == '.': # special case for ..
225 if d == '.' and program[pos - 1] == '.': # special case for ..
226 pos -= 1
226 pos -= 1
227 break
227 break
228 pos += 1
228 pos += 1
229 sym = program[s:pos]
229 sym = program[s:pos]
230 if sym in keywords: # operator keywords
230 if sym in keywords: # operator keywords
231 yield (sym, None, s)
231 yield (sym, None, s)
232 elif '-' in sym:
232 elif '-' in sym:
233 # some jerk gave us foo-bar-baz, try to check if it's a symbol
233 # some jerk gave us foo-bar-baz, try to check if it's a symbol
234 if lookup and lookup(sym):
234 if lookup and lookup(sym):
235 # looks like a real symbol
235 # looks like a real symbol
236 yield ('symbol', sym, s)
236 yield ('symbol', sym, s)
237 else:
237 else:
238 # looks like an expression
238 # looks like an expression
239 parts = sym.split('-')
239 parts = sym.split('-')
240 for p in parts[:-1]:
240 for p in parts[:-1]:
241 if p: # possible consecutive -
241 if p: # possible consecutive -
242 yield ('symbol', p, s)
242 yield ('symbol', p, s)
243 s += len(p)
243 s += len(p)
244 yield ('-', None, pos)
244 yield ('-', None, pos)
245 s += 1
245 s += 1
246 if parts[-1]: # possible trailing -
246 if parts[-1]: # possible trailing -
247 yield ('symbol', parts[-1], s)
247 yield ('symbol', parts[-1], s)
248 else:
248 else:
249 yield ('symbol', sym, s)
249 yield ('symbol', sym, s)
250 pos -= 1
250 pos -= 1
251 else:
251 else:
252 raise error.ParseError(_("syntax error in revset '%s'") %
252 raise error.ParseError(_("syntax error in revset '%s'") %
253 program, pos)
253 program, pos)
254 pos += 1
254 pos += 1
255 yield ('end', None, pos)
255 yield ('end', None, pos)
256
256
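For completeness, how a keyword-style call tokenizes with the operator table above; illustrative doctest-style output, verified against the code but not part of the patch:

>>> for t in tokenize("limit(2, n=3)"):
...     print t
('symbol', 'limit', 0)
('(', None, 5)
('symbol', '2', 6)
(',', None, 7)
('symbol', 'n', 9)
('=', None, 10)
('symbol', '3', 11)
(')', None, 12)
('end', None, 13)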
257 def parseerrordetail(inst):
257 def parseerrordetail(inst):
258 """Compose error message from specified ParseError object
258 """Compose error message from specified ParseError object
259 """
259 """
260 if len(inst.args) > 1:
260 if len(inst.args) > 1:
261 return _('at %s: %s') % (inst.args[1], inst.args[0])
261 return _('at %s: %s') % (inst.args[1], inst.args[0])
262 else:
262 else:
263 return inst.args[0]
263 return inst.args[0]
264
264
265 # helpers
265 # helpers
266
266
267 def getstring(x, err):
267 def getstring(x, err):
268 if x and (x[0] == 'string' or x[0] == 'symbol'):
268 if x and (x[0] == 'string' or x[0] == 'symbol'):
269 return x[1]
269 return x[1]
270 raise error.ParseError(err)
270 raise error.ParseError(err)
271
271
272 def getlist(x):
272 def getlist(x):
273 if not x:
273 if not x:
274 return []
274 return []
275 if x[0] == 'list':
275 if x[0] == 'list':
276 return getlist(x[1]) + [x[2]]
276 return getlist(x[1]) + [x[2]]
277 return [x]
277 return [x]
278
278
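Reviewer note: getlist() flattens the left-leaning 'list' nodes that the ',' operator builds, which is what getkwargs() below feeds into parser.buildargsdict():

>>> getlist(('list', ('list', ('symbol', '1'), ('symbol', '2')), ('symbol', '3')))
[('symbol', '1'), ('symbol', '2'), ('symbol', '3')]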
279 def getargs(x, min, max, err):
279 def getargs(x, min, max, err):
280 l = getlist(x)
280 l = getlist(x)
281 if len(l) < min or (max >= 0 and len(l) > max):
281 if len(l) < min or (max >= 0 and len(l) > max):
282 raise error.ParseError(err)
282 raise error.ParseError(err)
283 return l
283 return l
284
284
285 def getkwargs(x, funcname, keys):
286 return parser.buildargsdict(getlist(x), funcname, keys.split(),
287 keyvaluenode='keyvalue', keynode='symbol')
288
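A quick illustration of the new getkwargs() helper; the predicate name 'bookmark' and the key set are purely hypothetical here, since no built-in predicate is converted in this patch:

>>> x = ('keyvalue', ('symbol', 'name'), ('symbol', 'foo'))
>>> getkwargs(x, 'bookmark', 'name')
{'name': ('symbol', 'foo')}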
285 def isvalidsymbol(tree):
289 def isvalidsymbol(tree):
286 """Examine whether specified ``tree`` is valid ``symbol`` or not
290 """Examine whether specified ``tree`` is valid ``symbol`` or not
287 """
291 """
288 return tree[0] == 'symbol' and len(tree) > 1
292 return tree[0] == 'symbol' and len(tree) > 1
289
293
290 def getsymbol(tree):
294 def getsymbol(tree):
291 """Get symbol name from valid ``symbol`` in ``tree``
295 """Get symbol name from valid ``symbol`` in ``tree``
292
296
293 This assumes that ``tree`` is already examined by ``isvalidsymbol``.
297 This assumes that ``tree`` is already examined by ``isvalidsymbol``.
294 """
298 """
295 return tree[1]
299 return tree[1]
296
300
297 def isvalidfunc(tree):
301 def isvalidfunc(tree):
298 """Examine whether specified ``tree`` is valid ``func`` or not
302 """Examine whether specified ``tree`` is valid ``func`` or not
299 """
303 """
300 return tree[0] == 'func' and len(tree) > 1 and isvalidsymbol(tree[1])
304 return tree[0] == 'func' and len(tree) > 1 and isvalidsymbol(tree[1])
301
305
302 def getfuncname(tree):
306 def getfuncname(tree):
303 """Get function name from valid ``func`` in ``tree``
307 """Get function name from valid ``func`` in ``tree``
304
308
305 This assumes that ``tree`` is already examined by ``isvalidfunc``.
309 This assumes that ``tree`` is already examined by ``isvalidfunc``.
306 """
310 """
307 return getsymbol(tree[1])
311 return getsymbol(tree[1])
308
312
309 def getfuncargs(tree):
313 def getfuncargs(tree):
310 """Get list of function arguments from valid ``func`` in ``tree``
314 """Get list of function arguments from valid ``func`` in ``tree``
311
315
312 This assumes that ``tree`` is already examined by ``isvalidfunc``.
316 This assumes that ``tree`` is already examined by ``isvalidfunc``.
313 """
317 """
314 if len(tree) > 2:
318 if len(tree) > 2:
315 return getlist(tree[2])
319 return getlist(tree[2])
316 else:
320 else:
317 return []
321 return []
318
322
319 def getset(repo, subset, x):
323 def getset(repo, subset, x):
320 if not x:
324 if not x:
321 raise error.ParseError(_("missing argument"))
325 raise error.ParseError(_("missing argument"))
322 s = methods[x[0]](repo, subset, *x[1:])
326 s = methods[x[0]](repo, subset, *x[1:])
323 if util.safehasattr(s, 'isascending'):
327 if util.safehasattr(s, 'isascending'):
324 return s
328 return s
325 if (repo.ui.configbool('devel', 'all-warnings')
329 if (repo.ui.configbool('devel', 'all-warnings')
326 or repo.ui.configbool('devel', 'old-revset')):
330 or repo.ui.configbool('devel', 'old-revset')):
327 # else case should not happen, because all non-func are internal,
331 # else case should not happen, because all non-func are internal,
328 # ignoring for now.
332 # ignoring for now.
329 if x[0] == 'func' and x[1][0] == 'symbol' and x[1][1] in symbols:
333 if x[0] == 'func' and x[1][0] == 'symbol' and x[1][1] in symbols:
330 repo.ui.develwarn('revset "%s" use list instead of smartset, '
334 repo.ui.develwarn('revset "%s" use list instead of smartset, '
331 '(upgrade your code)' % x[1][1])
335 '(upgrade your code)' % x[1][1])
332 return baseset(s)
336 return baseset(s)
333
337
334 def _getrevsource(repo, r):
338 def _getrevsource(repo, r):
335 extra = repo[r].extra()
339 extra = repo[r].extra()
336 for label in ('source', 'transplant_source', 'rebase_source'):
340 for label in ('source', 'transplant_source', 'rebase_source'):
337 if label in extra:
341 if label in extra:
338 try:
342 try:
339 return repo[extra[label]].rev()
343 return repo[extra[label]].rev()
340 except error.RepoLookupError:
344 except error.RepoLookupError:
341 pass
345 pass
342 return None
346 return None
343
347
344 # operator methods
348 # operator methods
345
349
346 def stringset(repo, subset, x):
350 def stringset(repo, subset, x):
347 x = repo[x].rev()
351 x = repo[x].rev()
348 if (x in subset
352 if (x in subset
349 or x == node.nullrev and isinstance(subset, fullreposet)):
353 or x == node.nullrev and isinstance(subset, fullreposet)):
350 return baseset([x])
354 return baseset([x])
351 return baseset()
355 return baseset()
352
356
353 def rangeset(repo, subset, x, y):
357 def rangeset(repo, subset, x, y):
354 m = getset(repo, fullreposet(repo), x)
358 m = getset(repo, fullreposet(repo), x)
355 n = getset(repo, fullreposet(repo), y)
359 n = getset(repo, fullreposet(repo), y)
356
360
357 if not m or not n:
361 if not m or not n:
358 return baseset()
362 return baseset()
359 m, n = m.first(), n.last()
363 m, n = m.first(), n.last()
360
364
361 if m < n:
365 if m < n:
362 r = spanset(repo, m, n + 1)
366 r = spanset(repo, m, n + 1)
363 else:
367 else:
364 r = spanset(repo, m, n - 1)
368 r = spanset(repo, m, n - 1)
365 # XXX We should combine with subset first: 'subset & baseset(...)'. This is
369 # XXX We should combine with subset first: 'subset & baseset(...)'. This is
366 # necessary to ensure we preserve the order in subset.
370 # necessary to ensure we preserve the order in subset.
367 #
371 #
368 # This has performance implication, carrying the sorting over when possible
372 # This has performance implication, carrying the sorting over when possible
369 # would be more efficient.
373 # would be more efficient.
370 return r & subset
374 return r & subset
371
375
372 def dagrange(repo, subset, x, y):
376 def dagrange(repo, subset, x, y):
373 r = fullreposet(repo)
377 r = fullreposet(repo)
374 xs = _revsbetween(repo, getset(repo, r, x), getset(repo, r, y))
378 xs = _revsbetween(repo, getset(repo, r, x), getset(repo, r, y))
375 # XXX We should combine with subset first: 'subset & baseset(...)'. This is
379 # XXX We should combine with subset first: 'subset & baseset(...)'. This is
376 # necessary to ensure we preserve the order in subset.
380 # necessary to ensure we preserve the order in subset.
377 return xs & subset
381 return xs & subset
378
382
379 def andset(repo, subset, x, y):
383 def andset(repo, subset, x, y):
380 return getset(repo, getset(repo, subset, x), y)
384 return getset(repo, getset(repo, subset, x), y)
381
385
382 def orset(repo, subset, *xs):
386 def orset(repo, subset, *xs):
383 rs = [getset(repo, subset, x) for x in xs]
387 rs = [getset(repo, subset, x) for x in xs]
384 return _combinesets(rs)
388 return _combinesets(rs)
385
389
386 def notset(repo, subset, x):
390 def notset(repo, subset, x):
387 return subset - getset(repo, subset, x)
391 return subset - getset(repo, subset, x)
388
392
389 def listset(repo, subset, a, b):
393 def listset(repo, subset, a, b):
390 raise error.ParseError(_("can't use a list in this context"))
394 raise error.ParseError(_("can't use a list in this context"))
391
395
392 def keyvaluepair(repo, subset, k, v):
396 def keyvaluepair(repo, subset, k, v):
393 raise error.ParseError(_("can't use a key-value pair in this context"))
397 raise error.ParseError(_("can't use a key-value pair in this context"))
394
398
395 def func(repo, subset, a, b):
399 def func(repo, subset, a, b):
396 if a[0] == 'symbol' and a[1] in symbols:
400 if a[0] == 'symbol' and a[1] in symbols:
397 return symbols[a[1]](repo, subset, b)
401 return symbols[a[1]](repo, subset, b)
398
402
399 keep = lambda fn: getattr(fn, '__doc__', None) is not None
403 keep = lambda fn: getattr(fn, '__doc__', None) is not None
400
404
401 syms = [s for (s, fn) in symbols.items() if keep(fn)]
405 syms = [s for (s, fn) in symbols.items() if keep(fn)]
402 raise error.UnknownIdentifier(a[1], syms)
406 raise error.UnknownIdentifier(a[1], syms)
403
407
404 # functions
408 # functions
405
409
406 def adds(repo, subset, x):
410 def adds(repo, subset, x):
407 """``adds(pattern)``
411 """``adds(pattern)``
408 Changesets that add a file matching pattern.
412 Changesets that add a file matching pattern.
409
413
410 The pattern without explicit kind like ``glob:`` is expected to be
414 The pattern without explicit kind like ``glob:`` is expected to be
411 relative to the current directory and match against a file or a
415 relative to the current directory and match against a file or a
412 directory.
416 directory.
413 """
417 """
414 # i18n: "adds" is a keyword
418 # i18n: "adds" is a keyword
415 pat = getstring(x, _("adds requires a pattern"))
419 pat = getstring(x, _("adds requires a pattern"))
416 return checkstatus(repo, subset, pat, 1)
420 return checkstatus(repo, subset, pat, 1)
417
421
418 def ancestor(repo, subset, x):
422 def ancestor(repo, subset, x):
419 """``ancestor(*changeset)``
423 """``ancestor(*changeset)``
420 A greatest common ancestor of the changesets.
424 A greatest common ancestor of the changesets.
421
425
422 Accepts 0 or more changesets.
426 Accepts 0 or more changesets.
423 Will return empty list when passed no args.
427 Will return empty list when passed no args.
424 Greatest common ancestor of a single changeset is that changeset.
428 Greatest common ancestor of a single changeset is that changeset.
425 """
429 """
426 # i18n: "ancestor" is a keyword
430 # i18n: "ancestor" is a keyword
427 l = getlist(x)
431 l = getlist(x)
428 rl = fullreposet(repo)
432 rl = fullreposet(repo)
429 anc = None
433 anc = None
430
434
431 # (getset(repo, rl, i) for i in l) generates a list of lists
435 # (getset(repo, rl, i) for i in l) generates a list of lists
432 for revs in (getset(repo, rl, i) for i in l):
436 for revs in (getset(repo, rl, i) for i in l):
433 for r in revs:
437 for r in revs:
434 if anc is None:
438 if anc is None:
435 anc = repo[r]
439 anc = repo[r]
436 else:
440 else:
437 anc = anc.ancestor(repo[r])
441 anc = anc.ancestor(repo[r])
438
442
439 if anc is not None and anc.rev() in subset:
443 if anc is not None and anc.rev() in subset:
440 return baseset([anc.rev()])
444 return baseset([anc.rev()])
441 return baseset()
445 return baseset()
442
446
443 def _ancestors(repo, subset, x, followfirst=False):
447 def _ancestors(repo, subset, x, followfirst=False):
444 heads = getset(repo, fullreposet(repo), x)
448 heads = getset(repo, fullreposet(repo), x)
445 if not heads:
449 if not heads:
446 return baseset()
450 return baseset()
447 s = _revancestors(repo, heads, followfirst)
451 s = _revancestors(repo, heads, followfirst)
448 return subset & s
452 return subset & s
449
453
450 def ancestors(repo, subset, x):
454 def ancestors(repo, subset, x):
451 """``ancestors(set)``
455 """``ancestors(set)``
452 Changesets that are ancestors of a changeset in set.
456 Changesets that are ancestors of a changeset in set.
453 """
457 """
454 return _ancestors(repo, subset, x)
458 return _ancestors(repo, subset, x)
455
459
456 def _firstancestors(repo, subset, x):
460 def _firstancestors(repo, subset, x):
457 # ``_firstancestors(set)``
461 # ``_firstancestors(set)``
458 # Like ``ancestors(set)`` but follows only the first parents.
462 # Like ``ancestors(set)`` but follows only the first parents.
459 return _ancestors(repo, subset, x, followfirst=True)
463 return _ancestors(repo, subset, x, followfirst=True)
460
464
461 def ancestorspec(repo, subset, x, n):
465 def ancestorspec(repo, subset, x, n):
462 """``set~n``
466 """``set~n``
463 Changesets that are the Nth ancestor (first parents only) of a changeset
467 Changesets that are the Nth ancestor (first parents only) of a changeset
464 in set.
468 in set.
465 """
469 """
466 try:
470 try:
467 n = int(n[1])
471 n = int(n[1])
468 except (TypeError, ValueError):
472 except (TypeError, ValueError):
469 raise error.ParseError(_("~ expects a number"))
473 raise error.ParseError(_("~ expects a number"))
470 ps = set()
474 ps = set()
471 cl = repo.changelog
475 cl = repo.changelog
472 for r in getset(repo, fullreposet(repo), x):
476 for r in getset(repo, fullreposet(repo), x):
473 for i in range(n):
477 for i in range(n):
474 r = cl.parentrevs(r)[0]
478 r = cl.parentrevs(r)[0]
475 ps.add(r)
479 ps.add(r)
476 return subset & ps
480 return subset & ps
477
481
478 def author(repo, subset, x):
482 def author(repo, subset, x):
479 """``author(string)``
483 """``author(string)``
480 Alias for ``user(string)``.
484 Alias for ``user(string)``.
481 """
485 """
482 # i18n: "author" is a keyword
486 # i18n: "author" is a keyword
483 n = encoding.lower(getstring(x, _("author requires a string")))
487 n = encoding.lower(getstring(x, _("author requires a string")))
484 kind, pattern, matcher = _substringmatcher(n)
488 kind, pattern, matcher = _substringmatcher(n)
485 return subset.filter(lambda x: matcher(encoding.lower(repo[x].user())))
489 return subset.filter(lambda x: matcher(encoding.lower(repo[x].user())))
486
490
487 def bisect(repo, subset, x):
491 def bisect(repo, subset, x):
488 """``bisect(string)``
492 """``bisect(string)``
489 Changesets marked in the specified bisect status:
493 Changesets marked in the specified bisect status:
490
494
491 - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
495 - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
492 - ``goods``, ``bads`` : csets topologically good/bad
496 - ``goods``, ``bads`` : csets topologically good/bad
493 - ``range`` : csets taking part in the bisection
497 - ``range`` : csets taking part in the bisection
494 - ``pruned`` : csets that are goods, bads or skipped
498 - ``pruned`` : csets that are goods, bads or skipped
495 - ``untested`` : csets whose fate is yet unknown
499 - ``untested`` : csets whose fate is yet unknown
496 - ``ignored`` : csets ignored due to DAG topology
500 - ``ignored`` : csets ignored due to DAG topology
497 - ``current`` : the cset currently being bisected
501 - ``current`` : the cset currently being bisected
498 """
502 """
499 # i18n: "bisect" is a keyword
503 # i18n: "bisect" is a keyword
500 status = getstring(x, _("bisect requires a string")).lower()
504 status = getstring(x, _("bisect requires a string")).lower()
501 state = set(hbisect.get(repo, status))
505 state = set(hbisect.get(repo, status))
502 return subset & state
506 return subset & state
503
507
504 # Backward-compatibility
508 # Backward-compatibility
505 # - no help entry so that we do not advertise it any more
509 # - no help entry so that we do not advertise it any more
506 def bisected(repo, subset, x):
510 def bisected(repo, subset, x):
507 return bisect(repo, subset, x)
511 return bisect(repo, subset, x)
508
512
509 def bookmark(repo, subset, x):
513 def bookmark(repo, subset, x):
510 """``bookmark([name])``
514 """``bookmark([name])``
511 The named bookmark or all bookmarks.
515 The named bookmark or all bookmarks.
512
516
513 If `name` starts with `re:`, the remainder of the name is treated as
517 If `name` starts with `re:`, the remainder of the name is treated as
514 a regular expression. To match a bookmark that actually starts with `re:`,
518 a regular expression. To match a bookmark that actually starts with `re:`,
515 use the prefix `literal:`.
519 use the prefix `literal:`.
516 """
520 """
517 # i18n: "bookmark" is a keyword
521 # i18n: "bookmark" is a keyword
518 args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
522 args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
519 if args:
523 if args:
520 bm = getstring(args[0],
524 bm = getstring(args[0],
521 # i18n: "bookmark" is a keyword
525 # i18n: "bookmark" is a keyword
522 _('the argument to bookmark must be a string'))
526 _('the argument to bookmark must be a string'))
523 kind, pattern, matcher = _stringmatcher(bm)
527 kind, pattern, matcher = _stringmatcher(bm)
524 bms = set()
528 bms = set()
525 if kind == 'literal':
529 if kind == 'literal':
526 bmrev = repo._bookmarks.get(pattern, None)
530 bmrev = repo._bookmarks.get(pattern, None)
527 if not bmrev:
531 if not bmrev:
528 raise error.RepoLookupError(_("bookmark '%s' does not exist")
532 raise error.RepoLookupError(_("bookmark '%s' does not exist")
529 % bm)
533 % bm)
530 bms.add(repo[bmrev].rev())
534 bms.add(repo[bmrev].rev())
531 else:
535 else:
532 matchrevs = set()
536 matchrevs = set()
533 for name, bmrev in repo._bookmarks.iteritems():
537 for name, bmrev in repo._bookmarks.iteritems():
534 if matcher(name):
538 if matcher(name):
535 matchrevs.add(bmrev)
539 matchrevs.add(bmrev)
536 if not matchrevs:
540 if not matchrevs:
537 raise error.RepoLookupError(_("no bookmarks exist"
541 raise error.RepoLookupError(_("no bookmarks exist"
538 " that match '%s'") % pattern)
542 " that match '%s'") % pattern)
539 for bmrev in matchrevs:
543 for bmrev in matchrevs:
540 bms.add(repo[bmrev].rev())
544 bms.add(repo[bmrev].rev())
541 else:
545 else:
542 bms = set([repo[r].rev()
546 bms = set([repo[r].rev()
543 for r in repo._bookmarks.values()])
547 for r in repo._bookmarks.values()])
544 bms -= set([node.nullrev])
548 bms -= set([node.nullrev])
545 return subset & bms
549 return subset & bms
546
550
547 def branch(repo, subset, x):
551 def branch(repo, subset, x):
548 """``branch(string or set)``
552 """``branch(string or set)``
549 All changesets belonging to the given branch or the branches of the given
553 All changesets belonging to the given branch or the branches of the given
550 changesets.
554 changesets.
551
555
552 If `string` starts with `re:`, the remainder of the name is treated as
556 If `string` starts with `re:`, the remainder of the name is treated as
553 a regular expression. To match a branch that actually starts with `re:`,
557 a regular expression. To match a branch that actually starts with `re:`,
554 use the prefix `literal:`.
558 use the prefix `literal:`.
555 """
559 """
556 getbi = repo.revbranchcache().branchinfo
560 getbi = repo.revbranchcache().branchinfo
557
561
558 try:
562 try:
559 b = getstring(x, '')
563 b = getstring(x, '')
560 except error.ParseError:
564 except error.ParseError:
561 # not a string, but another revspec, e.g. tip()
565 # not a string, but another revspec, e.g. tip()
562 pass
566 pass
563 else:
567 else:
564 kind, pattern, matcher = _stringmatcher(b)
568 kind, pattern, matcher = _stringmatcher(b)
565 if kind == 'literal':
569 if kind == 'literal':
566 # note: falls through to the revspec case if no branch with
570 # note: falls through to the revspec case if no branch with
567 # this name exists
571 # this name exists
568 if pattern in repo.branchmap():
572 if pattern in repo.branchmap():
569 return subset.filter(lambda r: matcher(getbi(r)[0]))
573 return subset.filter(lambda r: matcher(getbi(r)[0]))
570 else:
574 else:
571 return subset.filter(lambda r: matcher(getbi(r)[0]))
575 return subset.filter(lambda r: matcher(getbi(r)[0]))
572
576
573 s = getset(repo, fullreposet(repo), x)
577 s = getset(repo, fullreposet(repo), x)
574 b = set()
578 b = set()
575 for r in s:
579 for r in s:
576 b.add(getbi(r)[0])
580 b.add(getbi(r)[0])
577 c = s.__contains__
581 c = s.__contains__
578 return subset.filter(lambda r: c(r) or getbi(r)[0] in b)
582 return subset.filter(lambda r: c(r) or getbi(r)[0] in b)
579
583
580 def bumped(repo, subset, x):
584 def bumped(repo, subset, x):
581 """``bumped()``
585 """``bumped()``
582 Mutable changesets marked as successors of public changesets.
586 Mutable changesets marked as successors of public changesets.
583
587
584 Only non-public and non-obsolete changesets can be `bumped`.
588 Only non-public and non-obsolete changesets can be `bumped`.
585 """
589 """
586 # i18n: "bumped" is a keyword
590 # i18n: "bumped" is a keyword
587 getargs(x, 0, 0, _("bumped takes no arguments"))
591 getargs(x, 0, 0, _("bumped takes no arguments"))
588 bumped = obsmod.getrevs(repo, 'bumped')
592 bumped = obsmod.getrevs(repo, 'bumped')
589 return subset & bumped
593 return subset & bumped
590
594
591 def bundle(repo, subset, x):
595 def bundle(repo, subset, x):
592 """``bundle()``
596 """``bundle()``
593 Changesets in the bundle.
597 Changesets in the bundle.
594
598
595 Bundle must be specified by the -R option."""
599 Bundle must be specified by the -R option."""
596
600
597 try:
601 try:
598 bundlerevs = repo.changelog.bundlerevs
602 bundlerevs = repo.changelog.bundlerevs
599 except AttributeError:
603 except AttributeError:
600 raise util.Abort(_("no bundle provided - specify with -R"))
604 raise util.Abort(_("no bundle provided - specify with -R"))
601 return subset & bundlerevs
605 return subset & bundlerevs
602
606
603 def checkstatus(repo, subset, pat, field):
607 def checkstatus(repo, subset, pat, field):
604 hasset = matchmod.patkind(pat) == 'set'
608 hasset = matchmod.patkind(pat) == 'set'
605
609
606 mcache = [None]
610 mcache = [None]
607 def matches(x):
611 def matches(x):
608 c = repo[x]
612 c = repo[x]
609 if not mcache[0] or hasset:
613 if not mcache[0] or hasset:
610 mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
614 mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
611 m = mcache[0]
615 m = mcache[0]
612 fname = None
616 fname = None
613 if not m.anypats() and len(m.files()) == 1:
617 if not m.anypats() and len(m.files()) == 1:
614 fname = m.files()[0]
618 fname = m.files()[0]
615 if fname is not None:
619 if fname is not None:
616 if fname not in c.files():
620 if fname not in c.files():
617 return False
621 return False
618 else:
622 else:
619 for f in c.files():
623 for f in c.files():
620 if m(f):
624 if m(f):
621 break
625 break
622 else:
626 else:
623 return False
627 return False
624 files = repo.status(c.p1().node(), c.node())[field]
628 files = repo.status(c.p1().node(), c.node())[field]
625 if fname is not None:
629 if fname is not None:
626 if fname in files:
630 if fname in files:
627 return True
631 return True
628 else:
632 else:
629 for f in files:
633 for f in files:
630 if m(f):
634 if m(f):
631 return True
635 return True
632
636
633 return subset.filter(matches)
637 return subset.filter(matches)
634
638
635 def _children(repo, narrow, parentset):
639 def _children(repo, narrow, parentset):
636 if not parentset:
640 if not parentset:
637 return baseset()
641 return baseset()
638 cs = set()
642 cs = set()
639 pr = repo.changelog.parentrevs
643 pr = repo.changelog.parentrevs
640 minrev = parentset.min()
644 minrev = parentset.min()
641 for r in narrow:
645 for r in narrow:
642 if r <= minrev:
646 if r <= minrev:
643 continue
647 continue
644 for p in pr(r):
648 for p in pr(r):
645 if p in parentset:
649 if p in parentset:
646 cs.add(r)
650 cs.add(r)
647 # XXX using a set to feed the baseset is wrong. Sets are not ordered.
651 # XXX using a set to feed the baseset is wrong. Sets are not ordered.
648 # This does not break because of other fullreposet misbehavior.
652 # This does not break because of other fullreposet misbehavior.
649 return baseset(cs)
653 return baseset(cs)
650
654
651 def children(repo, subset, x):
655 def children(repo, subset, x):
652 """``children(set)``
656 """``children(set)``
653 Child changesets of changesets in set.
657 Child changesets of changesets in set.
654 """
658 """
655 s = getset(repo, fullreposet(repo), x)
659 s = getset(repo, fullreposet(repo), x)
656 cs = _children(repo, subset, s)
660 cs = _children(repo, subset, s)
657 return subset & cs
661 return subset & cs
658
662
659 def closed(repo, subset, x):
663 def closed(repo, subset, x):
660 """``closed()``
664 """``closed()``
661 Changeset is closed.
665 Changeset is closed.
662 """
666 """
663 # i18n: "closed" is a keyword
667 # i18n: "closed" is a keyword
664 getargs(x, 0, 0, _("closed takes no arguments"))
668 getargs(x, 0, 0, _("closed takes no arguments"))
665 return subset.filter(lambda r: repo[r].closesbranch())
669 return subset.filter(lambda r: repo[r].closesbranch())
666
670
667 def contains(repo, subset, x):
671 def contains(repo, subset, x):
668 """``contains(pattern)``
672 """``contains(pattern)``
669 The revision's manifest contains a file matching pattern (but might not
673 The revision's manifest contains a file matching pattern (but might not
670 modify it). See :hg:`help patterns` for information about file patterns.
674 modify it). See :hg:`help patterns` for information about file patterns.
671
675
672 The pattern without explicit kind like ``glob:`` is expected to be
676 The pattern without explicit kind like ``glob:`` is expected to be
673 relative to the current directory and match against a file exactly
677 relative to the current directory and match against a file exactly
674 for efficiency.
678 for efficiency.
675 """
679 """
676 # i18n: "contains" is a keyword
680 # i18n: "contains" is a keyword
677 pat = getstring(x, _("contains requires a pattern"))
681 pat = getstring(x, _("contains requires a pattern"))
678
682
679 def matches(x):
683 def matches(x):
680 if not matchmod.patkind(pat):
684 if not matchmod.patkind(pat):
681 pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
685 pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
682 if pats in repo[x]:
686 if pats in repo[x]:
683 return True
687 return True
684 else:
688 else:
685 c = repo[x]
689 c = repo[x]
686 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
690 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
687 for f in c.manifest():
691 for f in c.manifest():
688 if m(f):
692 if m(f):
689 return True
693 return True
690 return False
694 return False
691
695
692 return subset.filter(matches)
696 return subset.filter(matches)
693
697
694 def converted(repo, subset, x):
698 def converted(repo, subset, x):
695 """``converted([id])``
699 """``converted([id])``
696 Changesets converted from the given identifier in the old repository if
700 Changesets converted from the given identifier in the old repository if
697 present, or all converted changesets if no identifier is specified.
701 present, or all converted changesets if no identifier is specified.
698 """
702 """
699
703
700 # There is exactly no chance of resolving the revision, so do a simple
704 # There is exactly no chance of resolving the revision, so do a simple
701 # string compare and hope for the best
705 # string compare and hope for the best
702
706
703 rev = None
707 rev = None
704 # i18n: "converted" is a keyword
708 # i18n: "converted" is a keyword
705 l = getargs(x, 0, 1, _('converted takes one or no arguments'))
709 l = getargs(x, 0, 1, _('converted takes one or no arguments'))
706 if l:
710 if l:
707 # i18n: "converted" is a keyword
711 # i18n: "converted" is a keyword
708 rev = getstring(l[0], _('converted requires a revision'))
712 rev = getstring(l[0], _('converted requires a revision'))
709
713
710 def _matchvalue(r):
714 def _matchvalue(r):
711 source = repo[r].extra().get('convert_revision', None)
715 source = repo[r].extra().get('convert_revision', None)
712 return source is not None and (rev is None or source.startswith(rev))
716 return source is not None and (rev is None or source.startswith(rev))
713
717
714 return subset.filter(lambda r: _matchvalue(r))
718 return subset.filter(lambda r: _matchvalue(r))
715
719
def date(repo, subset, x):
    """``date(interval)``
    Changesets within the interval, see :hg:`help dates`.
    """
    # i18n: "date" is a keyword
    ds = getstring(x, _("date requires a string"))
    dm = util.matchdate(ds)
    return subset.filter(lambda x: dm(repo[x].date()[0]))

def desc(repo, subset, x):
    """``desc(string)``
    Search commit message for string. The match is case-insensitive.
    """
    # i18n: "desc" is a keyword
    ds = encoding.lower(getstring(x, _("desc requires a string")))

    def matches(x):
        c = repo[x]
        return ds in encoding.lower(c.description())

    return subset.filter(matches)

def _descendants(repo, subset, x, followfirst=False):
    roots = getset(repo, fullreposet(repo), x)
    if not roots:
        return baseset()
    s = _revdescendants(repo, roots, followfirst)

    # Both sets need to be ascending in order to lazily return the union
    # in the correct order.
    base = subset & roots
    desc = subset & s
    result = base + desc
    if subset.isascending():
        result.sort()
    elif subset.isdescending():
        result.sort(reverse=True)
    else:
        result = subset & result
    return result

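# Illustrative sketch, not part of the original revset.py: the comment in
# _descendants() requires both operands to be ascending so that their union
# can be produced lazily and still come out sorted.  The helper below shows
# the same idea on plain sorted lists; the _example_* name and the data are
# hypothetical.
def _example_ordered_union(base=(0, 4, 9), desc=(5, 6, 9, 11)):
    import heapq
    # both inputs are ascending, so heapq.merge can yield an ascending
    # union lazily, without materializing or re-sorting anything
    return list(heapq.merge(base, desc))
    # _example_ordered_union() == [0, 4, 5, 6, 9, 9, 11]
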
def descendants(repo, subset, x):
    """``descendants(set)``
    Changesets which are descendants of changesets in set.
    """
    return _descendants(repo, subset, x)

def _firstdescendants(repo, subset, x):
    # ``_firstdescendants(set)``
    # Like ``descendants(set)`` but follows only the first parents.
    return _descendants(repo, subset, x, followfirst=True)

def destination(repo, subset, x):
    """``destination([set])``
    Changesets that were created by a graft, transplant or rebase operation,
    with the given revisions specified as the source. Omitting the optional set
    is the same as passing all().
    """
    if x is not None:
        sources = getset(repo, fullreposet(repo), x)
    else:
        sources = fullreposet(repo)

    dests = set()

    # subset contains all of the possible destinations that can be returned, so
    # iterate over them and see if their source(s) were provided in the arg set.
    # Even if the immediate src of r is not in the arg set, src's source (or
    # further back) may be. Scanning back further than the immediate src allows
    # transitive transplants and rebases to yield the same results as transitive
    # grafts.
    for r in subset:
        src = _getrevsource(repo, r)
        lineage = None

        while src is not None:
            if lineage is None:
                lineage = list()

            lineage.append(r)

            # The visited lineage is a match if the current source is in the arg
            # set. Since every candidate dest is visited by way of iterating
            # subset, any dests further back in the lineage will be tested by a
            # different iteration over subset. Likewise, if the src was already
            # selected, the current lineage can be selected without going back
            # further.
            if src in sources or src in dests:
                dests.update(lineage)
                break

            r = src
            src = _getrevsource(repo, r)

    return subset.filter(dests.__contains__)

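# Illustrative sketch, not part of the original revset.py: a stripped-down
# model of the lineage walk in destination().  'getsource' stands in for
# _getrevsource(repo, r); the revisions and the graft chain used in the
# comment below are made up.
def _example_destination(subset, sources, getsource):
    dests = set()
    for r in subset:
        src = getsource(r)
        lineage = []
        while src is not None:
            lineage.append(r)
            # select the whole lineage as soon as the chain reaches a
            # requested source (or an already-selected destination)
            if src in sources or src in dests:
                dests.update(lineage)
                break
            r, src = src, getsource(src)
    return sorted(dests)
    # with chain = {3: 1, 4: 3} (4 grafted from 3, 3 grafted from 1):
    # _example_destination(range(5), {1}, chain.get) == [3, 4]
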
def divergent(repo, subset, x):
    """``divergent()``
    Final successors of changesets with an alternative set of final successors.
    """
    # i18n: "divergent" is a keyword
    getargs(x, 0, 0, _("divergent takes no arguments"))
    divergent = obsmod.getrevs(repo, 'divergent')
    return subset & divergent

def extinct(repo, subset, x):
    """``extinct()``
    Obsolete changesets with obsolete descendants only.
    """
    # i18n: "extinct" is a keyword
    getargs(x, 0, 0, _("extinct takes no arguments"))
    extincts = obsmod.getrevs(repo, 'extinct')
    return subset & extincts

def extra(repo, subset, x):
    """``extra(label, [value])``
    Changesets with the given label in the extra metadata, with the given
    optional value.

    If `value` starts with `re:`, the remainder of the value is treated as
    a regular expression. To match a value that actually starts with `re:`,
    use the prefix `literal:`.
    """

    # i18n: "extra" is a keyword
    l = getargs(x, 1, 2, _('extra takes at least 1 and at most 2 arguments'))
    # i18n: "extra" is a keyword
    label = getstring(l[0], _('first argument to extra must be a string'))
    value = None

    if len(l) > 1:
        # i18n: "extra" is a keyword
        value = getstring(l[1], _('second argument to extra must be a string'))
        kind, value, matcher = _stringmatcher(value)

    def _matchvalue(r):
        extra = repo[r].extra()
        return label in extra and (value is None or matcher(extra[label]))

    return subset.filter(lambda r: _matchvalue(r))

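# Illustrative sketch, not part of the original revset.py: the 're:' /
# 'literal:' convention documented above is handled by _stringmatcher(),
# which also returns the pattern kind.  A simplified stand-in returning only
# the match callable; the _example_* name is hypothetical.
def _example_stringmatcher(pattern):
    import re
    if pattern.startswith('re:'):
        regex = re.compile(pattern[3:])
        return lambda s: regex.search(s) is not None
    if pattern.startswith('literal:'):
        pattern = pattern[len('literal:'):]
    return lambda s: s == pattern
    # _example_stringmatcher('re:^v[0-9]+$')('v12') -> True
    # _example_stringmatcher('literal:re:x')('re:x') -> True
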
def filelog(repo, subset, x):
    """``filelog(pattern)``
    Changesets connected to the specified filelog.

    For performance reasons, visits only revisions mentioned in the file-level
    filelog, rather than filtering through all changesets (much faster, but
    doesn't include deletes or duplicate changes). For a slower, more accurate
    result, use ``file()``.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.

    If some linkrev points to revisions filtered by the current repoview, we'll
    work around it to return a non-filtered value.
    """

    # i18n: "filelog" is a keyword
    pat = getstring(x, _("filelog requires a pattern"))
    s = set()
    cl = repo.changelog

    if not matchmod.patkind(pat):
        f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
        files = [f]
    else:
        m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
        files = (f for f in repo[None] if m(f))

    for f in files:
        backrevref = {} # final value for: filerev -> changerev
        lowestchild = {} # lowest known filerev child of a filerev
        delayed = [] # filerev with filtered linkrev, for post-processing
        lowesthead = None # cache for manifest content of all head revisions
        fl = repo.file(f)
        for fr in list(fl):
            rev = fl.linkrev(fr)
            if rev not in cl:
                # changerev pointed in linkrev is filtered
                # record it for post processing.
                delayed.append((fr, rev))
                continue
            for p in fl.parentrevs(fr):
                if 0 <= p and p not in lowestchild:
                    lowestchild[p] = fr
            backrevref[fr] = rev
            s.add(rev)

        # Post-processing of all filerevs we skipped because they were
        # filtered. If such filerevs have known and unfiltered children, this
        # means they have an unfiltered appearance out there. We'll use linkrev
        # adjustment to find one of these appearances. The lowest known child
        # will be used as a starting point because it is the best upper-bound we
        # have.
        #
        # This approach will fail when an unfiltered but linkrev-shadowed
        # appearance exists in a head changeset without unfiltered filerev
        # children anywhere.
        while delayed:
            # must be a descending iteration. To slowly fill lowest child
            # information that is of potential use by the next item.
            fr, rev = delayed.pop()
            lkr = rev

            child = lowestchild.get(fr)

            if child is None:
                # search for existence of this file revision in a head revision.
                # There are three possibilities:
                # - the revision exists in a head and we can find an
                #   introduction from there,
                # - the revision does not exist in a head because it has been
                #   changed since its introduction: we would have found a child
                #   and be in the other 'else' clause,
                # - all versions of the revision are hidden.
                if lowesthead is None:
                    lowesthead = {}
                    for h in repo.heads():
                        fnode = repo[h].manifest().get(f)
                        if fnode is not None:
                            lowesthead[fl.rev(fnode)] = h
                headrev = lowesthead.get(fr)
                if headrev is None:
                    # content is nowhere unfiltered
                    continue
                rev = repo[headrev][f].introrev()
            else:
                # the lowest known child is a good upper bound
                childcrev = backrevref[child]
                # XXX this does not guarantee returning the lowest
                # introduction of this revision, but this gives a
                # result which is a good start and will fit in most
                # cases. We probably need to fix the multiple
                # introductions case properly (report each
                # introduction, even for identical file revisions)
                # once and for all at some point anyway.
                for p in repo[childcrev][f].parents():
                    if p.filerev() == fr:
                        rev = p.rev()
                        break
                if rev == lkr: # no shadowed entry found
                    # XXX This should never happen unless some manifest points
                    # to biggish file revisions (like a revision that uses a
                    # parent that never appears in the manifest ancestors)
                    continue

            # Fill the data for the next iteration.
            for p in fl.parentrevs(fr):
                if 0 <= p and p not in lowestchild:
                    lowestchild[p] = fr
            backrevref[fr] = rev
            s.add(rev)

    return subset & s

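# Illustrative sketch, not part of the original revset.py: how the
# 'lowestchild' table used in filelog() is filled.  The toy filelog below
# maps a filerev to its (p1, p2) parent filerevs, -1 meaning "no parent";
# the data and the _example_* name are hypothetical.
def _example_lowestchild(parentrevs=None):
    if parentrevs is None:
        parentrevs = {0: (-1, -1), 1: (0, -1), 2: (1, -1), 3: (1, -1)}
    lowestchild = {}
    for fr in sorted(parentrevs):
        # ascending iteration, so the first child recorded is the lowest one
        for p in parentrevs[fr]:
            if 0 <= p and p not in lowestchild:
                lowestchild[p] = fr
    return lowestchild
    # _example_lowestchild() == {0: 1, 1: 2}: filerev 2, not 3, is the lowest
    # child of filerev 1 and therefore the tightest upper bound for linkrev
    # adjustment
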
def first(repo, subset, x):
    """``first(set, [n])``
    An alias for limit().
    """
    return limit(repo, subset, x)

def _follow(repo, subset, x, name, followfirst=False):
    l = getargs(x, 0, 1, _("%s takes no arguments or a filename") % name)
    c = repo['.']
    if l:
        x = getstring(l[0], _("%s expected a filename") % name)
        if x in c:
            cx = c[x]
            s = set(ctx.rev() for ctx in cx.ancestors(followfirst=followfirst))
            # include the revision responsible for the most recent version
            s.add(cx.introrev())
        else:
            return baseset()
    else:
        s = _revancestors(repo, baseset([c.rev()]), followfirst)

    return subset & s

def follow(repo, subset, x):
    """``follow([file])``
    An alias for ``::.`` (ancestors of the working directory's first parent).
    If a filename is specified, the history of the given file is followed,
    including copies.
    """
    return _follow(repo, subset, x, 'follow')

def _followfirst(repo, subset, x):
    # ``followfirst([file])``
    # Like ``follow([file])`` but follows only the first parent of
    # every revision or file revision.
    return _follow(repo, subset, x, '_followfirst', followfirst=True)

def getall(repo, subset, x):
    """``all()``
    All changesets, the same as ``0:tip``.
    """
    # i18n: "all" is a keyword
    getargs(x, 0, 0, _("all takes no arguments"))
    return subset & spanset(repo) # drop "null" if any

def grep(repo, subset, x):
    """``grep(regex)``
    Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
    to ensure special escape characters are handled correctly. Unlike
    ``keyword(string)``, the match is case-sensitive.
    """
    try:
        # i18n: "grep" is a keyword
        gr = re.compile(getstring(x, _("grep requires a string")))
    except re.error as e:
        raise error.ParseError(_('invalid match pattern: %s') % e)

    def matches(x):
        c = repo[x]
        for e in c.files() + [c.user(), c.description()]:
            if gr.search(e):
                return True
        return False

    return subset.filter(matches)

def _matchfiles(repo, subset, x):
    # _matchfiles takes a revset list of prefixed arguments:
    #
    #   [p:foo, i:bar, x:baz]
    #
    # builds a match object from them and filters subset. Allowed
    # prefixes are 'p:' for regular patterns, 'i:' for include
    # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
    # a revision identifier, or the empty string to reference the
    # working directory, from which the match object is
    # initialized. Use 'd:' to set the default matching mode, default
    # to 'glob'. At most one 'r:' and 'd:' argument can be passed.

    # i18n: "_matchfiles" is a keyword
    l = getargs(x, 1, -1, _("_matchfiles requires at least one argument"))
    pats, inc, exc = [], [], []
    rev, default = None, None
    for arg in l:
        # i18n: "_matchfiles" is a keyword
        s = getstring(arg, _("_matchfiles requires string arguments"))
        prefix, value = s[:2], s[2:]
        if prefix == 'p:':
            pats.append(value)
        elif prefix == 'i:':
            inc.append(value)
        elif prefix == 'x:':
            exc.append(value)
        elif prefix == 'r:':
            if rev is not None:
                # i18n: "_matchfiles" is a keyword
                raise error.ParseError(_('_matchfiles expected at most one '
                                         'revision'))
            if value != '': # empty means working directory; leave rev as None
                rev = value
        elif prefix == 'd:':
            if default is not None:
                # i18n: "_matchfiles" is a keyword
                raise error.ParseError(_('_matchfiles expected at most one '
                                         'default mode'))
            default = value
        else:
            # i18n: "_matchfiles" is a keyword
            raise error.ParseError(_('invalid _matchfiles prefix: %s') % prefix)
    if not default:
        default = 'glob'

    m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
                       exclude=exc, ctx=repo[rev], default=default)

    def matches(x):
        for f in repo[x].files():
            if m(f):
                return True
        return False

    return subset.filter(matches)

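# Illustrative sketch, not part of the original revset.py: the prefixed
# argument convention parsed by _matchfiles(), shown on plain strings (the
# "at most one 'r:'/'d:'" checks are omitted here).  The _example_* name and
# the sample arguments are hypothetical.
def _example_matchfiles_args(args=('p:foo', 'i:bar', 'x:baz', 'r:tip', 'd:relpath')):
    pats, inc, exc = [], [], []
    rev, default = None, None
    for s in args:
        prefix, value = s[:2], s[2:]
        if prefix == 'p:':
            pats.append(value)
        elif prefix == 'i:':
            inc.append(value)
        elif prefix == 'x:':
            exc.append(value)
        elif prefix == 'r:':
            rev = value or None # '' means the working directory
        elif prefix == 'd:':
            default = value
        else:
            raise ValueError('invalid prefix: %s' % prefix)
    return pats, inc, exc, rev, default or 'glob'
    # -> (['foo'], ['bar'], ['baz'], 'tip', 'relpath')
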
def hasfile(repo, subset, x):
    """``file(pattern)``
    Changesets affecting files matched by pattern.

    For a faster but less accurate result, consider using ``filelog()``
    instead.

    This predicate uses ``glob:`` as the default kind of pattern.
    """
    # i18n: "file" is a keyword
    pat = getstring(x, _("file requires a pattern"))
    return _matchfiles(repo, subset, ('string', 'p:' + pat))

def head(repo, subset, x):
    """``head()``
    Changeset is a named branch head.
    """
    # i18n: "head" is a keyword
    getargs(x, 0, 0, _("head takes no arguments"))
    hs = set()
    cl = repo.changelog
    for b, ls in repo.branchmap().iteritems():
        hs.update(cl.rev(h) for h in ls)
    # XXX using a set to feed the baseset is wrong. Sets are not ordered.
    # This does not break because of other fullreposet misbehavior.
    # XXX We should combine with subset first: 'subset & baseset(...)'. This is
    # necessary to ensure we preserve the order in subset.
    return baseset(hs) & subset

def heads(repo, subset, x):
    """``heads(set)``
    Members of set with no children in set.
    """
    s = getset(repo, subset, x)
    ps = parents(repo, subset, x)
    return s - ps

def hidden(repo, subset, x):
    """``hidden()``
    Hidden changesets.
    """
    # i18n: "hidden" is a keyword
    getargs(x, 0, 0, _("hidden takes no arguments"))
    hiddenrevs = repoview.filterrevs(repo, 'visible')
    return subset & hiddenrevs

def keyword(repo, subset, x):
    """``keyword(string)``
    Search commit message, user name, and names of changed files for
    string. The match is case-insensitive.
    """
    # i18n: "keyword" is a keyword
    kw = encoding.lower(getstring(x, _("keyword requires a string")))

    def matches(r):
        c = repo[r]
        return any(kw in encoding.lower(t)
                   for t in c.files() + [c.user(), c.description()])

    return subset.filter(matches)

def limit(repo, subset, x):
    """``limit(set, [n])``
    First n members of set, defaulting to 1.
    """
    # i18n: "limit" is a keyword
    l = getargs(x, 1, 2, _("limit requires one or two arguments"))
    try:
        lim = 1
        if len(l) == 2:
            # i18n: "limit" is a keyword
            lim = int(getstring(l[1], _("limit requires a number")))
    except (TypeError, ValueError):
        # i18n: "limit" is a keyword
        raise error.ParseError(_("limit expects a number"))
    ss = subset
    os = getset(repo, fullreposet(repo), l[0])
    result = []
    it = iter(os)
    for x in xrange(lim):
        y = next(it, None)
        if y is None:
            break
        elif y in ss:
            result.append(y)
    return baseset(result)

def last(repo, subset, x):
    """``last(set, [n])``
    Last n members of set, defaulting to 1.
    """
    # i18n: "last" is a keyword
    l = getargs(x, 1, 2, _("last requires one or two arguments"))
    try:
        lim = 1
        if len(l) == 2:
            # i18n: "last" is a keyword
            lim = int(getstring(l[1], _("last requires a number")))
    except (TypeError, ValueError):
        # i18n: "last" is a keyword
        raise error.ParseError(_("last expects a number"))
    ss = subset
    os = getset(repo, fullreposet(repo), l[0])
    os.reverse()
    result = []
    it = iter(os)
    for x in xrange(lim):
        y = next(it, None)
        if y is None:
            break
        elif y in ss:
            result.append(y)
    return baseset(result)

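# Illustrative sketch, not part of the original revset.py: limit() and last()
# both take the first 'lim' items of the argument set and keep those that are
# also in the subset; last() just reverses the set before slicing.  The same
# shape in plain Python, with hypothetical names and data:
def _example_limit(os, ss, lim=1, fromlast=False):
    import itertools
    if fromlast:
        os = list(os)[::-1] # last() reverses before taking the first items
    return [y for y in itertools.islice(os, lim) if y in ss]
    # _example_limit([7, 3, 9, 2], {2, 3, 9}, lim=2) == [3]
    # _example_limit([7, 3, 9, 2], {2, 3, 9}, lim=2, fromlast=True) == [2, 9]
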
def maxrev(repo, subset, x):
    """``max(set)``
    Changeset with highest revision number in set.
    """
    os = getset(repo, fullreposet(repo), x)
    if os:
        m = os.max()
        if m in subset:
            return baseset([m])
    return baseset()

def merge(repo, subset, x):
    """``merge()``
    Changeset is a merge changeset.
    """
    # i18n: "merge" is a keyword
    getargs(x, 0, 0, _("merge takes no arguments"))
    cl = repo.changelog
    return subset.filter(lambda r: cl.parentrevs(r)[1] != -1)

def branchpoint(repo, subset, x):
    """``branchpoint()``
    Changesets with more than one child.
    """
    # i18n: "branchpoint" is a keyword
    getargs(x, 0, 0, _("branchpoint takes no arguments"))
    cl = repo.changelog
    if not subset:
        return baseset()
    # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
    # (and if it is not, it should.)
    baserev = min(subset)
    parentscount = [0]*(len(repo) - baserev)
    for r in cl.revs(start=baserev + 1):
        for p in cl.parentrevs(r):
            if p >= baserev:
                parentscount[p - baserev] += 1
    return subset.filter(lambda r: parentscount[r - baserev] > 1)

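# Illustrative sketch, not part of the original revset.py: branchpoint()
# counts, for every revision at or above the smallest candidate, how many
# children it has, using a list indexed by 'rev - baserev'.  A toy DAG
# (rev -> parent revs, -1 meaning "no parent") with hypothetical data:
def _example_branchpoints(parentrevs=None):
    if parentrevs is None:
        # 0 <- 1 <- 2, and 3 is a second child of 1
        parentrevs = {0: (-1, -1), 1: (0, -1), 2: (1, -1), 3: (1, -1)}
    baserev = min(parentrevs)
    parentscount = [0] * (len(parentrevs) - baserev)
    for r in sorted(parentrevs)[1:]:
        for p in parentrevs[r]:
            if p >= baserev:
                parentscount[p - baserev] += 1
    return [r for r in sorted(parentrevs) if parentscount[r - baserev] > 1]
    # _example_branchpoints() == [1]: only rev 1 has more than one child
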
def minrev(repo, subset, x):
    """``min(set)``
    Changeset with lowest revision number in set.
    """
    os = getset(repo, fullreposet(repo), x)
    if os:
        m = os.min()
        if m in subset:
            return baseset([m])
    return baseset()

def modifies(repo, subset, x):
    """``modifies(pattern)``
    Changesets modifying files matched by pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "modifies" is a keyword
    pat = getstring(x, _("modifies requires a pattern"))
    return checkstatus(repo, subset, pat, 0)

def named(repo, subset, x):
    """``named(namespace)``
    The changesets in a given namespace.

    If `namespace` starts with `re:`, the remainder of the string is treated as
    a regular expression. To match a namespace that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "named" is a keyword
    args = getargs(x, 1, 1, _('named requires a namespace argument'))

    ns = getstring(args[0],
                   # i18n: "named" is a keyword
                   _('the argument to named must be a string'))
    kind, pattern, matcher = _stringmatcher(ns)
    namespaces = set()
    if kind == 'literal':
        if pattern not in repo.names:
            raise error.RepoLookupError(_("namespace '%s' does not exist")
                                        % ns)
        namespaces.add(repo.names[pattern])
    else:
        for name, ns in repo.names.iteritems():
            if matcher(name):
                namespaces.add(ns)
        if not namespaces:
            raise error.RepoLookupError(_("no namespace exists"
                                          " that matches '%s'") % pattern)

    names = set()
    for ns in namespaces:
        for name in ns.listnames(repo):
            if name not in ns.deprecated:
                names.update(repo[n].rev() for n in ns.nodes(repo, name))

    names -= set([node.nullrev])
    return subset & names

def node_(repo, subset, x):
    """``id(string)``
    Revision non-ambiguously specified by the given hex string prefix.
    """
    # i18n: "id" is a keyword
    l = getargs(x, 1, 1, _("id requires one argument"))
    # i18n: "id" is a keyword
    n = getstring(l[0], _("id requires a string"))
    if len(n) == 40:
        try:
            rn = repo.changelog.rev(node.bin(n))
        except (LookupError, TypeError):
            rn = None
    else:
        rn = None
        pm = repo.changelog._partialmatch(n)
        if pm is not None:
            rn = repo.changelog.rev(pm)

    if rn is None:
        return baseset()
    result = baseset([rn])
    return result & subset

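# Illustrative sketch, not part of the original revset.py: the partial-match
# step above resolves a short hex prefix only when it is unambiguous.  The
# same idea over a plain list of full hashes (the real changelog helper
# reports an error for ambiguous prefixes instead of returning None); the
# sample node ids and the _example_* name are hypothetical.
def _example_partialmatch(prefix, nodes=('a1b2c3', 'a1f900', 'bb00aa')):
    candidates = [n for n in nodes if n.startswith(prefix)]
    if len(candidates) == 1:
        return candidates[0]
    return None # unknown or ambiguous prefix
    # _example_partialmatch('bb') == 'bb00aa'
    # _example_partialmatch('a1') is None (ambiguous: two candidates)
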
def obsolete(repo, subset, x):
    """``obsolete()``
    Mutable changeset with a newer version."""
    # i18n: "obsolete" is a keyword
    getargs(x, 0, 0, _("obsolete takes no arguments"))
    obsoletes = obsmod.getrevs(repo, 'obsolete')
    return subset & obsoletes

def only(repo, subset, x):
    """``only(set, [set])``
    Changesets that are ancestors of the first set that are not ancestors
    of any other head in the repo. If a second set is specified, the result
    is ancestors of the first set that are not ancestors of the second set
    (i.e. ::<set1> - ::<set2>).
    """
    cl = repo.changelog
    # i18n: "only" is a keyword
    args = getargs(x, 1, 2, _('only takes one or two arguments'))
    include = getset(repo, fullreposet(repo), args[0])
    if len(args) == 1:
        if not include:
            return baseset()

        descendants = set(_revdescendants(repo, include, False))
        exclude = [rev for rev in cl.headrevs()
                   if not rev in descendants and not rev in include]
    else:
        exclude = getset(repo, fullreposet(repo), args[1])

    results = set(cl.findmissingrevs(common=exclude, heads=include))
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & results

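# Illustrative sketch, not part of the original revset.py: with a second
# argument, only(set1, set2) is '::set1 - ::set2', i.e. ancestors of the
# first set minus ancestors of the second.  Plain set algebra over a toy
# DAG (rev -> parent revs); the data and the _example_* name are
# hypothetical.
def _example_only(include, exclude, parents=None):
    if parents is None:
        parents = {0: (), 1: (0,), 2: (1,), 3: (1,), 4: (2, 3)}
    def ancestors(revs):
        seen = set()
        stack = list(revs)
        while stack:
            r = stack.pop()
            if r not in seen:
                seen.add(r)
                stack.extend(parents[r])
        return seen # includes the starting revisions themselves
    return sorted(ancestors(include) - ancestors(exclude))
    # _example_only([4], [3]) == [2, 4]: revs 0, 1 and 3 are ancestors of 3
    # and are therefore excluded
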
def origin(repo, subset, x):
    """``origin([set])``
    Changesets that were specified as a source for the grafts, transplants or
    rebases that created the given revisions. Omitting the optional set is the
    same as passing all(). If a changeset created by these operations is itself
    specified as a source for one of these operations, only the source changeset
    for the first operation is selected.
    """
    if x is not None:
        dests = getset(repo, fullreposet(repo), x)
    else:
        dests = fullreposet(repo)

    def _firstsrc(rev):
        src = _getrevsource(repo, rev)
        if src is None:
            return None

        while True:
            prev = _getrevsource(repo, src)

            if prev is None:
                return src
            src = prev

    o = set([_firstsrc(r) for r in dests])
    o -= set([None])
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & o

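# Illustrative sketch, not part of the original revset.py: _firstsrc() above
# follows the graft/transplant/rebase source chain all the way back and keeps
# only the first (oldest) source.  'getsource' stands in for
# _getrevsource(repo, rev); the chain used in the comment is made up.
def _example_firstsrc(rev, getsource):
    src = getsource(rev)
    if src is None:
        return None # 'rev' was not created by such an operation
    while getsource(src) is not None:
        src = getsource(src)
    return src
    # with chain = {3: 1, 4: 3} (4 grafted from 3, 3 grafted from 1):
    # _example_firstsrc(4, chain.get) == 1; _example_firstsrc(2, chain.get) is None
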
def outgoing(repo, subset, x):
    """``outgoing([path])``
    Changesets not found in the specified destination repository, or the
    default push location.
    """
    # Avoid cycles.
    import discovery
    import hg
    # i18n: "outgoing" is a keyword
    l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
    # i18n: "outgoing" is a keyword
    dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
    dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.peer(repo, {}, dest)
    repo.ui.pushbuffer()
    outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
    repo.ui.popbuffer()
    cl = repo.changelog
    o = set([cl.rev(r) for r in outgoing.missing])
    return subset & o

def p1(repo, subset, x):
    """``p1([set])``
    First parent of changesets in set, or the working directory.
    """
    if x is None:
        p = repo[x].p1().rev()
        if p >= 0:
            return subset & baseset([p])
        return baseset()

    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        ps.add(cl.parentrevs(r)[0])
    ps -= set([node.nullrev])
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & ps

def p2(repo, subset, x):
    """``p2([set])``
    Second parent of changesets in set, or the working directory.
    """
    if x is None:
        ps = repo[x].parents()
        try:
            p = ps[1].rev()
            if p >= 0:
                return subset & baseset([p])
            return baseset()
        except IndexError:
            return baseset()

    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        ps.add(cl.parentrevs(r)[1])
    ps -= set([node.nullrev])
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & ps

def parents(repo, subset, x):
    """``parents([set])``
    The set of all parents for all changesets in set, or the working directory.
    """
    if x is None:
        ps = set(p.rev() for p in repo[x].parents())
    else:
        ps = set()
        cl = repo.changelog
        for r in getset(repo, fullreposet(repo), x):
            if r is None:
                ps.update(p.rev() for p in repo[r].parents())
            else:
                ps.update(cl.parentrevs(r))
    ps -= set([node.nullrev])
    return subset & ps

def _phase(repo, subset, target):
    """helper to select all rev in phase <target>"""
    repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
    if repo._phasecache._phasesets:
        s = repo._phasecache._phasesets[target] - repo.changelog.filteredrevs
        s = baseset(s)
        s.sort() # set are non ordered, so we enforce ascending
        return subset & s
    else:
        phase = repo._phasecache.phase
        condition = lambda r: phase(repo, r) == target
        return subset.filter(condition, cache=False)

def draft(repo, subset, x):
    """``draft()``
    Changeset in draft phase."""
    # i18n: "draft" is a keyword
    getargs(x, 0, 0, _("draft takes no arguments"))
    target = phases.draft
    return _phase(repo, subset, target)

def secret(repo, subset, x):
    """``secret()``
    Changeset in secret phase."""
    # i18n: "secret" is a keyword
    getargs(x, 0, 0, _("secret takes no arguments"))
    target = phases.secret
    return _phase(repo, subset, target)

def parentspec(repo, subset, x, n):
    """``set^0``
    The set.
    ``set^1`` (or ``set^``), ``set^2``
    First or second parent, respectively, of all changesets in set.
    """
    try:
        n = int(n[1])
        if n not in (0, 1, 2):
            raise ValueError
    except (TypeError, ValueError):
        raise error.ParseError(_("^ expects a number 0, 1, or 2"))
    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        if n == 0:
            ps.add(r)
        elif n == 1:
            ps.add(cl.parentrevs(r)[0])
        elif n == 2:
            parents = cl.parentrevs(r)
            if len(parents) > 1:
                ps.add(parents[1])
    return subset & ps

def present(repo, subset, x):
    """``present(set)``
    An empty set, if any revision in set isn't found; otherwise,
    all revisions in set.

    If any of the specified revisions is not present in the local repository,
    the query is normally aborted. But this predicate allows the query
    to continue even in such cases.
    """
    try:
        return getset(repo, subset, x)
    except error.RepoLookupError:
        return baseset()

# for internal use
def _notpublic(repo, subset, x):
    getargs(x, 0, 0, "_notpublic takes no arguments")
    repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
    if repo._phasecache._phasesets:
        s = set()
        for u in repo._phasecache._phasesets[1:]:
            s.update(u)
        s = baseset(s - repo.changelog.filteredrevs)
        s.sort()
        return subset & s
    else:
        phase = repo._phasecache.phase
        target = phases.public
        condition = lambda r: phase(repo, r) != target
        return subset.filter(condition, cache=False)

def public(repo, subset, x):
    """``public()``
    Changeset in public phase."""
    # i18n: "public" is a keyword
    getargs(x, 0, 0, _("public takes no arguments"))
    phase = repo._phasecache.phase
    target = phases.public
    condition = lambda r: phase(repo, r) == target
    return subset.filter(condition, cache=False)

def remote(repo, subset, x):
    """``remote([id [,path]])``
    Local revision that corresponds to the given identifier in a
    remote repository, if present. Here, the '.' identifier is a
    synonym for the current local branch.
    """

    import hg # avoid start-up nasties
    # i18n: "remote" is a keyword
    l = getargs(x, 0, 2, _("remote takes one, two or no arguments"))

    q = '.'
    if len(l) > 0:
        # i18n: "remote" is a keyword
        q = getstring(l[0], _("remote requires a string id"))
    if q == '.':
        q = repo['.'].branch()

    dest = ''
    if len(l) > 1:
        # i18n: "remote" is a keyword
        dest = getstring(l[1], _("remote requires a repository path"))
    dest = repo.ui.expandpath(dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.peer(repo, {}, dest)
    n = other.lookup(q)
    if n in repo:
        r = repo[n].rev()
        if r in subset:
            return baseset([r])
    return baseset()

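# Illustrative usage sketch (assumes a configured 'default' path in the
# user's hgrc): look up the head of the current local branch as the remote
# knows it, and show it if that node is already present locally:
#
#   hg log -r "remote('.')"
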
def removes(repo, subset, x):
    """``removes(pattern)``
    Changesets which remove files matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "removes" is a keyword
    pat = getstring(x, _("removes requires a pattern"))
    return checkstatus(repo, subset, pat, 2)

def rev(repo, subset, x):
    """``rev(number)``
    Revision with the given numeric identifier.
    """
    # i18n: "rev" is a keyword
    l = getargs(x, 1, 1, _("rev requires one argument"))
    try:
        # i18n: "rev" is a keyword
        l = int(getstring(l[0], _("rev requires a number")))
    except (TypeError, ValueError):
        # i18n: "rev" is a keyword
        raise error.ParseError(_("rev expects a number"))
    if l not in repo.changelog and l != node.nullrev:
        return baseset()
    return subset & baseset([l])

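# Illustrative sketch (standalone, mirrors the try/except above): rev() only
# accepts a decimal string, so "rev(10)" works while "rev(ab12)" is rejected
# at parse time:
#
#   int('10')    # -> 10
#   int('ab12')  # raises ValueError -> reported as "rev expects a number"
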
def matching(repo, subset, x):
    """``matching(revision [, field])``
    Changesets in which a given set of fields match the set of fields in the
    selected revision or set.

    To match more than one field pass the list of fields to match separated
    by spaces (e.g. ``author description``).

    Valid fields are most regular revision fields and some special fields.

    Regular revision fields are ``description``, ``author``, ``branch``,
    ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
    and ``diff``.
    Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
    contents of the revision. Two revisions matching their ``diff`` will
    also match their ``files``.

    Special fields are ``summary`` and ``metadata``:
    ``summary`` matches the first line of the description.
    ``metadata`` is equivalent to matching ``description user date``
    (i.e. it matches the main metadata fields).

    ``metadata`` is the default field which is used when no fields are
    specified. You can match more than one field at a time.
    """
    # i18n: "matching" is a keyword
    l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))

    revs = getset(repo, fullreposet(repo), l[0])

    fieldlist = ['metadata']
    if len(l) > 1:
        fieldlist = getstring(l[1],
                              # i18n: "matching" is a keyword
                              _("matching requires a string "
                                "as its second argument")).split()

    # Make sure that there are no repeated fields,
    # expand the 'special' 'metadata' field type
    # and check the 'files' whenever we check the 'diff'
    fields = []
    for field in fieldlist:
        if field == 'metadata':
            fields += ['user', 'description', 'date']
        elif field == 'diff':
            # a revision matching the diff must also match the files
            # since matching the diff is very costly, make sure to
            # also match the files first
            fields += ['files', 'diff']
        else:
            if field == 'author':
                field = 'user'
            fields.append(field)
    fields = set(fields)
    if 'summary' in fields and 'description' in fields:
        # If a revision matches its description it also matches its summary
        fields.discard('summary')

    # We may want to match more than one field
    # Not all fields take the same amount of time to be matched
    # Sort the selected fields in order of increasing matching cost
    fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
        'files', 'description', 'substate', 'diff']
    def fieldkeyfunc(f):
        try:
            return fieldorder.index(f)
        except ValueError:
            # assume an unknown field is very costly
            return len(fieldorder)
    fields = list(fields)
    fields.sort(key=fieldkeyfunc)

    # Each field will be matched with its own "getfield" function
    # which will be added to the getfieldfuncs array of functions
    getfieldfuncs = []
    _funcs = {
        'user': lambda r: repo[r].user(),
        'branch': lambda r: repo[r].branch(),
        'date': lambda r: repo[r].date(),
        'description': lambda r: repo[r].description(),
        'files': lambda r: repo[r].files(),
        'parents': lambda r: repo[r].parents(),
        'phase': lambda r: repo[r].phase(),
        'substate': lambda r: repo[r].substate,
        'summary': lambda r: repo[r].description().splitlines()[0],
        'diff': lambda r: list(repo[r].diff(git=True),)
    }
    for info in fields:
        getfield = _funcs.get(info, None)
        if getfield is None:
            raise error.ParseError(
                # i18n: "matching" is a keyword
                _("unexpected field name passed to matching: %s") % info)
        getfieldfuncs.append(getfield)
    # convert the getfield array of functions into a "getinfo" function
    # which returns an array of field values (or a single value if there
    # is only one field to match)
    getinfo = lambda r: [f(r) for f in getfieldfuncs]

    def matches(x):
        for rev in revs:
            target = getinfo(rev)
            match = True
            for n, f in enumerate(getfieldfuncs):
                if target[n] != f(x):
                    match = False
            if match:
                return True
        return False

    return subset.filter(matches)

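# Illustrative sketch (standalone): with the cost order above, a request for
# 'diff user phase' is evaluated cheapest-first; unknown field names fall to
# the end via fieldkeyfunc:
#
#   order = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
#            'files', 'description', 'substate', 'diff']
#   sorted(['diff', 'user', 'phase'], key=order.index)
#   # -> ['phase', 'user', 'diff']
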
def reverse(repo, subset, x):
    """``reverse(set)``
    Reverse order of set.
    """
    l = getset(repo, subset, x)
    l.reverse()
    return l

def roots(repo, subset, x):
    """``roots(set)``
    Changesets in set with no parent changeset in set.
    """
    s = getset(repo, fullreposet(repo), x)
    parents = repo.changelog.parentrevs
    def filter(r):
        for p in parents(r):
            if 0 <= p and p in s:
                return False
        return True
    return subset & s.filter(filter)

def sort(repo, subset, x):
    """``sort(set[, [-]key...])``
    Sort set by keys. The default sort order is ascending, specify a key
    as ``-key`` to sort in descending order.

    The keys can be:

    - ``rev`` for the revision number,
    - ``branch`` for the branch name,
    - ``desc`` for the commit message (description),
    - ``user`` for user name (``author`` can be used as an alias),
    - ``date`` for the commit date
    """
    # i18n: "sort" is a keyword
    l = getargs(x, 1, 2, _("sort requires one or two arguments"))
    keys = "rev"
    if len(l) == 2:
        # i18n: "sort" is a keyword
        keys = getstring(l[1], _("sort spec must be a string"))

    s = l[0]
    keys = keys.split()
    l = []
    def invert(s):
        return "".join(chr(255 - ord(c)) for c in s)
    revs = getset(repo, subset, s)
    if keys == ["rev"]:
        revs.sort()
        return revs
    elif keys == ["-rev"]:
        revs.sort(reverse=True)
        return revs
    for r in revs:
        c = repo[r]
        e = []
        for k in keys:
            if k == 'rev':
                e.append(r)
            elif k == '-rev':
                e.append(-r)
            elif k == 'branch':
                e.append(c.branch())
            elif k == '-branch':
                e.append(invert(c.branch()))
            elif k == 'desc':
                e.append(c.description())
            elif k == '-desc':
                e.append(invert(c.description()))
            elif k in 'user author':
                e.append(c.user())
            elif k in '-user -author':
                e.append(invert(c.user()))
            elif k == 'date':
                e.append(c.date()[0])
            elif k == '-date':
                e.append(-c.date()[0])
            else:
                raise error.ParseError(_("unknown sort key %r") % k)
        e.append(r)
        l.append(e)
    l.sort()
    return baseset([e[-1] for e in l])

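# Illustrative sketch (standalone): invert() maps each byte to its complement,
# so an ascending sort of the inverted keys yields descending text order:
#
#   def invert(s):
#       return "".join(chr(255 - ord(c)) for c in s)
#   sorted(['alpha', 'beta'], key=invert)  # -> ['beta', 'alpha']
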
def subrepo(repo, subset, x):
    """``subrepo([pattern])``
    Changesets that add, modify or remove the given subrepo. If no subrepo
    pattern is named, any subrepo changes are returned.
    """
    # i18n: "subrepo" is a keyword
    args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
    if len(args) != 0:
        pat = getstring(args[0], _("subrepo requires a pattern"))

    m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])

    def submatches(names):
        k, p, m = _stringmatcher(pat)
        for name in names:
            if m(name):
                yield name

    def matches(x):
        c = repo[x]
        s = repo.status(c.p1().node(), c.node(), match=m)

        if len(args) == 0:
            return s.added or s.modified or s.removed

        if s.added:
            return any(submatches(c.substate.keys()))

        if s.modified:
            subs = set(c.p1().substate.keys())
            subs.update(c.substate.keys())

            for path in submatches(subs):
                if c.p1().substate.get(path) != c.substate.get(path):
                    return True

        if s.removed:
            return any(submatches(c.p1().substate.keys()))

        return False

    return subset.filter(matches)

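# Illustrative usage sketch (the subrepo path 'libs/vendor' is hypothetical):
#
#   hg log -r "subrepo('libs/vendor')"   # changes to that exact subrepo
#   hg log -r "subrepo('re:^libs/')"     # any subrepo under libs/
#
# With no pattern, subrepo() returns every changeset that adds, modifies or
# removes .hgsubstate.
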
def _stringmatcher(pattern):
    """
    accepts a string, possibly starting with 're:' or 'literal:' prefix.
    returns the matcher name, pattern, and matcher function.
    missing or unknown prefixes are treated as literal matches.

    helper for tests:
    >>> def test(pattern, *tests):
    ...     kind, pattern, matcher = _stringmatcher(pattern)
    ...     return (kind, pattern, [bool(matcher(t)) for t in tests])

    exact matching (no prefix):
    >>> test('abcdefg', 'abc', 'def', 'abcdefg')
    ('literal', 'abcdefg', [False, False, True])

    regex matching ('re:' prefix)
    >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
    ('re', 'a.+b', [False, False, True])

    force exact matches ('literal:' prefix)
    >>> test('literal:re:foobar', 'foobar', 're:foobar')
    ('literal', 're:foobar', [False, True])

    unknown prefixes are ignored and treated as literals
    >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
    ('literal', 'foo:bar', [False, False, True])
    """
    if pattern.startswith('re:'):
        pattern = pattern[3:]
        try:
            regex = re.compile(pattern)
        except re.error as e:
            raise error.ParseError(_('invalid regular expression: %s')
                                   % e)
        return 're', pattern, regex.search
    elif pattern.startswith('literal:'):
        pattern = pattern[8:]
    return 'literal', pattern, pattern.__eq__

def _substringmatcher(pattern):
    kind, pattern, matcher = _stringmatcher(pattern)
    if kind == 'literal':
        matcher = lambda s: pattern in s
    return kind, pattern, matcher

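# Illustrative sketch (standalone): unlike _stringmatcher, the literal form
# here matches substrings, which is what user()/author() rely on:
#
#   kind, pat, m = _substringmatcher('bob')
#   m('Bob Jones <bob@example.org>')   # -> True ('bob' occurs in the string)
#   kind, pat, m = _substringmatcher('re:bob|alice')
#   m('alice@example.org')             # -> truthy (regex search)
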
def tag(repo, subset, x):
    """``tag([name])``
    The specified tag by name, or all tagged revisions if no name is given.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a tag that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "tag" is a keyword
    args = getargs(x, 0, 1, _("tag takes one or no arguments"))
    cl = repo.changelog
    if args:
        pattern = getstring(args[0],
                            # i18n: "tag" is a keyword
                            _('the argument to tag must be a string'))
        kind, pattern, matcher = _stringmatcher(pattern)
        if kind == 'literal':
            # avoid resolving all tags
            tn = repo._tagscache.tags.get(pattern, None)
            if tn is None:
                raise error.RepoLookupError(_("tag '%s' does not exist")
                                            % pattern)
            s = set([repo[tn].rev()])
        else:
            s = set([cl.rev(n) for t, n in repo.tagslist() if matcher(t)])
    else:
        s = set([cl.rev(n) for t, n in repo.tagslist() if t != 'tip'])
    return subset & s

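# Illustrative usage sketch (the tag names are hypothetical):
#
#   hg log -r "tag()"              # every tagged revision except 'tip'
#   hg log -r "tag('1.0')"         # the revision tagged exactly '1.0'
#   hg log -r "tag('re:^1\.')"     # all tags starting with '1.'
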
def tagged(repo, subset, x):
    return tag(repo, subset, x)

def unstable(repo, subset, x):
    """``unstable()``
    Non-obsolete changesets with obsolete ancestors.
    """
    # i18n: "unstable" is a keyword
    getargs(x, 0, 0, _("unstable takes no arguments"))
    unstables = obsmod.getrevs(repo, 'unstable')
    return subset & unstables


def user(repo, subset, x):
    """``user(string)``
    User name contains string. The match is case-insensitive.

    If `string` starts with `re:`, the remainder of the string is treated as
    a regular expression. To match a user that actually contains `re:`, use
    the prefix `literal:`.
    """
    return author(repo, subset, x)

# experimental
def wdir(repo, subset, x):
    # i18n: "wdir" is a keyword
    getargs(x, 0, 0, _("wdir takes no arguments"))
    if None in subset or isinstance(subset, fullreposet):
        return baseset([None])
    return baseset()

# for internal use
def _list(repo, subset, x):
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    # remove duplicates here. it's difficult for caller to deduplicate sets
    # because different symbols can point to the same rev.
    cl = repo.changelog
    ls = []
    seen = set()
    for t in s.split('\0'):
        try:
            # fast path for integer revision
            r = int(t)
            if str(r) != t or r not in cl:
                raise ValueError
        except ValueError:
            r = repo[t].rev()
        if r in seen:
            continue
        if (r in subset
            or r == node.nullrev and isinstance(subset, fullreposet)):
            ls.append(r)
        seen.add(r)
    return baseset(ls)

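# Illustrative sketch (standalone): _list() receives a single '\0'-separated
# payload, as produced by the optimizer's 'or' fast path for 'a + 10 + b':
#
#   payload = '\0'.join(['a', '10', 'b'])   # 'a\x0010\x00b'
#
# Integer entries take the fast path above; names fall back to repo lookup,
# and duplicates are dropped via the 'seen' set.
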
# for internal use
def _intlist(repo, subset, x):
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    ls = [int(r) for r in s.split('\0')]
    s = subset
    return baseset([r for r in ls if r in s])

# for internal use
def _hexlist(repo, subset, x):
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    cl = repo.changelog
    ls = [cl.rev(node.bin(r)) for r in s.split('\0')]
    s = subset
    return baseset([r for r in ls if r in s])

symbols = {
    "adds": adds,
    "all": getall,
    "ancestor": ancestor,
    "ancestors": ancestors,
    "_firstancestors": _firstancestors,
    "author": author,
    "bisect": bisect,
    "bisected": bisected,
    "bookmark": bookmark,
    "branch": branch,
    "branchpoint": branchpoint,
    "bumped": bumped,
    "bundle": bundle,
    "children": children,
    "closed": closed,
    "contains": contains,
    "converted": converted,
    "date": date,
    "desc": desc,
    "descendants": descendants,
    "_firstdescendants": _firstdescendants,
    "destination": destination,
    "divergent": divergent,
    "draft": draft,
    "extinct": extinct,
    "extra": extra,
    "file": hasfile,
    "filelog": filelog,
    "first": first,
    "follow": follow,
    "_followfirst": _followfirst,
    "grep": grep,
    "head": head,
    "heads": heads,
    "hidden": hidden,
    "id": node_,
    "keyword": keyword,
    "last": last,
    "limit": limit,
    "_matchfiles": _matchfiles,
    "max": maxrev,
    "merge": merge,
    "min": minrev,
    "modifies": modifies,
    "named": named,
    "obsolete": obsolete,
    "only": only,
    "origin": origin,
    "outgoing": outgoing,
    "p1": p1,
    "p2": p2,
    "parents": parents,
    "present": present,
    "public": public,
    "_notpublic": _notpublic,
    "remote": remote,
    "removes": removes,
    "rev": rev,
    "reverse": reverse,
    "roots": roots,
    "sort": sort,
    "secret": secret,
    "subrepo": subrepo,
    "matching": matching,
    "tag": tag,
    "tagged": tagged,
    "user": user,
    "unstable": unstable,
    "wdir": wdir,
    "_list": _list,
    "_intlist": _intlist,
    "_hexlist": _hexlist,
}

# symbols which can't be used for a DoS attack for any given input
# (e.g. those which accept regexes as plain strings shouldn't be included)
# functions that just return a lot of changesets (like all) don't count here
safesymbols = set([
    "adds",
    "all",
    "ancestor",
    "ancestors",
    "_firstancestors",
    "author",
    "bisect",
    "bisected",
    "bookmark",
    "branch",
    "branchpoint",
    "bumped",
    "bundle",
    "children",
    "closed",
    "converted",
    "date",
    "desc",
    "descendants",
    "_firstdescendants",
    "destination",
    "divergent",
    "draft",
    "extinct",
    "extra",
    "file",
    "filelog",
    "first",
    "follow",
    "_followfirst",
    "head",
    "heads",
    "hidden",
    "id",
    "keyword",
    "last",
    "limit",
    "_matchfiles",
    "max",
    "merge",
    "min",
    "modifies",
    "obsolete",
    "only",
    "origin",
    "outgoing",
    "p1",
    "p2",
    "parents",
    "present",
    "public",
    "_notpublic",
    "remote",
    "removes",
    "rev",
    "reverse",
    "roots",
    "sort",
    "secret",
    "matching",
    "tag",
    "tagged",
    "user",
    "unstable",
    "wdir",
    "_list",
    "_intlist",
    "_hexlist",
    ])

methods = {
    "range": rangeset,
    "dagrange": dagrange,
    "string": stringset,
    "symbol": stringset,
    "and": andset,
    "or": orset,
    "not": notset,
    "list": listset,
    "keyvalue": keyvaluepair,
    "func": func,
    "ancestor": ancestorspec,
    "parent": parentspec,
    "parentpost": p1,
}

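# Illustrative sketch (an assumption about the parse-tree shape, consistent
# with the doctests elsewhere in this file): an expression like
# "heads(default)" parses to a nested tuple, and each node's first element
# selects its handler in the table above:
#
#   tree = ('func', ('symbol', 'heads'), ('symbol', 'default'))
#   methods[tree[0]]   # -> func, which then dispatches through the symbols table
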
def optimize(x, small):
    if x is None:
        return 0, x

    smallbonus = 1
    if small:
        smallbonus = .5

    op = x[0]
    if op == 'minus':
        return optimize(('and', x[1], ('not', x[2])), small)
    elif op == 'only':
        return optimize(('func', ('symbol', 'only'),
                         ('list', x[1], x[2])), small)
    elif op == 'onlypost':
        return optimize(('func', ('symbol', 'only'), x[1]), small)
    elif op == 'dagrangepre':
        return optimize(('func', ('symbol', 'ancestors'), x[1]), small)
    elif op == 'dagrangepost':
        return optimize(('func', ('symbol', 'descendants'), x[1]), small)
    elif op == 'rangepre':
        return optimize(('range', ('string', '0'), x[1]), small)
    elif op == 'rangepost':
        return optimize(('range', x[1], ('string', 'tip')), small)
    elif op == 'negate':
        return optimize(('string',
                         '-' + getstring(x[1], _("can't negate that"))), small)
    elif op in 'string symbol negate':
        return smallbonus, x # single revisions are small
    elif op == 'and':
        wa, ta = optimize(x[1], True)
        wb, tb = optimize(x[2], True)

        # (::x and not ::y)/(not ::y and ::x) have a fast path
        def isonly(revs, bases):
            return (
                revs[0] == 'func'
                and getstring(revs[1], _('not a symbol')) == 'ancestors'
                and bases[0] == 'not'
                and bases[1][0] == 'func'
                and getstring(bases[1][1], _('not a symbol')) == 'ancestors')

        w = min(wa, wb)
        if isonly(ta, tb):
            return w, ('func', ('symbol', 'only'), ('list', ta[2], tb[1][2]))
        if isonly(tb, ta):
            return w, ('func', ('symbol', 'only'), ('list', tb[2], ta[1][2]))

        if wa > wb:
            return w, (op, tb, ta)
        return w, (op, ta, tb)
    elif op == 'or':
        # fast path for machine-generated expression, that is likely to have
        # lots of trivial revisions: 'a + b + c()' to '_list(a b) + c()'
        ws, ts, ss = [], [], []
        def flushss():
            if not ss:
                return
            if len(ss) == 1:
                w, t = ss[0]
            else:
                s = '\0'.join(t[1] for w, t in ss)
                y = ('func', ('symbol', '_list'), ('string', s))
                w, t = optimize(y, False)
            ws.append(w)
            ts.append(t)
            del ss[:]
        for y in x[1:]:
            w, t = optimize(y, False)
            if t[0] == 'string' or t[0] == 'symbol':
                ss.append((w, t))
                continue
            flushss()
            ws.append(w)
            ts.append(t)
        flushss()
        if len(ts) == 1:
            return ws[0], ts[0] # 'or' operation is fully optimized out
        # we can't reorder trees by weight because it would change the order.
        # ("sort(a + b)" == "sort(b + a)", but "a + b" != "b + a")
        # ts = tuple(t for w, t in sorted(zip(ws, ts), key=lambda wt: wt[0]))
        return max(ws), (op,) + tuple(ts)
    elif op == 'not':
        # Optimize not public() to _notpublic() because we have a fast version
        if x[1] == ('func', ('symbol', 'public'), None):
            newsym = ('func', ('symbol', '_notpublic'), None)
            o = optimize(newsym, not small)
            return o[0], o[1]
        else:
            o = optimize(x[1], not small)
            return o[0], (op, o[1])
    elif op == 'parentpost':
        o = optimize(x[1], small)
        return o[0], (op, o[1])
    elif op == 'group':
        return optimize(x[1], small)
    elif op in 'dagrange range list parent ancestorspec':
        if op == 'parent':
            # x^:y means (x^) : y, not x ^ (:y)
            post = ('parentpost', x[1])
            if x[2][0] == 'dagrangepre':
                return optimize(('dagrange', post, x[2][1]), small)
            elif x[2][0] == 'rangepre':
                return optimize(('range', post, x[2][1]), small)

        wa, ta = optimize(x[1], small)
        wb, tb = optimize(x[2], small)
        return wa + wb, (op, ta, tb)
    elif op == 'func':
        f = getstring(x[1], _("not a symbol"))
        wa, ta = optimize(x[2], small)
        if f in ("author branch closed date desc file grep keyword "
                 "outgoing user"):
            w = 10 # slow
        elif f in "modifies adds removes":
            w = 30 # slower
        elif f == "contains":
            w = 100 # very slow
        elif f == "ancestor":
            w = 1 * smallbonus
        elif f in "reverse limit first _intlist":
            w = 0
        elif f in "sort":
            w = 10 # assume most sorts look at changelog
        else:
            w = 1
        return w + wa, (op, x[1], ta)
    return 1, x

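# Illustrative sketch (standalone, mirrors the 'minus' branch above): the
# optimizer rewrites syntactic sugar into cheaper equivalents and weights
# operands so the cheaper side of an 'and' is evaluated first, e.g.:
#
#   optimize(('minus', ('symbol', 'tip'), ('symbol', '0')), False)
#   # -> (weight, ('and', ('symbol', 'tip'), ('not', ('symbol', '0'))))
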
_aliasarg = ('func', ('symbol', '_aliasarg'))
def _getaliasarg(tree):
    """If tree matches ('func', ('symbol', '_aliasarg'), ('string', X))
    return X, None otherwise.
    """
    if (len(tree) == 3 and tree[:2] == _aliasarg
        and tree[2][0] == 'string'):
        return tree[2][1]
    return None

def _checkaliasarg(tree, known=None):
    """Check tree contains no _aliasarg construct or only ones which
    value is in known. Used to avoid alias placeholders injection.
    """
    if isinstance(tree, tuple):
        arg = _getaliasarg(tree)
        if arg is not None and (not known or arg not in known):
            raise error.UnknownIdentifier('_aliasarg', [])
        for t in tree:
            _checkaliasarg(t, known)

# the set of valid characters for the initial letter of symbols in
# alias declarations and definitions
_aliassyminitletters = set(c for c in [chr(i) for i in xrange(256)]
                           if c.isalnum() or c in '._@$' or ord(c) > 127)

def _tokenizealias(program, lookup=None):
    """Parse alias declaration/definition into a stream of tokens

    This allows symbol names to use also ``$`` as an initial letter
    (for backward compatibility), and callers of this function should
    examine whether ``$`` is used also for unexpected symbols or not.
    """
    return tokenize(program, lookup=lookup,
                    syminitletters=_aliassyminitletters)

def _parsealiasdecl(decl):
    """Parse alias declaration ``decl``

    This returns ``(name, tree, args, errorstr)`` tuple:

    - ``name``: of declared alias (may be ``decl`` itself at error)
    - ``tree``: parse result (or ``None`` at error)
    - ``args``: list of alias argument names (or None for symbol declaration)
    - ``errorstr``: detail about detected error (or None)

    >>> _parsealiasdecl('foo')
    ('foo', ('symbol', 'foo'), None, None)
    >>> _parsealiasdecl('$foo')
    ('$foo', None, None, "'$' not for alias arguments")
    >>> _parsealiasdecl('foo::bar')
    ('foo::bar', None, None, 'invalid format')
    >>> _parsealiasdecl('foo bar')
    ('foo bar', None, None, 'at 4: invalid token')
    >>> _parsealiasdecl('foo()')
    ('foo', ('func', ('symbol', 'foo')), [], None)
    >>> _parsealiasdecl('$foo()')
    ('$foo()', None, None, "'$' not for alias arguments")
    >>> _parsealiasdecl('foo($1, $2)')
    ('foo', ('func', ('symbol', 'foo')), ['$1', '$2'], None)
    >>> _parsealiasdecl('foo(bar_bar, baz.baz)')
    ('foo', ('func', ('symbol', 'foo')), ['bar_bar', 'baz.baz'], None)
    >>> _parsealiasdecl('foo($1, $2, nested($1, $2))')
    ('foo($1, $2, nested($1, $2))', None, None, 'invalid argument list')
    >>> _parsealiasdecl('foo(bar($1, $2))')
    ('foo(bar($1, $2))', None, None, 'invalid argument list')
    >>> _parsealiasdecl('foo("string")')
    ('foo("string")', None, None, 'invalid argument list')
    >>> _parsealiasdecl('foo($1, $2')
    ('foo($1, $2', None, None, 'at 10: unexpected token: end')
    >>> _parsealiasdecl('foo("string')
    ('foo("string', None, None, 'at 5: unterminated string')
    >>> _parsealiasdecl('foo($1, $2, $1)')
    ('foo', None, None, 'argument names collide with each other')
    """
    p = parser.parser(elements)
    try:
        tree, pos = p.parse(_tokenizealias(decl))
        if (pos != len(decl)):
            raise error.ParseError(_('invalid token'), pos)

        if isvalidsymbol(tree):
            # "name = ...." style
            name = getsymbol(tree)
            if name.startswith('$'):
                return (decl, None, None, _("'$' not for alias arguments"))
            return (name, ('symbol', name), None, None)

        if isvalidfunc(tree):
            # "name(arg, ....) = ...." style
            name = getfuncname(tree)
            if name.startswith('$'):
                return (decl, None, None, _("'$' not for alias arguments"))
            args = []
            for arg in getfuncargs(tree):
                if not isvalidsymbol(arg):
                    return (decl, None, None, _("invalid argument list"))
                args.append(getsymbol(arg))
            if len(args) != len(set(args)):
                return (name, None, None,
                        _("argument names collide with each other"))
            return (name, ('func', ('symbol', name)), args, None)

        return (decl, None, None, _("invalid format"))
    except error.ParseError as inst:
        return (decl, None, None, parseerrordetail(inst))

def _parsealiasdefn(defn, args):
    """Parse alias definition ``defn``

    This function also replaces alias argument references in the
    specified definition by ``_aliasarg(ARGNAME)``.

    ``args`` is a list of alias argument names, or None if the alias
    is declared as a symbol.

    This returns "tree" as parsing result.

    >>> args = ['$1', '$2', 'foo']
    >>> print prettyformat(_parsealiasdefn('$1 or foo', args))
    (or
      (func
        ('symbol', '_aliasarg')
        ('string', '$1'))
      (func
        ('symbol', '_aliasarg')
        ('string', 'foo')))
    >>> try:
    ...     _parsealiasdefn('$1 or $bar', args)
    ... except error.ParseError, inst:
    ...     print parseerrordetail(inst)
    at 6: '$' not for alias arguments
    >>> args = ['$1', '$10', 'foo']
    >>> print prettyformat(_parsealiasdefn('$10 or foobar', args))
    (or
      (func
        ('symbol', '_aliasarg')
        ('string', '$10'))
      ('symbol', 'foobar'))
    >>> print prettyformat(_parsealiasdefn('"$1" or "foo"', args))
    (or
      ('string', '$1')
      ('string', 'foo'))
    """
    def tokenizedefn(program, lookup=None):
        if args:
            argset = set(args)
        else:
            argset = set()

        for t, value, pos in _tokenizealias(program, lookup=lookup):
            if t == 'symbol':
                if value in argset:
                    # emulate tokenization of "_aliasarg('ARGNAME')":
                    # "_aliasarg()" is an unknown symbol only used to separate
                    # alias argument placeholders from regular strings.
                    yield ('symbol', '_aliasarg', pos)
                    yield ('(', None, pos)
                    yield ('string', value, pos)
                    yield (')', None, pos)
                    continue
                elif value.startswith('$'):
                    raise error.ParseError(_("'$' not for alias arguments"),
                                           pos)
            yield (t, value, pos)

    p = parser.parser(elements)
    tree, pos = p.parse(tokenizedefn(defn))
    if pos != len(defn):
        raise error.ParseError(_('invalid token'), pos)
    return parser.simplifyinfixops(tree, ('or',))

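# Illustrative configuration sketch (reusing the alias examples from the
# revsetalias docstring below): user-defined aliases come from the
# [revsetalias] section of an hgrc file; the left-hand side is handled by
# _parsealiasdecl() and the right-hand side by _parsealiasdefn():
#
#   [revsetalias]
#   h = heads(default)
#   b($1) = ancestors($1) - ancestors(default)
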
class revsetalias(object):
    # whether own `error` information is already shown or not.
    # this avoids showing same warning multiple times at each `findaliases`.
    warned = False

    def __init__(self, name, value):
        '''Aliases like:

        h = heads(default)
        b($1) = ancestors($1) - ancestors(default)
        '''
        self.name, self.tree, self.args, self.error = _parsealiasdecl(name)
        if self.error:
            self.error = _('failed to parse the declaration of revset alias'
                           ' "%s": %s') % (self.name, self.error)
            return

        try:
            self.replacement = _parsealiasdefn(value, self.args)
            # Check for placeholder injection
            _checkaliasarg(self.replacement, self.args)
        except error.ParseError as inst:
            self.error = _('failed to parse the definition of revset alias'
                           ' "%s": %s') % (self.name, parseerrordetail(inst))

2520 def _getalias(aliases, tree):
2524 def _getalias(aliases, tree):
2521 """If tree looks like an unexpanded alias, return it. Return None
2525 """If tree looks like an unexpanded alias, return it. Return None
2522 otherwise.
2526 otherwise.
2523 """
2527 """
2524 if isinstance(tree, tuple) and tree:
2528 if isinstance(tree, tuple) and tree:
2525 if tree[0] == 'symbol' and len(tree) == 2:
2529 if tree[0] == 'symbol' and len(tree) == 2:
2526 name = tree[1]
2530 name = tree[1]
2527 alias = aliases.get(name)
2531 alias = aliases.get(name)
2528 if alias and alias.args is None and alias.tree == tree:
2532 if alias and alias.args is None and alias.tree == tree:
2529 return alias
2533 return alias
2530 if tree[0] == 'func' and len(tree) > 1:
2534 if tree[0] == 'func' and len(tree) > 1:
2531 if tree[1][0] == 'symbol' and len(tree[1]) == 2:
2535 if tree[1][0] == 'symbol' and len(tree[1]) == 2:
2532 name = tree[1][1]
2536 name = tree[1][1]
2533 alias = aliases.get(name)
2537 alias = aliases.get(name)
2534 if alias and alias.args is not None and alias.tree == tree[:2]:
2538 if alias and alias.args is not None and alias.tree == tree[:2]:
2535 return alias
2539 return alias
2536 return None
2540 return None
2537
2541
2538 def _expandargs(tree, args):
2542 def _expandargs(tree, args):
2539 """Replace _aliasarg instances with the substitution value of the
2543 """Replace _aliasarg instances with the substitution value of the
2540 same name in args, recursively.
2544 same name in args, recursively.
2541 """
2545 """
2542 if not tree or not isinstance(tree, tuple):
2546 if not tree or not isinstance(tree, tuple):
2543 return tree
2547 return tree
2544 arg = _getaliasarg(tree)
2548 arg = _getaliasarg(tree)
2545 if arg is not None:
2549 if arg is not None:
2546 return args[arg]
2550 return args[arg]
2547 return tuple(_expandargs(t, args) for t in tree)
2551 return tuple(_expandargs(t, args) for t in tree)
2548
2552
2549 def _expandaliases(aliases, tree, expanding, cache):
2553 def _expandaliases(aliases, tree, expanding, cache):
2550 """Expand aliases in tree, recursively.
2554 """Expand aliases in tree, recursively.
2551
2555
2552 'aliases' is a dictionary mapping user defined aliases to
2556 'aliases' is a dictionary mapping user defined aliases to
2553 revsetalias objects.
2557 revsetalias objects.
2554 """
2558 """
2555 if not isinstance(tree, tuple):
2559 if not isinstance(tree, tuple):
2556 # Do not expand raw strings
2560 # Do not expand raw strings
2557 return tree
2561 return tree
2558 alias = _getalias(aliases, tree)
2562 alias = _getalias(aliases, tree)
2559 if alias is not None:
2563 if alias is not None:
2560 if alias.error:
2564 if alias.error:
2561 raise util.Abort(alias.error)
2565 raise util.Abort(alias.error)
2562 if alias in expanding:
2566 if alias in expanding:
2563 raise error.ParseError(_('infinite expansion of revset alias "%s" '
2567 raise error.ParseError(_('infinite expansion of revset alias "%s" '
2564 'detected') % alias.name)
2568 'detected') % alias.name)
2565 expanding.append(alias)
2569 expanding.append(alias)
2566 if alias.name not in cache:
2570 if alias.name not in cache:
2567 cache[alias.name] = _expandaliases(aliases, alias.replacement,
2571 cache[alias.name] = _expandaliases(aliases, alias.replacement,
2568 expanding, cache)
2572 expanding, cache)
2569 result = cache[alias.name]
2573 result = cache[alias.name]
2570 expanding.pop()
2574 expanding.pop()
2571 if alias.args is not None:
2575 if alias.args is not None:
2572 l = getlist(tree[2])
2576 l = getlist(tree[2])
2573 if len(l) != len(alias.args):
2577 if len(l) != len(alias.args):
2574 raise error.ParseError(
2578 raise error.ParseError(
2575 _('invalid number of arguments: %s') % len(l))
2579 _('invalid number of arguments: %s') % len(l))
2576 l = [_expandaliases(aliases, a, [], cache) for a in l]
2580 l = [_expandaliases(aliases, a, [], cache) for a in l]
2577 result = _expandargs(result, dict(zip(alias.args, l)))
2581 result = _expandargs(result, dict(zip(alias.args, l)))
2578 else:
2582 else:
2579 result = tuple(_expandaliases(aliases, t, expanding, cache)
2583 result = tuple(_expandaliases(aliases, t, expanding, cache)
2580 for t in tree)
2584 for t in tree)
2581 return result
2585 return result
2582
2586
2583 def findaliases(ui, tree, showwarning=None):
2587 def findaliases(ui, tree, showwarning=None):
2584 _checkaliasarg(tree)
2588 _checkaliasarg(tree)
2585 aliases = {}
2589 aliases = {}
2586 for k, v in ui.configitems('revsetalias'):
2590 for k, v in ui.configitems('revsetalias'):
2587 alias = revsetalias(k, v)
2591 alias = revsetalias(k, v)
2588 aliases[alias.name] = alias
2592 aliases[alias.name] = alias
2589 tree = _expandaliases(aliases, tree, [], {})
2593 tree = _expandaliases(aliases, tree, [], {})
2590 if showwarning:
2594 if showwarning:
2591 # warn about problematic (but not referred) aliases
2595 # warn about problematic (but not referred) aliases
2592 for name, alias in sorted(aliases.iteritems()):
2596 for name, alias in sorted(aliases.iteritems()):
2593 if alias.error and not alias.warned:
2597 if alias.error and not alias.warned:
2594 showwarning(_('warning: %s\n') % (alias.error))
2598 showwarning(_('warning: %s\n') % (alias.error))
2595 alias.warned = True
2599 alias.warned = True
2596 return tree
2600 return tree
2597
2601
2598 def foldconcat(tree):
2602 def foldconcat(tree):
2599 """Fold elements to be concatenated by `##`
2603 """Fold elements to be concatenated by `##`
2600 """
2604 """
2601 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2605 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2602 return tree
2606 return tree
2603 if tree[0] == '_concat':
2607 if tree[0] == '_concat':
2604 pending = [tree]
2608 pending = [tree]
2605 l = []
2609 l = []
2606 while pending:
2610 while pending:
2607 e = pending.pop()
2611 e = pending.pop()
2608 if e[0] == '_concat':
2612 if e[0] == '_concat':
2609 pending.extend(reversed(e[1:]))
2613 pending.extend(reversed(e[1:]))
2610 elif e[0] in ('string', 'symbol'):
2614 elif e[0] in ('string', 'symbol'):
2611 l.append(e[1])
2615 l.append(e[1])
2612 else:
2616 else:
2613 msg = _("\"##\" can't concatenate \"%s\" element") % (e[0])
2617 msg = _("\"##\" can't concatenate \"%s\" element") % (e[0])
2614 raise error.ParseError(msg)
2618 raise error.ParseError(msg)
2615 return ('string', ''.join(l))
2619 return ('string', ''.join(l))
2616 else:
2620 else:
2617 return tuple(foldconcat(t) for t in tree)
2621 return tuple(foldconcat(t) for t in tree)
2618
2622
2619 def parse(spec, lookup=None):
2623 def parse(spec, lookup=None):
2620 p = parser.parser(elements)
2624 p = parser.parser(elements)
2621 tree, pos = p.parse(tokenize(spec, lookup=lookup))
2625 tree, pos = p.parse(tokenize(spec, lookup=lookup))
2622 if pos != len(spec):
2626 if pos != len(spec):
2623 raise error.ParseError(_("invalid token"), pos)
2627 raise error.ParseError(_("invalid token"), pos)
2624 return parser.simplifyinfixops(tree, ('or',))
2628 return parser.simplifyinfixops(tree, ('or',))
2625
2629
2626 def posttreebuilthook(tree, repo):
2630 def posttreebuilthook(tree, repo):
2627 # hook for extensions to execute code on the optimized tree
2631 # hook for extensions to execute code on the optimized tree
2628 pass
2632 pass
2629
2633
2630 def match(ui, spec, repo=None):
2634 def match(ui, spec, repo=None):
2631 if not spec:
2635 if not spec:
2632 raise error.ParseError(_("empty query"))
2636 raise error.ParseError(_("empty query"))
2633 lookup = None
2637 lookup = None
2634 if repo:
2638 if repo:
2635 lookup = repo.__contains__
2639 lookup = repo.__contains__
2636 tree = parse(spec, lookup)
2640 tree = parse(spec, lookup)
2637 if ui:
2641 if ui:
2638 tree = findaliases(ui, tree, showwarning=ui.warn)
2642 tree = findaliases(ui, tree, showwarning=ui.warn)
2639 tree = foldconcat(tree)
2643 tree = foldconcat(tree)
2640 weight, tree = optimize(tree, True)
2644 weight, tree = optimize(tree, True)
2641 posttreebuilthook(tree, repo)
2645 posttreebuilthook(tree, repo)
2642 def mfunc(repo, subset=None):
2646 def mfunc(repo, subset=None):
2643 if subset is None:
2647 if subset is None:
2644 subset = fullreposet(repo)
2648 subset = fullreposet(repo)
2645 if util.safehasattr(subset, 'isascending'):
2649 if util.safehasattr(subset, 'isascending'):
2646 result = getset(repo, subset, tree)
2650 result = getset(repo, subset, tree)
2647 else:
2651 else:
2648 result = getset(repo, baseset(subset), tree)
2652 result = getset(repo, baseset(subset), tree)
2649 return result
2653 return result
2650 return mfunc
2654 return mfunc
2651
2655
2652 def formatspec(expr, *args):
2656 def formatspec(expr, *args):
2653 '''
2657 '''
2654 This is a convenience function for using revsets internally, and
2658 This is a convenience function for using revsets internally, and
2655 escapes arguments appropriately. Aliases are intentionally ignored
2659 escapes arguments appropriately. Aliases are intentionally ignored
2656 so that intended expression behavior isn't accidentally subverted.
2660 so that intended expression behavior isn't accidentally subverted.
2657
2661
2658 Supported arguments:
2662 Supported arguments:
2659
2663
2660 %r = revset expression, parenthesized
2664 %r = revset expression, parenthesized
2661 %d = int(arg), no quoting
2665 %d = int(arg), no quoting
2662 %s = string(arg), escaped and single-quoted
2666 %s = string(arg), escaped and single-quoted
2663 %b = arg.branch(), escaped and single-quoted
2667 %b = arg.branch(), escaped and single-quoted
2664 %n = hex(arg), single-quoted
2668 %n = hex(arg), single-quoted
2665 %% = a literal '%'
2669 %% = a literal '%'
2666
2670
2667 Prefixing the type with 'l' specifies a parenthesized list of that type.
2671 Prefixing the type with 'l' specifies a parenthesized list of that type.
2668
2672
2669 >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
2673 >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
2670 '(10 or 11):: and ((this()) or (that()))'
2674 '(10 or 11):: and ((this()) or (that()))'
2671 >>> formatspec('%d:: and not %d::', 10, 20)
2675 >>> formatspec('%d:: and not %d::', 10, 20)
2672 '10:: and not 20::'
2676 '10:: and not 20::'
2673 >>> formatspec('%ld or %ld', [], [1])
2677 >>> formatspec('%ld or %ld', [], [1])
2674 "_list('') or 1"
2678 "_list('') or 1"
2675 >>> formatspec('keyword(%s)', 'foo\\xe9')
2679 >>> formatspec('keyword(%s)', 'foo\\xe9')
2676 "keyword('foo\\\\xe9')"
2680 "keyword('foo\\\\xe9')"
2677 >>> b = lambda: 'default'
2681 >>> b = lambda: 'default'
2678 >>> b.branch = b
2682 >>> b.branch = b
2679 >>> formatspec('branch(%b)', b)
2683 >>> formatspec('branch(%b)', b)
2680 "branch('default')"
2684 "branch('default')"
2681 >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
2685 >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
2682 "root(_list('a\\x00b\\x00c\\x00d'))"
2686 "root(_list('a\\x00b\\x00c\\x00d'))"
2683 '''
2687 '''
2684
2688
2685 def quote(s):
2689 def quote(s):
2686 return repr(str(s))
2690 return repr(str(s))
2687
2691
2688 def argtype(c, arg):
2692 def argtype(c, arg):
2689 if c == 'd':
2693 if c == 'd':
2690 return str(int(arg))
2694 return str(int(arg))
2691 elif c == 's':
2695 elif c == 's':
2692 return quote(arg)
2696 return quote(arg)
2693 elif c == 'r':
2697 elif c == 'r':
2694 parse(arg) # make sure syntax errors are confined
2698 parse(arg) # make sure syntax errors are confined
2695 return '(%s)' % arg
2699 return '(%s)' % arg
2696 elif c == 'n':
2700 elif c == 'n':
2697 return quote(node.hex(arg))
2701 return quote(node.hex(arg))
2698 elif c == 'b':
2702 elif c == 'b':
2699 return quote(arg.branch())
2703 return quote(arg.branch())
2700
2704
2701 def listexp(s, t):
2705 def listexp(s, t):
2702 l = len(s)
2706 l = len(s)
2703 if l == 0:
2707 if l == 0:
2704 return "_list('')"
2708 return "_list('')"
2705 elif l == 1:
2709 elif l == 1:
2706 return argtype(t, s[0])
2710 return argtype(t, s[0])
2707 elif t == 'd':
2711 elif t == 'd':
2708 return "_intlist('%s')" % "\0".join(str(int(a)) for a in s)
2712 return "_intlist('%s')" % "\0".join(str(int(a)) for a in s)
2709 elif t == 's':
2713 elif t == 's':
2710 return "_list('%s')" % "\0".join(s)
2714 return "_list('%s')" % "\0".join(s)
2711 elif t == 'n':
2715 elif t == 'n':
2712 return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
2716 return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
2713 elif t == 'b':
2717 elif t == 'b':
2714 return "_list('%s')" % "\0".join(a.branch() for a in s)
2718 return "_list('%s')" % "\0".join(a.branch() for a in s)
2715
2719
2716 m = l // 2
2720 m = l // 2
2717 return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))
2721 return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))
2718
2722
2719 ret = ''
2723 ret = ''
2720 pos = 0
2724 pos = 0
2721 arg = 0
2725 arg = 0
2722 while pos < len(expr):
2726 while pos < len(expr):
2723 c = expr[pos]
2727 c = expr[pos]
2724 if c == '%':
2728 if c == '%':
2725 pos += 1
2729 pos += 1
2726 d = expr[pos]
2730 d = expr[pos]
2727 if d == '%':
2731 if d == '%':
2728 ret += d
2732 ret += d
2729 elif d in 'dsnbr':
2733 elif d in 'dsnbr':
2730 ret += argtype(d, args[arg])
2734 ret += argtype(d, args[arg])
2731 arg += 1
2735 arg += 1
2732 elif d == 'l':
2736 elif d == 'l':
2733 # a list of some type
2737 # a list of some type
2734 pos += 1
2738 pos += 1
2735 d = expr[pos]
2739 d = expr[pos]
2736 ret += listexp(list(args[arg]), d)
2740 ret += listexp(list(args[arg]), d)
2737 arg += 1
2741 arg += 1
2738 else:
2742 else:
2739 raise util.Abort('unexpected revspec format character %s' % d)
2743 raise util.Abort('unexpected revspec format character %s' % d)
2740 else:
2744 else:
2741 ret += c
2745 ret += c
2742 pos += 1
2746 pos += 1
2743
2747
2744 return ret
2748 return ret
2745
2749
2746 def prettyformat(tree):
2750 def prettyformat(tree):
2747 return parser.prettyformat(tree, ('string', 'symbol'))
2751 return parser.prettyformat(tree, ('string', 'symbol'))
2748
2752
2749 def depth(tree):
2753 def depth(tree):
2750 if isinstance(tree, tuple):
2754 if isinstance(tree, tuple):
2751 return max(map(depth, tree)) + 1
2755 return max(map(depth, tree)) + 1
2752 else:
2756 else:
2753 return 0
2757 return 0
2754
2758
2755 def funcsused(tree):
2759 def funcsused(tree):
2756 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2760 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2757 return set()
2761 return set()
2758 else:
2762 else:
2759 funcs = set()
2763 funcs = set()
2760 for s in tree[1:]:
2764 for s in tree[1:]:
2761 funcs |= funcsused(s)
2765 funcs |= funcsused(s)
2762 if tree[0] == 'func':
2766 if tree[0] == 'func':
2763 funcs.add(tree[1][1])
2767 funcs.add(tree[1][1])
2764 return funcs
2768 return funcs
2765
2769
2766 class abstractsmartset(object):
2770 class abstractsmartset(object):
2767
2771
2768 def __nonzero__(self):
2772 def __nonzero__(self):
2769 """True if the smartset is not empty"""
2773 """True if the smartset is not empty"""
2770 raise NotImplementedError()
2774 raise NotImplementedError()
2771
2775
2772 def __contains__(self, rev):
2776 def __contains__(self, rev):
2773 """provide fast membership testing"""
2777 """provide fast membership testing"""
2774 raise NotImplementedError()
2778 raise NotImplementedError()
2775
2779
2776 def __iter__(self):
2780 def __iter__(self):
2777 """iterate the set in the order it is supposed to be iterated"""
2781 """iterate the set in the order it is supposed to be iterated"""
2778 raise NotImplementedError()
2782 raise NotImplementedError()
2779
2783
2780 # Attributes containing a function to perform a fast iteration in a given
2784 # Attributes containing a function to perform a fast iteration in a given
2781 # direction. A smartset can have none, one, or both defined.
2785 # direction. A smartset can have none, one, or both defined.
2782 #
2786 #
2783 # Default value is None instead of a function returning None to avoid
2787 # Default value is None instead of a function returning None to avoid
2784 # initializing an iterator just for testing if a fast method exists.
2788 # initializing an iterator just for testing if a fast method exists.
2785 fastasc = None
2789 fastasc = None
2786 fastdesc = None
2790 fastdesc = None
2787
2791
2788 def isascending(self):
2792 def isascending(self):
2789 """True if the set will iterate in ascending order"""
2793 """True if the set will iterate in ascending order"""
2790 raise NotImplementedError()
2794 raise NotImplementedError()
2791
2795
2792 def isdescending(self):
2796 def isdescending(self):
2793 """True if the set will iterate in descending order"""
2797 """True if the set will iterate in descending order"""
2794 raise NotImplementedError()
2798 raise NotImplementedError()
2795
2799
2796 def min(self):
2800 def min(self):
2797 """return the minimum element in the set"""
2801 """return the minimum element in the set"""
2798 if self.fastasc is not None:
2802 if self.fastasc is not None:
2799 for r in self.fastasc():
2803 for r in self.fastasc():
2800 return r
2804 return r
2801 raise ValueError('arg is an empty sequence')
2805 raise ValueError('arg is an empty sequence')
2802 return min(self)
2806 return min(self)
2803
2807
2804 def max(self):
2808 def max(self):
2805 """return the maximum element in the set"""
2809 """return the maximum element in the set"""
2806 if self.fastdesc is not None:
2810 if self.fastdesc is not None:
2807 for r in self.fastdesc():
2811 for r in self.fastdesc():
2808 return r
2812 return r
2809 raise ValueError('arg is an empty sequence')
2813 raise ValueError('arg is an empty sequence')
2810 return max(self)
2814 return max(self)
2811
2815
2812 def first(self):
2816 def first(self):
2813 """return the first element in the set (user iteration perspective)
2817 """return the first element in the set (user iteration perspective)
2814
2818
2815 Return None if the set is empty"""
2819 Return None if the set is empty"""
2816 raise NotImplementedError()
2820 raise NotImplementedError()
2817
2821
2818 def last(self):
2822 def last(self):
2819 """return the last element in the set (user iteration perspective)
2823 """return the last element in the set (user iteration perspective)
2820
2824
2821 Return None if the set is empty"""
2825 Return None if the set is empty"""
2822 raise NotImplementedError()
2826 raise NotImplementedError()
2823
2827
2824 def __len__(self):
2828 def __len__(self):
2825 """return the length of the smartsets
2829 """return the length of the smartsets
2826
2830
2827 This can be expensive on smartset that could be lazy otherwise."""
2831 This can be expensive on smartset that could be lazy otherwise."""
2828 raise NotImplementedError()
2832 raise NotImplementedError()
2829
2833
2830 def reverse(self):
2834 def reverse(self):
2831 """reverse the expected iteration order"""
2835 """reverse the expected iteration order"""
2832 raise NotImplementedError()
2836 raise NotImplementedError()
2833
2837
2834 def sort(self, reverse=True):
2838 def sort(self, reverse=True):
2835 """get the set to iterate in an ascending or descending order"""
2839 """get the set to iterate in an ascending or descending order"""
2836 raise NotImplementedError()
2840 raise NotImplementedError()
2837
2841
2838 def __and__(self, other):
2842 def __and__(self, other):
2839 """Returns a new object with the intersection of the two collections.
2843 """Returns a new object with the intersection of the two collections.
2840
2844
2841 This is part of the mandatory API for smartset."""
2845 This is part of the mandatory API for smartset."""
2842 if isinstance(other, fullreposet):
2846 if isinstance(other, fullreposet):
2843 return self
2847 return self
2844 return self.filter(other.__contains__, cache=False)
2848 return self.filter(other.__contains__, cache=False)
2845
2849
2846 def __add__(self, other):
2850 def __add__(self, other):
2847 """Returns a new object with the union of the two collections.
2851 """Returns a new object with the union of the two collections.
2848
2852
2849 This is part of the mandatory API for smartset."""
2853 This is part of the mandatory API for smartset."""
2850 return addset(self, other)
2854 return addset(self, other)
2851
2855
2852 def __sub__(self, other):
2856 def __sub__(self, other):
2853 """Returns a new object with the substraction of the two collections.
2857 """Returns a new object with the substraction of the two collections.
2854
2858
2855 This is part of the mandatory API for smartset."""
2859 This is part of the mandatory API for smartset."""
2856 c = other.__contains__
2860 c = other.__contains__
2857 return self.filter(lambda r: not c(r), cache=False)
2861 return self.filter(lambda r: not c(r), cache=False)
2858
2862
2859 def filter(self, condition, cache=True):
2863 def filter(self, condition, cache=True):
2860 """Returns this smartset filtered by condition as a new smartset.
2864 """Returns this smartset filtered by condition as a new smartset.
2861
2865
2862 `condition` is a callable which takes a revision number and returns a
2866 `condition` is a callable which takes a revision number and returns a
2863 boolean.
2867 boolean.
2864
2868
2865 This is part of the mandatory API for smartset."""
2869 This is part of the mandatory API for smartset."""
2866 # builtin cannot be cached. but do not needs to
2870 # builtin cannot be cached. but do not needs to
2867 if cache and util.safehasattr(condition, 'func_code'):
2871 if cache and util.safehasattr(condition, 'func_code'):
2868 condition = util.cachefunc(condition)
2872 condition = util.cachefunc(condition)
2869 return filteredset(self, condition)
2873 return filteredset(self, condition)
2870
2874
2871 class baseset(abstractsmartset):
2875 class baseset(abstractsmartset):
2872 """Basic data structure that represents a revset and contains the basic
2876 """Basic data structure that represents a revset and contains the basic
2873 operation that it should be able to perform.
2877 operation that it should be able to perform.
2874
2878
2875 Every method in this class should be implemented by any smartset class.
2879 Every method in this class should be implemented by any smartset class.
2876 """
2880 """
2877 def __init__(self, data=()):
2881 def __init__(self, data=()):
2878 if not isinstance(data, list):
2882 if not isinstance(data, list):
2879 data = list(data)
2883 data = list(data)
2880 self._list = data
2884 self._list = data
2881 self._ascending = None
2885 self._ascending = None
2882
2886
2883 @util.propertycache
2887 @util.propertycache
2884 def _set(self):
2888 def _set(self):
2885 return set(self._list)
2889 return set(self._list)
2886
2890
2887 @util.propertycache
2891 @util.propertycache
2888 def _asclist(self):
2892 def _asclist(self):
2889 asclist = self._list[:]
2893 asclist = self._list[:]
2890 asclist.sort()
2894 asclist.sort()
2891 return asclist
2895 return asclist
2892
2896
2893 def __iter__(self):
2897 def __iter__(self):
2894 if self._ascending is None:
2898 if self._ascending is None:
2895 return iter(self._list)
2899 return iter(self._list)
2896 elif self._ascending:
2900 elif self._ascending:
2897 return iter(self._asclist)
2901 return iter(self._asclist)
2898 else:
2902 else:
2899 return reversed(self._asclist)
2903 return reversed(self._asclist)
2900
2904
2901 def fastasc(self):
2905 def fastasc(self):
2902 return iter(self._asclist)
2906 return iter(self._asclist)
2903
2907
2904 def fastdesc(self):
2908 def fastdesc(self):
2905 return reversed(self._asclist)
2909 return reversed(self._asclist)
2906
2910
2907 @util.propertycache
2911 @util.propertycache
2908 def __contains__(self):
2912 def __contains__(self):
2909 return self._set.__contains__
2913 return self._set.__contains__
2910
2914
2911 def __nonzero__(self):
2915 def __nonzero__(self):
2912 return bool(self._list)
2916 return bool(self._list)
2913
2917
2914 def sort(self, reverse=False):
2918 def sort(self, reverse=False):
2915 self._ascending = not bool(reverse)
2919 self._ascending = not bool(reverse)
2916
2920
2917 def reverse(self):
2921 def reverse(self):
2918 if self._ascending is None:
2922 if self._ascending is None:
2919 self._list.reverse()
2923 self._list.reverse()
2920 else:
2924 else:
2921 self._ascending = not self._ascending
2925 self._ascending = not self._ascending
2922
2926
2923 def __len__(self):
2927 def __len__(self):
2924 return len(self._list)
2928 return len(self._list)
2925
2929
2926 def isascending(self):
2930 def isascending(self):
2927 """Returns True if the collection is ascending order, False if not.
2931 """Returns True if the collection is ascending order, False if not.
2928
2932
2929 This is part of the mandatory API for smartset."""
2933 This is part of the mandatory API for smartset."""
2930 if len(self) <= 1:
2934 if len(self) <= 1:
2931 return True
2935 return True
2932 return self._ascending is not None and self._ascending
2936 return self._ascending is not None and self._ascending
2933
2937
2934 def isdescending(self):
2938 def isdescending(self):
2935 """Returns True if the collection is descending order, False if not.
2939 """Returns True if the collection is descending order, False if not.
2936
2940
2937 This is part of the mandatory API for smartset."""
2941 This is part of the mandatory API for smartset."""
2938 if len(self) <= 1:
2942 if len(self) <= 1:
2939 return True
2943 return True
2940 return self._ascending is not None and not self._ascending
2944 return self._ascending is not None and not self._ascending
2941
2945
2942 def first(self):
2946 def first(self):
2943 if self:
2947 if self:
2944 if self._ascending is None:
2948 if self._ascending is None:
2945 return self._list[0]
2949 return self._list[0]
2946 elif self._ascending:
2950 elif self._ascending:
2947 return self._asclist[0]
2951 return self._asclist[0]
2948 else:
2952 else:
2949 return self._asclist[-1]
2953 return self._asclist[-1]
2950 return None
2954 return None
2951
2955
2952 def last(self):
2956 def last(self):
2953 if self:
2957 if self:
2954 if self._ascending is None:
2958 if self._ascending is None:
2955 return self._list[-1]
2959 return self._list[-1]
2956 elif self._ascending:
2960 elif self._ascending:
2957 return self._asclist[-1]
2961 return self._asclist[-1]
2958 else:
2962 else:
2959 return self._asclist[0]
2963 return self._asclist[0]
2960 return None
2964 return None
2961
2965
2962 def __repr__(self):
2966 def __repr__(self):
2963 d = {None: '', False: '-', True: '+'}[self._ascending]
2967 d = {None: '', False: '-', True: '+'}[self._ascending]
2964 return '<%s%s %r>' % (type(self).__name__, d, self._list)
2968 return '<%s%s %r>' % (type(self).__name__, d, self._list)
2965
2969
2966 class filteredset(abstractsmartset):
2970 class filteredset(abstractsmartset):
2967 """Duck type for baseset class which iterates lazily over the revisions in
2971 """Duck type for baseset class which iterates lazily over the revisions in
2968 the subset and contains a function which tests for membership in the
2972 the subset and contains a function which tests for membership in the
2969 revset
2973 revset
2970 """
2974 """
2971 def __init__(self, subset, condition=lambda x: True):
2975 def __init__(self, subset, condition=lambda x: True):
2972 """
2976 """
2973 condition: a function that decide whether a revision in the subset
2977 condition: a function that decide whether a revision in the subset
2974 belongs to the revset or not.
2978 belongs to the revset or not.
2975 """
2979 """
2976 self._subset = subset
2980 self._subset = subset
2977 self._condition = condition
2981 self._condition = condition
2978 self._cache = {}
2982 self._cache = {}
2979
2983
2980 def __contains__(self, x):
2984 def __contains__(self, x):
2981 c = self._cache
2985 c = self._cache
2982 if x not in c:
2986 if x not in c:
2983 v = c[x] = x in self._subset and self._condition(x)
2987 v = c[x] = x in self._subset and self._condition(x)
2984 return v
2988 return v
2985 return c[x]
2989 return c[x]
2986
2990
2987 def __iter__(self):
2991 def __iter__(self):
2988 return self._iterfilter(self._subset)
2992 return self._iterfilter(self._subset)
2989
2993
2990 def _iterfilter(self, it):
2994 def _iterfilter(self, it):
2991 cond = self._condition
2995 cond = self._condition
2992 for x in it:
2996 for x in it:
2993 if cond(x):
2997 if cond(x):
2994 yield x
2998 yield x
2995
2999
2996 @property
3000 @property
2997 def fastasc(self):
3001 def fastasc(self):
2998 it = self._subset.fastasc
3002 it = self._subset.fastasc
2999 if it is None:
3003 if it is None:
3000 return None
3004 return None
3001 return lambda: self._iterfilter(it())
3005 return lambda: self._iterfilter(it())
3002
3006
3003 @property
3007 @property
3004 def fastdesc(self):
3008 def fastdesc(self):
3005 it = self._subset.fastdesc
3009 it = self._subset.fastdesc
3006 if it is None:
3010 if it is None:
3007 return None
3011 return None
3008 return lambda: self._iterfilter(it())
3012 return lambda: self._iterfilter(it())
3009
3013
3010 def __nonzero__(self):
3014 def __nonzero__(self):
3011 for r in self:
3015 for r in self:
3012 return True
3016 return True
3013 return False
3017 return False
3014
3018
3015 def __len__(self):
3019 def __len__(self):
3016 # Basic implementation to be changed in future patches.
3020 # Basic implementation to be changed in future patches.
3017 l = baseset([r for r in self])
3021 l = baseset([r for r in self])
3018 return len(l)
3022 return len(l)
3019
3023
3020 def sort(self, reverse=False):
3024 def sort(self, reverse=False):
3021 self._subset.sort(reverse=reverse)
3025 self._subset.sort(reverse=reverse)
3022
3026
3023 def reverse(self):
3027 def reverse(self):
3024 self._subset.reverse()
3028 self._subset.reverse()
3025
3029
3026 def isascending(self):
3030 def isascending(self):
3027 return self._subset.isascending()
3031 return self._subset.isascending()
3028
3032
3029 def isdescending(self):
3033 def isdescending(self):
3030 return self._subset.isdescending()
3034 return self._subset.isdescending()
3031
3035
3032 def first(self):
3036 def first(self):
3033 for x in self:
3037 for x in self:
3034 return x
3038 return x
3035 return None
3039 return None
3036
3040
3037 def last(self):
3041 def last(self):
3038 it = None
3042 it = None
3039 if self.isascending():
3043 if self.isascending():
3040 it = self.fastdesc
3044 it = self.fastdesc
3041 elif self.isdescending():
3045 elif self.isdescending():
3042 it = self.fastasc
3046 it = self.fastasc
3043 if it is not None:
3047 if it is not None:
3044 for x in it():
3048 for x in it():
3045 return x
3049 return x
3046 return None #empty case
3050 return None #empty case
3047 else:
3051 else:
3048 x = None
3052 x = None
3049 for x in self:
3053 for x in self:
3050 pass
3054 pass
3051 return x
3055 return x
3052
3056
3053 def __repr__(self):
3057 def __repr__(self):
3054 return '<%s %r>' % (type(self).__name__, self._subset)
3058 return '<%s %r>' % (type(self).__name__, self._subset)
3055
3059
3056 # this function will be removed, or merged to addset or orset, when
3060 # this function will be removed, or merged to addset or orset, when
3057 # - scmutil.revrange() can be rewritten to not combine calculated smartsets
3061 # - scmutil.revrange() can be rewritten to not combine calculated smartsets
3058 # - or addset can handle more than two sets without balanced tree
3062 # - or addset can handle more than two sets without balanced tree
3059 def _combinesets(subsets):
3063 def _combinesets(subsets):
3060 """Create balanced tree of addsets representing union of given sets"""
3064 """Create balanced tree of addsets representing union of given sets"""
3061 if not subsets:
3065 if not subsets:
3062 return baseset()
3066 return baseset()
3063 if len(subsets) == 1:
3067 if len(subsets) == 1:
3064 return subsets[0]
3068 return subsets[0]
3065 p = len(subsets) // 2
3069 p = len(subsets) // 2
3066 xs = _combinesets(subsets[:p])
3070 xs = _combinesets(subsets[:p])
3067 ys = _combinesets(subsets[p:])
3071 ys = _combinesets(subsets[p:])
3068 return addset(xs, ys)
3072 return addset(xs, ys)
3069
3073
3070 def _iterordered(ascending, iter1, iter2):
3074 def _iterordered(ascending, iter1, iter2):
3071 """produce an ordered iteration from two iterators with the same order
3075 """produce an ordered iteration from two iterators with the same order
3072
3076
3073 The ascending is used to indicated the iteration direction.
3077 The ascending is used to indicated the iteration direction.
3074 """
3078 """
3075 choice = max
3079 choice = max
3076 if ascending:
3080 if ascending:
3077 choice = min
3081 choice = min
3078
3082
3079 val1 = None
3083 val1 = None
3080 val2 = None
3084 val2 = None
3081 try:
3085 try:
3082 # Consume both iterators in an ordered way until one is empty
3086 # Consume both iterators in an ordered way until one is empty
3083 while True:
3087 while True:
3084 if val1 is None:
3088 if val1 is None:
3085 val1 = iter1.next()
3089 val1 = iter1.next()
3086 if val2 is None:
3090 if val2 is None:
3087 val2 = iter2.next()
3091 val2 = iter2.next()
3088 next = choice(val1, val2)
3092 next = choice(val1, val2)
3089 yield next
3093 yield next
3090 if val1 == next:
3094 if val1 == next:
3091 val1 = None
3095 val1 = None
3092 if val2 == next:
3096 if val2 == next:
3093 val2 = None
3097 val2 = None
3094 except StopIteration:
3098 except StopIteration:
3095 # Flush any remaining values and consume the other one
3099 # Flush any remaining values and consume the other one
3096 it = iter2
3100 it = iter2
3097 if val1 is not None:
3101 if val1 is not None:
3098 yield val1
3102 yield val1
3099 it = iter1
3103 it = iter1
3100 elif val2 is not None:
3104 elif val2 is not None:
3101 # might have been equality and both are empty
3105 # might have been equality and both are empty
3102 yield val2
3106 yield val2
3103 for val in it:
3107 for val in it:
3104 yield val
3108 yield val
3105
3109
3106 class addset(abstractsmartset):
3110 class addset(abstractsmartset):
3107 """Represent the addition of two sets
3111 """Represent the addition of two sets
3108
3112
3109 Wrapper structure for lazily adding two structures without losing much
3113 Wrapper structure for lazily adding two structures without losing much
3110 performance on the __contains__ method
3114 performance on the __contains__ method
3111
3115
3112 If the ascending attribute is set, that means the two structures are
3116 If the ascending attribute is set, that means the two structures are
3113 ordered in either an ascending or descending way. Therefore, we can add
3117 ordered in either an ascending or descending way. Therefore, we can add
3114 them maintaining the order by iterating over both at the same time
3118 them maintaining the order by iterating over both at the same time
3115
3119
3116 >>> xs = baseset([0, 3, 2])
3120 >>> xs = baseset([0, 3, 2])
3117 >>> ys = baseset([5, 2, 4])
3121 >>> ys = baseset([5, 2, 4])
3118
3122
3119 >>> rs = addset(xs, ys)
3123 >>> rs = addset(xs, ys)
3120 >>> bool(rs), 0 in rs, 1 in rs, 5 in rs, rs.first(), rs.last()
3124 >>> bool(rs), 0 in rs, 1 in rs, 5 in rs, rs.first(), rs.last()
3121 (True, True, False, True, 0, 4)
3125 (True, True, False, True, 0, 4)
3122 >>> rs = addset(xs, baseset([]))
3126 >>> rs = addset(xs, baseset([]))
3123 >>> bool(rs), 0 in rs, 1 in rs, rs.first(), rs.last()
3127 >>> bool(rs), 0 in rs, 1 in rs, rs.first(), rs.last()
3124 (True, True, False, 0, 2)
3128 (True, True, False, 0, 2)
3125 >>> rs = addset(baseset([]), baseset([]))
3129 >>> rs = addset(baseset([]), baseset([]))
3126 >>> bool(rs), 0 in rs, rs.first(), rs.last()
3130 >>> bool(rs), 0 in rs, rs.first(), rs.last()
3127 (False, False, None, None)
3131 (False, False, None, None)
3128
3132
3129 iterate unsorted:
3133 iterate unsorted:
3130 >>> rs = addset(xs, ys)
3134 >>> rs = addset(xs, ys)
3131 >>> [x for x in rs] # without _genlist
3135 >>> [x for x in rs] # without _genlist
3132 [0, 3, 2, 5, 4]
3136 [0, 3, 2, 5, 4]
3133 >>> assert not rs._genlist
3137 >>> assert not rs._genlist
3134 >>> len(rs)
3138 >>> len(rs)
3135 5
3139 5
3136 >>> [x for x in rs] # with _genlist
3140 >>> [x for x in rs] # with _genlist
3137 [0, 3, 2, 5, 4]
3141 [0, 3, 2, 5, 4]
3138 >>> assert rs._genlist
3142 >>> assert rs._genlist
3139
3143
3140 iterate ascending:
3144 iterate ascending:
3141 >>> rs = addset(xs, ys, ascending=True)
3145 >>> rs = addset(xs, ys, ascending=True)
3142 >>> [x for x in rs], [x for x in rs.fastasc()] # without _asclist
3146 >>> [x for x in rs], [x for x in rs.fastasc()] # without _asclist
3143 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
3147 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
3144 >>> assert not rs._asclist
3148 >>> assert not rs._asclist
3145 >>> len(rs)
3149 >>> len(rs)
3146 5
3150 5
3147 >>> [x for x in rs], [x for x in rs.fastasc()]
3151 >>> [x for x in rs], [x for x in rs.fastasc()]
3148 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
3152 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
3149 >>> assert rs._asclist
3153 >>> assert rs._asclist
3150
3154
3151 iterate descending:
3155 iterate descending:
3152 >>> rs = addset(xs, ys, ascending=False)
3156 >>> rs = addset(xs, ys, ascending=False)
3153 >>> [x for x in rs], [x for x in rs.fastdesc()] # without _asclist
3157 >>> [x for x in rs], [x for x in rs.fastdesc()] # without _asclist
3154 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
3158 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
3155 >>> assert not rs._asclist
3159 >>> assert not rs._asclist
3156 >>> len(rs)
3160 >>> len(rs)
3157 5
3161 5
3158 >>> [x for x in rs], [x for x in rs.fastdesc()]
3162 >>> [x for x in rs], [x for x in rs.fastdesc()]
3159 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
3163 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
3160 >>> assert rs._asclist
3164 >>> assert rs._asclist
3161
3165
3162 iterate ascending without fastasc:
3166 iterate ascending without fastasc:
3163 >>> rs = addset(xs, generatorset(ys), ascending=True)
3167 >>> rs = addset(xs, generatorset(ys), ascending=True)
3164 >>> assert rs.fastasc is None
3168 >>> assert rs.fastasc is None
3165 >>> [x for x in rs]
3169 >>> [x for x in rs]
3166 [0, 2, 3, 4, 5]
3170 [0, 2, 3, 4, 5]
3167
3171
3168 iterate descending without fastdesc:
3172 iterate descending without fastdesc:
3169 >>> rs = addset(generatorset(xs), ys, ascending=False)
3173 >>> rs = addset(generatorset(xs), ys, ascending=False)
3170 >>> assert rs.fastdesc is None
3174 >>> assert rs.fastdesc is None
3171 >>> [x for x in rs]
3175 >>> [x for x in rs]
3172 [5, 4, 3, 2, 0]
3176 [5, 4, 3, 2, 0]
3173 """
3177 """
3174 def __init__(self, revs1, revs2, ascending=None):
3178 def __init__(self, revs1, revs2, ascending=None):
3175 self._r1 = revs1
3179 self._r1 = revs1
3176 self._r2 = revs2
3180 self._r2 = revs2
3177 self._iter = None
3181 self._iter = None
3178 self._ascending = ascending
3182 self._ascending = ascending
3179 self._genlist = None
3183 self._genlist = None
3180 self._asclist = None
3184 self._asclist = None
3181
3185
3182 def __len__(self):
3186 def __len__(self):
3183 return len(self._list)
3187 return len(self._list)
3184
3188
3185 def __nonzero__(self):
3189 def __nonzero__(self):
3186 return bool(self._r1) or bool(self._r2)
3190 return bool(self._r1) or bool(self._r2)
3187
3191
3188 @util.propertycache
3192 @util.propertycache
3189 def _list(self):
3193 def _list(self):
3190 if not self._genlist:
3194 if not self._genlist:
3191 self._genlist = baseset(iter(self))
3195 self._genlist = baseset(iter(self))
3192 return self._genlist
3196 return self._genlist
3193
3197
3194 def __iter__(self):
3198 def __iter__(self):
3195 """Iterate over both collections without repeating elements
3199 """Iterate over both collections without repeating elements
3196
3200
3197 If the ascending attribute is not set, iterate over the first one and
3201 If the ascending attribute is not set, iterate over the first one and
3198 then over the second one checking for membership on the first one so we
3202 then over the second one checking for membership on the first one so we
3199 dont yield any duplicates.
3203 dont yield any duplicates.
3200
3204
3201 If the ascending attribute is set, iterate over both collections at the
3205 If the ascending attribute is set, iterate over both collections at the
3202 same time, yielding only one value at a time in the given order.
3206 same time, yielding only one value at a time in the given order.
3203 """
3207 """
3204 if self._ascending is None:
3208 if self._ascending is None:
3205 if self._genlist:
3209 if self._genlist:
3206 return iter(self._genlist)
3210 return iter(self._genlist)
3207 def arbitraryordergen():
3211 def arbitraryordergen():
3208 for r in self._r1:
3212 for r in self._r1:
3209 yield r
3213 yield r
3210 inr1 = self._r1.__contains__
3214 inr1 = self._r1.__contains__
3211 for r in self._r2:
3215 for r in self._r2:
3212 if not inr1(r):
3216 if not inr1(r):
3213 yield r
3217 yield r
3214 return arbitraryordergen()
3218 return arbitraryordergen()
3215 # try to use our own fast iterator if it exists
3219 # try to use our own fast iterator if it exists
3216 self._trysetasclist()
3220 self._trysetasclist()
3217 if self._ascending:
3221 if self._ascending:
3218 attr = 'fastasc'
3222 attr = 'fastasc'
3219 else:
3223 else:
3220 attr = 'fastdesc'
3224 attr = 'fastdesc'
3221 it = getattr(self, attr)
3225 it = getattr(self, attr)
3222 if it is not None:
3226 if it is not None:
3223 return it()
3227 return it()
3224 # maybe half of the component supports fast
3228 # maybe half of the component supports fast
3225 # get iterator for _r1
3229 # get iterator for _r1
3226 iter1 = getattr(self._r1, attr)
3230 iter1 = getattr(self._r1, attr)
3227 if iter1 is None:
3231 if iter1 is None:
3228 # let's avoid side effect (not sure it matters)
3232 # let's avoid side effect (not sure it matters)
3229 iter1 = iter(sorted(self._r1, reverse=not self._ascending))
3233 iter1 = iter(sorted(self._r1, reverse=not self._ascending))
3230 else:
3234 else:
3231 iter1 = iter1()
3235 iter1 = iter1()
3232 # get iterator for _r2
3236 # get iterator for _r2
3233 iter2 = getattr(self._r2, attr)
3237 iter2 = getattr(self._r2, attr)
3234 if iter2 is None:
3238 if iter2 is None:
3235 # let's avoid side effect (not sure it matters)
3239 # let's avoid side effect (not sure it matters)
3236 iter2 = iter(sorted(self._r2, reverse=not self._ascending))
3240 iter2 = iter(sorted(self._r2, reverse=not self._ascending))
3237 else:
3241 else:
3238 iter2 = iter2()
3242 iter2 = iter2()
3239 return _iterordered(self._ascending, iter1, iter2)
3243 return _iterordered(self._ascending, iter1, iter2)
3240
3244
3241 def _trysetasclist(self):
3245 def _trysetasclist(self):
3242 """populate the _asclist attribute if possible and necessary"""
3246 """populate the _asclist attribute if possible and necessary"""
3243 if self._genlist is not None and self._asclist is None:
3247 if self._genlist is not None and self._asclist is None:
3244 self._asclist = sorted(self._genlist)
3248 self._asclist = sorted(self._genlist)
3245
3249
3246 @property
3250 @property
3247 def fastasc(self):
3251 def fastasc(self):
3248 self._trysetasclist()
3252 self._trysetasclist()
3249 if self._asclist is not None:
3253 if self._asclist is not None:
3250 return self._asclist.__iter__
3254 return self._asclist.__iter__
3251 iter1 = self._r1.fastasc
3255 iter1 = self._r1.fastasc
3252 iter2 = self._r2.fastasc
3256 iter2 = self._r2.fastasc
3253 if None in (iter1, iter2):
3257 if None in (iter1, iter2):
3254 return None
3258 return None
3255 return lambda: _iterordered(True, iter1(), iter2())
3259 return lambda: _iterordered(True, iter1(), iter2())
3256
3260
3257 @property
3261 @property
3258 def fastdesc(self):
3262 def fastdesc(self):
3259 self._trysetasclist()
3263 self._trysetasclist()
3260 if self._asclist is not None:
3264 if self._asclist is not None:
3261 return self._asclist.__reversed__
3265 return self._asclist.__reversed__
3262 iter1 = self._r1.fastdesc
3266 iter1 = self._r1.fastdesc
3263 iter2 = self._r2.fastdesc
3267 iter2 = self._r2.fastdesc
3264 if None in (iter1, iter2):
3268 if None in (iter1, iter2):
3265 return None
3269 return None
3266 return lambda: _iterordered(False, iter1(), iter2())
3270 return lambda: _iterordered(False, iter1(), iter2())
3267
3271
3268 def __contains__(self, x):
3272 def __contains__(self, x):
3269 return x in self._r1 or x in self._r2
3273 return x in self._r1 or x in self._r2
3270
3274
3271 def sort(self, reverse=False):
3275 def sort(self, reverse=False):
3272 """Sort the added set
3276 """Sort the added set
3273
3277
3274 For this we use the cached list with all the generated values and if we
3278 For this we use the cached list with all the generated values and if we
3275 know they are ascending or descending we can sort them in a smart way.
3279 know they are ascending or descending we can sort them in a smart way.
3276 """
3280 """
3277 self._ascending = not reverse
3281 self._ascending = not reverse
3278
3282
3279 def isascending(self):
3283 def isascending(self):
3280 return self._ascending is not None and self._ascending
3284 return self._ascending is not None and self._ascending
3281
3285
3282 def isdescending(self):
3286 def isdescending(self):
3283 return self._ascending is not None and not self._ascending
3287 return self._ascending is not None and not self._ascending
3284
3288
3285 def reverse(self):
3289 def reverse(self):
3286 if self._ascending is None:
3290 if self._ascending is None:
3287 self._list.reverse()
3291 self._list.reverse()
3288 else:
3292 else:
3289 self._ascending = not self._ascending
3293 self._ascending = not self._ascending
3290
3294
3291 def first(self):
3295 def first(self):
3292 for x in self:
3296 for x in self:
3293 return x
3297 return x
3294 return None
3298 return None
3295
3299
3296 def last(self):
3300 def last(self):
3297 self.reverse()
3301 self.reverse()
3298 val = self.first()
3302 val = self.first()
3299 self.reverse()
3303 self.reverse()
3300 return val
3304 return val
3301
3305
3302 def __repr__(self):
3306 def __repr__(self):
3303 d = {None: '', False: '-', True: '+'}[self._ascending]
3307 d = {None: '', False: '-', True: '+'}[self._ascending]
3304 return '<%s%s %r, %r>' % (type(self).__name__, d, self._r1, self._r2)
3308 return '<%s%s %r, %r>' % (type(self).__name__, d, self._r1, self._r2)
3305
3309
3306 class generatorset(abstractsmartset):
3310 class generatorset(abstractsmartset):
3307 """Wrap a generator for lazy iteration
3311 """Wrap a generator for lazy iteration
3308
3312
3309 Wrapper structure for generators that provides lazy membership and can
3313 Wrapper structure for generators that provides lazy membership and can
3310 be iterated more than once.
3314 be iterated more than once.
3311 When asked for membership it generates values until either it finds the
3315 When asked for membership it generates values until either it finds the
3312 requested one or has gone through all the elements in the generator
3316 requested one or has gone through all the elements in the generator
3313 """
3317 """
3314 def __init__(self, gen, iterasc=None):
3318 def __init__(self, gen, iterasc=None):
3315 """
3319 """
3316 gen: a generator producing the values for the generatorset.
3320 gen: a generator producing the values for the generatorset.
3317 """
3321 """
3318 self._gen = gen
3322 self._gen = gen
3319 self._asclist = None
3323 self._asclist = None
3320 self._cache = {}
3324 self._cache = {}
3321 self._genlist = []
3325 self._genlist = []
3322 self._finished = False
3326 self._finished = False
3323 self._ascending = True
3327 self._ascending = True
3324 if iterasc is not None:
3328 if iterasc is not None:
3325 if iterasc:
3329 if iterasc:
3326 self.fastasc = self._iterator
3330 self.fastasc = self._iterator
3327 self.__contains__ = self._asccontains
3331 self.__contains__ = self._asccontains
3328 else:
3332 else:
3329 self.fastdesc = self._iterator
3333 self.fastdesc = self._iterator
3330 self.__contains__ = self._desccontains
3334 self.__contains__ = self._desccontains
3331
3335
3332 def __nonzero__(self):
3336 def __nonzero__(self):
3333 # Do not use 'for r in self' because it will enforce the iteration
3337 # Do not use 'for r in self' because it will enforce the iteration
3334 # order (default ascending), possibly unrolling a whole descending
3338 # order (default ascending), possibly unrolling a whole descending
3335 # iterator.
3339 # iterator.
3336 if self._genlist:
3340 if self._genlist:
3337 return True
3341 return True
3338 for r in self._consumegen():
3342 for r in self._consumegen():
3339 return True
3343 return True
3340 return False
3344 return False
3341
3345
3342 def __contains__(self, x):
3346 def __contains__(self, x):
3343 if x in self._cache:
3347 if x in self._cache:
3344 return self._cache[x]
3348 return self._cache[x]
3345
3349
3346 # Use new values only, as existing values would be cached.
3350 # Use new values only, as existing values would be cached.
3347 for l in self._consumegen():
3351 for l in self._consumegen():
3348 if l == x:
3352 if l == x:
3349 return True
3353 return True
3350
3354
3351 self._cache[x] = False
3355 self._cache[x] = False
3352 return False
3356 return False
3353
3357
3354 def _asccontains(self, x):
3358 def _asccontains(self, x):
3355 """version of contains optimised for ascending generator"""
3359 """version of contains optimised for ascending generator"""
3356 if x in self._cache:
3360 if x in self._cache:
3357 return self._cache[x]
3361 return self._cache[x]
3358
3362
3359 # Use new values only, as existing values would be cached.
3363 # Use new values only, as existing values would be cached.
3360 for l in self._consumegen():
3364 for l in self._consumegen():
3361 if l == x:
3365 if l == x:
3362 return True
3366 return True
3363 if l > x:
3367 if l > x:
3364 break
3368 break
3365
3369
3366 self._cache[x] = False
3370 self._cache[x] = False
3367 return False
3371 return False
3368
3372
3369 def _desccontains(self, x):
3373 def _desccontains(self, x):
3370 """version of contains optimised for descending generator"""
3374 """version of contains optimised for descending generator"""
3371 if x in self._cache:
3375 if x in self._cache:
3372 return self._cache[x]
3376 return self._cache[x]
3373
3377
3374 # Use new values only, as existing values would be cached.
3378 # Use new values only, as existing values would be cached.
3375 for l in self._consumegen():
3379 for l in self._consumegen():
3376 if l == x:
3380 if l == x:
3377 return True
3381 return True
3378 if l < x:
3382 if l < x:
3379 break
3383 break
3380
3384
3381 self._cache[x] = False
3385 self._cache[x] = False
3382 return False
3386 return False
3383
3387
3384 def __iter__(self):
3388 def __iter__(self):
3385 if self._ascending:
3389 if self._ascending:
3386 it = self.fastasc
3390 it = self.fastasc
3387 else:
3391 else:
3388 it = self.fastdesc
3392 it = self.fastdesc
3389 if it is not None:
3393 if it is not None:
3390 return it()
3394 return it()
3391 # we need to consume the iterator
3395 # we need to consume the iterator
3392 for x in self._consumegen():
3396 for x in self._consumegen():
3393 pass
3397 pass
3394 # recall the same code
3398 # recall the same code
3395 return iter(self)
3399 return iter(self)
3396
3400
3397 def _iterator(self):
3401 def _iterator(self):
3398 if self._finished:
3402 if self._finished:
3399 return iter(self._genlist)
3403 return iter(self._genlist)
3400
3404
3401 # We have to use this complex iteration strategy to allow multiple
3405 # We have to use this complex iteration strategy to allow multiple
3402 # iterations at the same time. We need to be able to catch revision
3406 # iterations at the same time. We need to be able to catch revision
3403 # removed from _consumegen and added to genlist in another instance.
3407 # removed from _consumegen and added to genlist in another instance.
3404 #
3408 #
3405 # Getting rid of it would provide an about 15% speed up on this
3409 # Getting rid of it would provide an about 15% speed up on this
3406 # iteration.
3410 # iteration.
3407 genlist = self._genlist
3411 genlist = self._genlist
3408 nextrev = self._consumegen().next
3412 nextrev = self._consumegen().next
3409 _len = len # cache global lookup
3413 _len = len # cache global lookup
3410 def gen():
3414 def gen():
3411 i = 0
3415 i = 0
3412 while True:
3416 while True:
3413 if i < _len(genlist):
3417 if i < _len(genlist):
3414 yield genlist[i]
3418 yield genlist[i]
3415 else:
3419 else:
3416 yield nextrev()
3420 yield nextrev()
3417 i += 1
3421 i += 1
3418 return gen()
3422 return gen()
3419
3423
3420 def _consumegen(self):
3424 def _consumegen(self):
3421 cache = self._cache
3425 cache = self._cache
3422 genlist = self._genlist.append
3426 genlist = self._genlist.append
3423 for item in self._gen:
3427 for item in self._gen:
3424 cache[item] = True
3428 cache[item] = True
3425 genlist(item)
3429 genlist(item)
3426 yield item
3430 yield item
3427 if not self._finished:
3431 if not self._finished:
3428 self._finished = True
3432 self._finished = True
3429 asc = self._genlist[:]
3433 asc = self._genlist[:]
3430 asc.sort()
3434 asc.sort()
3431 self._asclist = asc
3435 self._asclist = asc
3432 self.fastasc = asc.__iter__
3436 self.fastasc = asc.__iter__
3433 self.fastdesc = asc.__reversed__
3437 self.fastdesc = asc.__reversed__
3434
3438
3435 def __len__(self):
3439 def __len__(self):
3436 for x in self._consumegen():
3440 for x in self._consumegen():
3437 pass
3441 pass
3438 return len(self._genlist)
3442 return len(self._genlist)
3439
3443
3440 def sort(self, reverse=False):
3444 def sort(self, reverse=False):
3441 self._ascending = not reverse
3445 self._ascending = not reverse
3442
3446
3443 def reverse(self):
3447 def reverse(self):
3444 self._ascending = not self._ascending
3448 self._ascending = not self._ascending
3445
3449
3446 def isascending(self):
3450 def isascending(self):
3447 return self._ascending
3451 return self._ascending
3448
3452
3449 def isdescending(self):
3453 def isdescending(self):
3450 return not self._ascending
3454 return not self._ascending
3451
3455
3452 def first(self):
3456 def first(self):
3453 if self._ascending:
3457 if self._ascending:
3454 it = self.fastasc
3458 it = self.fastasc
3455 else:
3459 else:
3456 it = self.fastdesc
3460 it = self.fastdesc
3457 if it is None:
3461 if it is None:
3458 # we need to consume all and try again
3462 # we need to consume all and try again
3459 for x in self._consumegen():
3463 for x in self._consumegen():
3460 pass
3464 pass
3461 return self.first()
3465 return self.first()
3462 return next(it(), None)
3466 return next(it(), None)
3463
3467
3464 def last(self):
3468 def last(self):
3465 if self._ascending:
3469 if self._ascending:
3466 it = self.fastdesc
3470 it = self.fastdesc
3467 else:
3471 else:
3468 it = self.fastasc
3472 it = self.fastasc
3469 if it is None:
3473 if it is None:
3470 # we need to consume all and try again
3474 # we need to consume all and try again
3471 for x in self._consumegen():
3475 for x in self._consumegen():
3472 pass
3476 pass
3473 return self.first()
3477 return self.first()
3474 return next(it(), None)
3478 return next(it(), None)
3475
3479
3476 def __repr__(self):
3480 def __repr__(self):
3477 d = {False: '-', True: '+'}[self._ascending]
3481 d = {False: '-', True: '+'}[self._ascending]
3478 return '<%s%s>' % (type(self).__name__, d)
3482 return '<%s%s>' % (type(self).__name__, d)
3479
3483
class spanset(abstractsmartset):
    """Duck type for baseset class which represents a range of revisions and
    can work lazily and without having all the range in memory

    Note that spanset(x, y) behaves almost like xrange(x, y) except for two
    notable points:
    - when x > y the set is automatically descending,
    - revisions filtered out by the current repoview are skipped.

    """
    def __init__(self, repo, start=0, end=None):
        """
        start: first revision included in the set
            (defaults to 0)
        end:   first revision excluded (last + 1)
            (defaults to len(repo))

        Spanset will be descending if `end` < `start`.
        """
        if end is None:
            end = len(repo)
        self._ascending = start <= end
        if not self._ascending:
            start, end = end + 1, start + 1
        self._start = start
        self._end = end
        self._hiddenrevs = repo.changelog.filteredrevs

    def sort(self, reverse=False):
        self._ascending = not reverse

    def reverse(self):
        self._ascending = not self._ascending

    def _iterfilter(self, iterrange):
        s = self._hiddenrevs
        for r in iterrange:
            if r not in s:
                yield r

    def __iter__(self):
        if self._ascending:
            return self.fastasc()
        else:
            return self.fastdesc()

    def fastasc(self):
        iterrange = xrange(self._start, self._end)
        if self._hiddenrevs:
            return self._iterfilter(iterrange)
        return iter(iterrange)

    def fastdesc(self):
        iterrange = xrange(self._end - 1, self._start - 1, -1)
        if self._hiddenrevs:
            return self._iterfilter(iterrange)
        return iter(iterrange)

    def __contains__(self, rev):
        hidden = self._hiddenrevs
        return ((self._start <= rev < self._end)
                and not (hidden and rev in hidden))

    def __nonzero__(self):
        for r in self:
            return True
        return False

    def __len__(self):
        if not self._hiddenrevs:
            return abs(self._end - self._start)
        else:
            count = 0
            start = self._start
            end = self._end
            for rev in self._hiddenrevs:
                if (end < rev <= start) or (start <= rev < end):
                    count += 1
            return abs(self._end - self._start) - count

    def isascending(self):
        return self._ascending

    def isdescending(self):
        return not self._ascending

    def first(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        for x in it():
            return x
        return None

    def last(self):
        if self._ascending:
            it = self.fastdesc
        else:
            it = self.fastasc
        for x in it():
            return x
        return None

    def __repr__(self):
        d = {False: '-', True: '+'}[self._ascending]
        return '<%s%s %d:%d>' % (type(self).__name__, d,
                                 self._start, self._end - 1)
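Constructing a real spanset requires a repository; as a rough standalone model of just the range normalization and iteration order described in the class docstring (simplespan is a hypothetical helper, not Mercurial API):

def simplespan(start, end):
    # mirrors spanset.__init__: descending when start > end, then
    # normalized to an ascending half-open range [start, end)
    ascending = start <= end
    if not ascending:
        start, end = end + 1, start + 1
    revs = list(range(start, end))
    return revs if ascending else revs[::-1]

print(simplespan(0, 5))   # [0, 1, 2, 3, 4]
print(simplespan(5, 0))   # [5, 4, 3, 2, 1]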
class fullreposet(spanset):
    """a set containing all revisions in the repo

    This class exists to host special optimization and magic to handle virtual
    revisions such as "null".
    """

    def __init__(self, repo):
        super(fullreposet, self).__init__(repo)

    def __and__(self, other):
        """As self contains the whole repo, every element of the other set
        is also in self. Therefore `self & other = other`.

        This boldly assumes the other set contains valid revs only.
        """
        # other is not a smartset, make it so
        if not util.safehasattr(other, 'isascending'):
            # filter out hidden revisions
            # (this boldly assumes all smartsets are pure)
            #
            # `other` was used with "&", let's assume this is a set-like
            # object.
            other = baseset(other - self._hiddenrevs)

        # XXX As fullreposet is also used as bootstrap, this is wrong.
        #
        # With a giveme312() revset returning [3,1,2], this makes
        #   'hg log -r "giveme312()"' -> 1, 2, 3 (wrong)
        # We cannot just drop it because other usages still need to sort it:
        #   'hg log -r "all() and giveme312()"' -> 1, 2, 3 (right)
        #
        # There are also some faulty revset implementations that rely on it
        # (e.g. children as of its state in e8075329c5fb)
        #
        # When we fix the two points above we can move this into the if clause
        other.sort(reverse=self.isdescending())
        return other
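The __and__ above relies on the identity that intersecting any set of valid revisions with the full set leaves the other operand unchanged apart from ordering. A toy standalone check of that shortcut (illustrative values only, no Mercurial objects involved):

allrevs = set(range(10))   # stand-in for every revision in a repo
other = [7, 2, 5]          # stand-in for some other revset's result

intersection = sorted(r for r in other if r in allrevs)
shortcut = sorted(other)   # what fullreposet.__and__ effectively does
assert intersection == shortcut == [2, 5, 7]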
def prettyformatset(revs):
    lines = []
    rs = repr(revs)
    p = 0
    while p < len(rs):
        q = rs.find('<', p + 1)
        if q < 0:
            q = len(rs)
        l = rs.count('<', 0, p) - rs.count('>', 0, p)
        assert l >= 0
        lines.append((l, rs[p:q].rstrip()))
        p = q
    return '\n'.join(' ' * l + s for l, s in lines)
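prettyformatset only inspects repr(revs), so its output can be previewed with any object whose repr nests '<...>' markers. A hypothetical stand-in (not a real smartset) renders like this:

class fakeset(object):
    # fake object mimicking the shape of nested smartset reprs
    def __repr__(self):
        return '<addset+ <spanset+ 0:3>, <baseset [5, 7]>>'

print(prettyformatset(fakeset()))
# <addset+
#  <spanset+ 0:3>,
#  <baseset [5, 7]>>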
# tell hggettext to extract docstrings from these functions:
i18nfunctions = symbols.values()