parser: remove unused binding parameter from suffix action...
Yuya Nishihara
r29767:e5b79406 default
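
This commit simplifies the suffix-action entries in the parser's elements table: _parse() builds a suffix node as expr = (suffix, expr) and never consumes a right-hand operand, so the binding strength that used to ride along in the action tuple was never read. A suffix action is now a bare tree-node name, and callers such as the revset elements table drop the unused number, as the hunks below show, for example:

    "^": (18, None, None, ("parent", 18), ("parentpost", 18)),   # before
    "^": (18, None, None, ("parent", 18), "parentpost"),          # after
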
mercurial/parser.py
@@ -1,540 +1,540 @@
1 # parser.py - simple top-down operator precedence parser for mercurial
1 # parser.py - simple top-down operator precedence parser for mercurial
2 #
2 #
3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 # see http://effbot.org/zone/simple-top-down-parsing.htm and
8 # see http://effbot.org/zone/simple-top-down-parsing.htm and
9 # http://eli.thegreenplace.net/2010/01/02/top-down-operator-precedence-parsing/
9 # http://eli.thegreenplace.net/2010/01/02/top-down-operator-precedence-parsing/
10 # for background
10 # for background
11
11
12 # takes a tokenizer and elements
12 # takes a tokenizer and elements
13 # tokenizer is an iterator that returns (type, value, pos) tuples
13 # tokenizer is an iterator that returns (type, value, pos) tuples
14 # elements is a mapping of types to binding strength, primary, prefix, infix
14 # elements is a mapping of types to binding strength, primary, prefix, infix
15 # and suffix actions
15 # and suffix actions
16 # an action is a tree node name, a tree label, and an optional match
16 # an action is a tree node name, a tree label, and an optional match
17 # __call__(program) parses program into a labeled tree
17 # __call__(program) parses program into a labeled tree
18
18
19 from __future__ import absolute_import
19 from __future__ import absolute_import
20
20
21 from .i18n import _
21 from .i18n import _
22 from . import error
22 from . import error
23
23
24 class parser(object):
24 class parser(object):
25 def __init__(self, elements, methods=None):
25 def __init__(self, elements, methods=None):
26 self._elements = elements
26 self._elements = elements
27 self._methods = methods
27 self._methods = methods
28 self.current = None
28 self.current = None
29 def _advance(self):
29 def _advance(self):
30 'advance the tokenizer'
30 'advance the tokenizer'
31 t = self.current
31 t = self.current
32 self.current = next(self._iter, None)
32 self.current = next(self._iter, None)
33 return t
33 return t
34 def _hasnewterm(self):
34 def _hasnewterm(self):
35 'True if next token may start new term'
35 'True if next token may start new term'
36 return any(self._elements[self.current[0]][1:3])
36 return any(self._elements[self.current[0]][1:3])
37 def _match(self, m):
37 def _match(self, m):
38 'make sure the tokenizer matches an end condition'
38 'make sure the tokenizer matches an end condition'
39 if self.current[0] != m:
39 if self.current[0] != m:
40 raise error.ParseError(_("unexpected token: %s") % self.current[0],
40 raise error.ParseError(_("unexpected token: %s") % self.current[0],
41 self.current[2])
41 self.current[2])
42 self._advance()
42 self._advance()
43 def _parseoperand(self, bind, m=None):
43 def _parseoperand(self, bind, m=None):
44 'gather right-hand-side operand until an end condition or binding met'
44 'gather right-hand-side operand until an end condition or binding met'
45 if m and self.current[0] == m:
45 if m and self.current[0] == m:
46 expr = None
46 expr = None
47 else:
47 else:
48 expr = self._parse(bind)
48 expr = self._parse(bind)
49 if m:
49 if m:
50 self._match(m)
50 self._match(m)
51 return expr
51 return expr
52 def _parse(self, bind=0):
52 def _parse(self, bind=0):
53 token, value, pos = self._advance()
53 token, value, pos = self._advance()
54 # handle prefix rules on current token, take as primary if unambiguous
54 # handle prefix rules on current token, take as primary if unambiguous
55 primary, prefix = self._elements[token][1:3]
55 primary, prefix = self._elements[token][1:3]
56 if primary and not (prefix and self._hasnewterm()):
56 if primary and not (prefix and self._hasnewterm()):
57 expr = (primary, value)
57 expr = (primary, value)
58 elif prefix:
58 elif prefix:
59 expr = (prefix[0], self._parseoperand(*prefix[1:]))
59 expr = (prefix[0], self._parseoperand(*prefix[1:]))
60 else:
60 else:
61 raise error.ParseError(_("not a prefix: %s") % token, pos)
61 raise error.ParseError(_("not a prefix: %s") % token, pos)
62 # gather tokens until we meet a lower binding strength
62 # gather tokens until we meet a lower binding strength
63 while bind < self._elements[self.current[0]][0]:
63 while bind < self._elements[self.current[0]][0]:
64 token, value, pos = self._advance()
64 token, value, pos = self._advance()
65 # handle infix rules, take as suffix if unambiguous
65 # handle infix rules, take as suffix if unambiguous
66 infix, suffix = self._elements[token][3:]
66 infix, suffix = self._elements[token][3:]
67 if suffix and not (infix and self._hasnewterm()):
67 if suffix and not (infix and self._hasnewterm()):
68 expr = (suffix[0], expr)
68 expr = (suffix, expr)
69 elif infix:
69 elif infix:
70 expr = (infix[0], expr, self._parseoperand(*infix[1:]))
70 expr = (infix[0], expr, self._parseoperand(*infix[1:]))
71 else:
71 else:
72 raise error.ParseError(_("not an infix: %s") % token, pos)
72 raise error.ParseError(_("not an infix: %s") % token, pos)
73 return expr
73 return expr
74 def parse(self, tokeniter):
74 def parse(self, tokeniter):
75 'generate a parse tree from tokens'
75 'generate a parse tree from tokens'
76 self._iter = tokeniter
76 self._iter = tokeniter
77 self._advance()
77 self._advance()
78 res = self._parse()
78 res = self._parse()
79 token, value, pos = self.current
79 token, value, pos = self.current
80 return res, pos
80 return res, pos
81 def eval(self, tree):
81 def eval(self, tree):
82 'recursively evaluate a parse tree using node methods'
82 'recursively evaluate a parse tree using node methods'
83 if not isinstance(tree, tuple):
83 if not isinstance(tree, tuple):
84 return tree
84 return tree
85 return self._methods[tree[0]](*[self.eval(t) for t in tree[1:]])
85 return self._methods[tree[0]](*[self.eval(t) for t in tree[1:]])
86 def __call__(self, tokeniter):
86 def __call__(self, tokeniter):
87 'parse tokens into a parse tree and evaluate if methods given'
87 'parse tokens into a parse tree and evaluate if methods given'
88 t = self.parse(tokeniter)
88 t = self.parse(tokeniter)
89 if self._methods:
89 if self._methods:
90 return self.eval(t)
90 return self.eval(t)
91 return t
91 return t
92
92
93 def buildargsdict(trees, funcname, keys, keyvaluenode, keynode):
93 def buildargsdict(trees, funcname, keys, keyvaluenode, keynode):
94 """Build dict from list containing positional and keyword arguments
94 """Build dict from list containing positional and keyword arguments
95
95
96 Invalid keywords or too many positional arguments are rejected, but
96 Invalid keywords or too many positional arguments are rejected, but
97 missing arguments are just omitted.
97 missing arguments are just omitted.
98 """
98 """
99 if len(trees) > len(keys):
99 if len(trees) > len(keys):
100 raise error.ParseError(_("%(func)s takes at most %(nargs)d arguments")
100 raise error.ParseError(_("%(func)s takes at most %(nargs)d arguments")
101 % {'func': funcname, 'nargs': len(keys)})
101 % {'func': funcname, 'nargs': len(keys)})
102 args = {}
102 args = {}
103 # consume positional arguments
103 # consume positional arguments
104 for k, x in zip(keys, trees):
104 for k, x in zip(keys, trees):
105 if x[0] == keyvaluenode:
105 if x[0] == keyvaluenode:
106 break
106 break
107 args[k] = x
107 args[k] = x
108 # remainder should be keyword arguments
108 # remainder should be keyword arguments
109 for x in trees[len(args):]:
109 for x in trees[len(args):]:
110 if x[0] != keyvaluenode or x[1][0] != keynode:
110 if x[0] != keyvaluenode or x[1][0] != keynode:
111 raise error.ParseError(_("%(func)s got an invalid argument")
111 raise error.ParseError(_("%(func)s got an invalid argument")
112 % {'func': funcname})
112 % {'func': funcname})
113 k = x[1][1]
113 k = x[1][1]
114 if k not in keys:
114 if k not in keys:
115 raise error.ParseError(_("%(func)s got an unexpected keyword "
115 raise error.ParseError(_("%(func)s got an unexpected keyword "
116 "argument '%(key)s'")
116 "argument '%(key)s'")
117 % {'func': funcname, 'key': k})
117 % {'func': funcname, 'key': k})
118 if k in args:
118 if k in args:
119 raise error.ParseError(_("%(func)s got multiple values for keyword "
119 raise error.ParseError(_("%(func)s got multiple values for keyword "
120 "argument '%(key)s'")
120 "argument '%(key)s'")
121 % {'func': funcname, 'key': k})
121 % {'func': funcname, 'key': k})
122 args[k] = x[2]
122 args[k] = x[2]
123 return args
123 return args
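# An illustrative sketch of the rules above (not part of this change);
# 'limit', 'rev' and 'branch' are hypothetical names:
#   buildargsdict([('symbol', '10'),
#                  ('keyvalue', ('symbol', 'branch'), ('string', 'default'))],
#                 'limit', ['rev', 'branch'], 'keyvalue', 'symbol')
#   == {'rev': ('symbol', '10'), 'branch': ('string', 'default')}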
124
124
125 def unescapestr(s):
125 def unescapestr(s):
126 try:
126 try:
127 return s.decode("string_escape")
127 return s.decode("string_escape")
128 except ValueError as e:
128 except ValueError as e:
129 # mangle Python's exception into our format
129 # mangle Python's exception into our format
130 raise error.ParseError(str(e).lower())
130 raise error.ParseError(str(e).lower())
131
131
132 def _prettyformat(tree, leafnodes, level, lines):
132 def _prettyformat(tree, leafnodes, level, lines):
133 if not isinstance(tree, tuple) or tree[0] in leafnodes:
133 if not isinstance(tree, tuple) or tree[0] in leafnodes:
134 lines.append((level, str(tree)))
134 lines.append((level, str(tree)))
135 else:
135 else:
136 lines.append((level, '(%s' % tree[0]))
136 lines.append((level, '(%s' % tree[0]))
137 for s in tree[1:]:
137 for s in tree[1:]:
138 _prettyformat(s, leafnodes, level + 1, lines)
138 _prettyformat(s, leafnodes, level + 1, lines)
139 lines[-1:] = [(lines[-1][0], lines[-1][1] + ')')]
139 lines[-1:] = [(lines[-1][0], lines[-1][1] + ')')]
140
140
141 def prettyformat(tree, leafnodes):
141 def prettyformat(tree, leafnodes):
142 lines = []
142 lines = []
143 _prettyformat(tree, leafnodes, 0, lines)
143 _prettyformat(tree, leafnodes, 0, lines)
144 output = '\n'.join((' ' * l + s) for l, s in lines)
144 output = '\n'.join((' ' * l + s) for l, s in lines)
145 return output
145 return output
146
146
147 def simplifyinfixops(tree, targetnodes):
147 def simplifyinfixops(tree, targetnodes):
148 """Flatten chained infix operations to reduce usage of Python stack
148 """Flatten chained infix operations to reduce usage of Python stack
149
149
150 >>> def f(tree):
150 >>> def f(tree):
151 ... print prettyformat(simplifyinfixops(tree, ('or',)), ('symbol',))
151 ... print prettyformat(simplifyinfixops(tree, ('or',)), ('symbol',))
152 >>> f(('or',
152 >>> f(('or',
153 ... ('or',
153 ... ('or',
154 ... ('symbol', '1'),
154 ... ('symbol', '1'),
155 ... ('symbol', '2')),
155 ... ('symbol', '2')),
156 ... ('symbol', '3')))
156 ... ('symbol', '3')))
157 (or
157 (or
158 ('symbol', '1')
158 ('symbol', '1')
159 ('symbol', '2')
159 ('symbol', '2')
160 ('symbol', '3'))
160 ('symbol', '3'))
161 >>> f(('func',
161 >>> f(('func',
162 ... ('symbol', 'p1'),
162 ... ('symbol', 'p1'),
163 ... ('or',
163 ... ('or',
164 ... ('or',
164 ... ('or',
165 ... ('func',
165 ... ('func',
166 ... ('symbol', 'sort'),
166 ... ('symbol', 'sort'),
167 ... ('list',
167 ... ('list',
168 ... ('or',
168 ... ('or',
169 ... ('or',
169 ... ('or',
170 ... ('symbol', '1'),
170 ... ('symbol', '1'),
171 ... ('symbol', '2')),
171 ... ('symbol', '2')),
172 ... ('symbol', '3')),
172 ... ('symbol', '3')),
173 ... ('negate',
173 ... ('negate',
174 ... ('symbol', 'rev')))),
174 ... ('symbol', 'rev')))),
175 ... ('and',
175 ... ('and',
176 ... ('symbol', '4'),
176 ... ('symbol', '4'),
177 ... ('group',
177 ... ('group',
178 ... ('or',
178 ... ('or',
179 ... ('or',
179 ... ('or',
180 ... ('symbol', '5'),
180 ... ('symbol', '5'),
181 ... ('symbol', '6')),
181 ... ('symbol', '6')),
182 ... ('symbol', '7'))))),
182 ... ('symbol', '7'))))),
183 ... ('symbol', '8'))))
183 ... ('symbol', '8'))))
184 (func
184 (func
185 ('symbol', 'p1')
185 ('symbol', 'p1')
186 (or
186 (or
187 (func
187 (func
188 ('symbol', 'sort')
188 ('symbol', 'sort')
189 (list
189 (list
190 (or
190 (or
191 ('symbol', '1')
191 ('symbol', '1')
192 ('symbol', '2')
192 ('symbol', '2')
193 ('symbol', '3'))
193 ('symbol', '3'))
194 (negate
194 (negate
195 ('symbol', 'rev'))))
195 ('symbol', 'rev'))))
196 (and
196 (and
197 ('symbol', '4')
197 ('symbol', '4')
198 (group
198 (group
199 (or
199 (or
200 ('symbol', '5')
200 ('symbol', '5')
201 ('symbol', '6')
201 ('symbol', '6')
202 ('symbol', '7'))))
202 ('symbol', '7'))))
203 ('symbol', '8')))
203 ('symbol', '8')))
204 """
204 """
205 if not isinstance(tree, tuple):
205 if not isinstance(tree, tuple):
206 return tree
206 return tree
207 op = tree[0]
207 op = tree[0]
208 if op not in targetnodes:
208 if op not in targetnodes:
209 return (op,) + tuple(simplifyinfixops(x, targetnodes) for x in tree[1:])
209 return (op,) + tuple(simplifyinfixops(x, targetnodes) for x in tree[1:])
210
210
211 # walk down left nodes taking each right node. no recursion to left nodes
211 # walk down left nodes taking each right node. no recursion to left nodes
212 # because infix operators are left-associative, i.e. left tree is deep.
212 # because infix operators are left-associative, i.e. left tree is deep.
213 # e.g. '1 + 2 + 3' -> (+ (+ 1 2) 3) -> (+ 1 2 3)
213 # e.g. '1 + 2 + 3' -> (+ (+ 1 2) 3) -> (+ 1 2 3)
214 simplified = []
214 simplified = []
215 x = tree
215 x = tree
216 while x[0] == op:
216 while x[0] == op:
217 l, r = x[1:]
217 l, r = x[1:]
218 simplified.append(simplifyinfixops(r, targetnodes))
218 simplified.append(simplifyinfixops(r, targetnodes))
219 x = l
219 x = l
220 simplified.append(simplifyinfixops(x, targetnodes))
220 simplified.append(simplifyinfixops(x, targetnodes))
221 simplified.append(op)
221 simplified.append(op)
222 return tuple(reversed(simplified))
222 return tuple(reversed(simplified))
223
223
224 def parseerrordetail(inst):
224 def parseerrordetail(inst):
225 """Compose error message from specified ParseError object
225 """Compose error message from specified ParseError object
226 """
226 """
227 if len(inst.args) > 1:
227 if len(inst.args) > 1:
228 return _('at %s: %s') % (inst.args[1], inst.args[0])
228 return _('at %s: %s') % (inst.args[1], inst.args[0])
229 else:
229 else:
230 return inst.args[0]
230 return inst.args[0]
231
231
232 class alias(object):
232 class alias(object):
233 """Parsed result of alias"""
233 """Parsed result of alias"""
234
234
235 def __init__(self, name, args, err, replacement):
235 def __init__(self, name, args, err, replacement):
236 self.name = name
236 self.name = name
237 self.args = args
237 self.args = args
238 self.error = err
238 self.error = err
239 self.replacement = replacement
239 self.replacement = replacement
240 # whether own `error` information is already shown or not.
240 # whether own `error` information is already shown or not.
241 # this avoids showing same warning multiple times at each
241 # this avoids showing same warning multiple times at each
242 # `expandaliases`.
242 # `expandaliases`.
243 self.warned = False
243 self.warned = False
244
244
245 class basealiasrules(object):
245 class basealiasrules(object):
246 """Parsing and expansion rule set of aliases
246 """Parsing and expansion rule set of aliases
247
247
248 This is a helper for fileset/revset/template aliases. A concrete rule set
248 This is a helper for fileset/revset/template aliases. A concrete rule set
249 should be made by sub-classing this and implementing class/static methods.
249 should be made by sub-classing this and implementing class/static methods.
250
250
251 It supports alias expansion of symbol and function-call styles::
251 It supports alias expansion of symbol and function-call styles::
252
252
253 # decl = defn
253 # decl = defn
254 h = heads(default)
254 h = heads(default)
255 b($1) = ancestors($1) - ancestors(default)
255 b($1) = ancestors($1) - ancestors(default)
256 """
256 """
257 # typically a config section, which will be included in error messages
257 # typically a config section, which will be included in error messages
258 _section = None
258 _section = None
259 # tag of symbol node
259 # tag of symbol node
260 _symbolnode = 'symbol'
260 _symbolnode = 'symbol'
261
261
262 def __new__(cls):
262 def __new__(cls):
263 raise TypeError("'%s' is not instantiatable" % cls.__name__)
263 raise TypeError("'%s' is not instantiatable" % cls.__name__)
264
264
265 @staticmethod
265 @staticmethod
266 def _parse(spec):
266 def _parse(spec):
267 """Parse an alias name, arguments and definition"""
267 """Parse an alias name, arguments and definition"""
268 raise NotImplementedError
268 raise NotImplementedError
269
269
270 @staticmethod
270 @staticmethod
271 def _trygetfunc(tree):
271 def _trygetfunc(tree):
272 """Return (name, args) if tree is a function; otherwise None"""
272 """Return (name, args) if tree is a function; otherwise None"""
273 raise NotImplementedError
273 raise NotImplementedError
274
274
275 @classmethod
275 @classmethod
276 def _builddecl(cls, decl):
276 def _builddecl(cls, decl):
277 """Parse an alias declaration into ``(name, args, errorstr)``
277 """Parse an alias declaration into ``(name, args, errorstr)``
278
278
279 This function analyzes the parsed tree. The parsing rule is provided
279 This function analyzes the parsed tree. The parsing rule is provided
280 by ``_parse()``.
280 by ``_parse()``.
281
281
282 - ``name``: of declared alias (may be ``decl`` itself at error)
282 - ``name``: of declared alias (may be ``decl`` itself at error)
283 - ``args``: list of argument names (or None for symbol declaration)
283 - ``args``: list of argument names (or None for symbol declaration)
284 - ``errorstr``: detail about detected error (or None)
284 - ``errorstr``: detail about detected error (or None)
285
285
286 >>> sym = lambda x: ('symbol', x)
286 >>> sym = lambda x: ('symbol', x)
287 >>> symlist = lambda *xs: ('list',) + tuple(sym(x) for x in xs)
287 >>> symlist = lambda *xs: ('list',) + tuple(sym(x) for x in xs)
288 >>> func = lambda n, a: ('func', sym(n), a)
288 >>> func = lambda n, a: ('func', sym(n), a)
289 >>> parsemap = {
289 >>> parsemap = {
290 ... 'foo': sym('foo'),
290 ... 'foo': sym('foo'),
291 ... '$foo': sym('$foo'),
291 ... '$foo': sym('$foo'),
292 ... 'foo::bar': ('dagrange', sym('foo'), sym('bar')),
292 ... 'foo::bar': ('dagrange', sym('foo'), sym('bar')),
293 ... 'foo()': func('foo', None),
293 ... 'foo()': func('foo', None),
294 ... '$foo()': func('$foo', None),
294 ... '$foo()': func('$foo', None),
295 ... 'foo($1, $2)': func('foo', symlist('$1', '$2')),
295 ... 'foo($1, $2)': func('foo', symlist('$1', '$2')),
296 ... 'foo(bar_bar, baz.baz)':
296 ... 'foo(bar_bar, baz.baz)':
297 ... func('foo', symlist('bar_bar', 'baz.baz')),
297 ... func('foo', symlist('bar_bar', 'baz.baz')),
298 ... 'foo(bar($1, $2))':
298 ... 'foo(bar($1, $2))':
299 ... func('foo', func('bar', symlist('$1', '$2'))),
299 ... func('foo', func('bar', symlist('$1', '$2'))),
300 ... 'foo($1, $2, nested($1, $2))':
300 ... 'foo($1, $2, nested($1, $2))':
301 ... func('foo', (symlist('$1', '$2') +
301 ... func('foo', (symlist('$1', '$2') +
302 ... (func('nested', symlist('$1', '$2')),))),
302 ... (func('nested', symlist('$1', '$2')),))),
303 ... 'foo("bar")': func('foo', ('string', 'bar')),
303 ... 'foo("bar")': func('foo', ('string', 'bar')),
304 ... 'foo($1, $2': error.ParseError('unexpected token: end', 10),
304 ... 'foo($1, $2': error.ParseError('unexpected token: end', 10),
305 ... 'foo("bar': error.ParseError('unterminated string', 5),
305 ... 'foo("bar': error.ParseError('unterminated string', 5),
306 ... 'foo($1, $2, $1)': func('foo', symlist('$1', '$2', '$1')),
306 ... 'foo($1, $2, $1)': func('foo', symlist('$1', '$2', '$1')),
307 ... }
307 ... }
308 >>> def parse(expr):
308 >>> def parse(expr):
309 ... x = parsemap[expr]
309 ... x = parsemap[expr]
310 ... if isinstance(x, Exception):
310 ... if isinstance(x, Exception):
311 ... raise x
311 ... raise x
312 ... return x
312 ... return x
313 >>> def trygetfunc(tree):
313 >>> def trygetfunc(tree):
314 ... if not tree or tree[0] != 'func' or tree[1][0] != 'symbol':
314 ... if not tree or tree[0] != 'func' or tree[1][0] != 'symbol':
315 ... return None
315 ... return None
316 ... if not tree[2]:
316 ... if not tree[2]:
317 ... return tree[1][1], []
317 ... return tree[1][1], []
318 ... if tree[2][0] == 'list':
318 ... if tree[2][0] == 'list':
319 ... return tree[1][1], list(tree[2][1:])
319 ... return tree[1][1], list(tree[2][1:])
320 ... return tree[1][1], [tree[2]]
320 ... return tree[1][1], [tree[2]]
321 >>> class aliasrules(basealiasrules):
321 >>> class aliasrules(basealiasrules):
322 ... _parse = staticmethod(parse)
322 ... _parse = staticmethod(parse)
323 ... _trygetfunc = staticmethod(trygetfunc)
323 ... _trygetfunc = staticmethod(trygetfunc)
324 >>> builddecl = aliasrules._builddecl
324 >>> builddecl = aliasrules._builddecl
325 >>> builddecl('foo')
325 >>> builddecl('foo')
326 ('foo', None, None)
326 ('foo', None, None)
327 >>> builddecl('$foo')
327 >>> builddecl('$foo')
328 ('$foo', None, "invalid symbol '$foo'")
328 ('$foo', None, "invalid symbol '$foo'")
329 >>> builddecl('foo::bar')
329 >>> builddecl('foo::bar')
330 ('foo::bar', None, 'invalid format')
330 ('foo::bar', None, 'invalid format')
331 >>> builddecl('foo()')
331 >>> builddecl('foo()')
332 ('foo', [], None)
332 ('foo', [], None)
333 >>> builddecl('$foo()')
333 >>> builddecl('$foo()')
334 ('$foo()', None, "invalid function '$foo'")
334 ('$foo()', None, "invalid function '$foo'")
335 >>> builddecl('foo($1, $2)')
335 >>> builddecl('foo($1, $2)')
336 ('foo', ['$1', '$2'], None)
336 ('foo', ['$1', '$2'], None)
337 >>> builddecl('foo(bar_bar, baz.baz)')
337 >>> builddecl('foo(bar_bar, baz.baz)')
338 ('foo', ['bar_bar', 'baz.baz'], None)
338 ('foo', ['bar_bar', 'baz.baz'], None)
339 >>> builddecl('foo($1, $2, nested($1, $2))')
339 >>> builddecl('foo($1, $2, nested($1, $2))')
340 ('foo($1, $2, nested($1, $2))', None, 'invalid argument list')
340 ('foo($1, $2, nested($1, $2))', None, 'invalid argument list')
341 >>> builddecl('foo(bar($1, $2))')
341 >>> builddecl('foo(bar($1, $2))')
342 ('foo(bar($1, $2))', None, 'invalid argument list')
342 ('foo(bar($1, $2))', None, 'invalid argument list')
343 >>> builddecl('foo("bar")')
343 >>> builddecl('foo("bar")')
344 ('foo("bar")', None, 'invalid argument list')
344 ('foo("bar")', None, 'invalid argument list')
345 >>> builddecl('foo($1, $2')
345 >>> builddecl('foo($1, $2')
346 ('foo($1, $2', None, 'at 10: unexpected token: end')
346 ('foo($1, $2', None, 'at 10: unexpected token: end')
347 >>> builddecl('foo("bar')
347 >>> builddecl('foo("bar')
348 ('foo("bar', None, 'at 5: unterminated string')
348 ('foo("bar', None, 'at 5: unterminated string')
349 >>> builddecl('foo($1, $2, $1)')
349 >>> builddecl('foo($1, $2, $1)')
350 ('foo', None, 'argument names collide with each other')
350 ('foo', None, 'argument names collide with each other')
351 """
351 """
352 try:
352 try:
353 tree = cls._parse(decl)
353 tree = cls._parse(decl)
354 except error.ParseError as inst:
354 except error.ParseError as inst:
355 return (decl, None, parseerrordetail(inst))
355 return (decl, None, parseerrordetail(inst))
356
356
357 if tree[0] == cls._symbolnode:
357 if tree[0] == cls._symbolnode:
358 # "name = ...." style
358 # "name = ...." style
359 name = tree[1]
359 name = tree[1]
360 if name.startswith('$'):
360 if name.startswith('$'):
361 return (decl, None, _("invalid symbol '%s'") % name)
361 return (decl, None, _("invalid symbol '%s'") % name)
362 return (name, None, None)
362 return (name, None, None)
363
363
364 func = cls._trygetfunc(tree)
364 func = cls._trygetfunc(tree)
365 if func:
365 if func:
366 # "name(arg, ....) = ...." style
366 # "name(arg, ....) = ...." style
367 name, args = func
367 name, args = func
368 if name.startswith('$'):
368 if name.startswith('$'):
369 return (decl, None, _("invalid function '%s'") % name)
369 return (decl, None, _("invalid function '%s'") % name)
370 if any(t[0] != cls._symbolnode for t in args):
370 if any(t[0] != cls._symbolnode for t in args):
371 return (decl, None, _("invalid argument list"))
371 return (decl, None, _("invalid argument list"))
372 if len(args) != len(set(args)):
372 if len(args) != len(set(args)):
373 return (name, None, _("argument names collide with each other"))
373 return (name, None, _("argument names collide with each other"))
374 return (name, [t[1] for t in args], None)
374 return (name, [t[1] for t in args], None)
375
375
376 return (decl, None, _("invalid format"))
376 return (decl, None, _("invalid format"))
377
377
378 @classmethod
378 @classmethod
379 def _relabelargs(cls, tree, args):
379 def _relabelargs(cls, tree, args):
380 """Mark alias arguments as ``_aliasarg``"""
380 """Mark alias arguments as ``_aliasarg``"""
381 if not isinstance(tree, tuple):
381 if not isinstance(tree, tuple):
382 return tree
382 return tree
383 op = tree[0]
383 op = tree[0]
384 if op != cls._symbolnode:
384 if op != cls._symbolnode:
385 return (op,) + tuple(cls._relabelargs(x, args) for x in tree[1:])
385 return (op,) + tuple(cls._relabelargs(x, args) for x in tree[1:])
386
386
387 assert len(tree) == 2
387 assert len(tree) == 2
388 sym = tree[1]
388 sym = tree[1]
389 if sym in args:
389 if sym in args:
390 op = '_aliasarg'
390 op = '_aliasarg'
391 elif sym.startswith('$'):
391 elif sym.startswith('$'):
392 raise error.ParseError(_("invalid symbol '%s'") % sym)
392 raise error.ParseError(_("invalid symbol '%s'") % sym)
393 return (op, sym)
393 return (op, sym)
394
394
395 @classmethod
395 @classmethod
396 def _builddefn(cls, defn, args):
396 def _builddefn(cls, defn, args):
397 """Parse an alias definition into a tree and marks substitutions
397 """Parse an alias definition into a tree and marks substitutions
398
398
399 This function marks alias argument references as ``_aliasarg``. The
399 This function marks alias argument references as ``_aliasarg``. The
400 parsing rule is provided by ``_parse()``.
400 parsing rule is provided by ``_parse()``.
401
401
402 ``args`` is a list of alias argument names, or None if the alias
402 ``args`` is a list of alias argument names, or None if the alias
403 is declared as a symbol.
403 is declared as a symbol.
404
404
405 >>> parsemap = {
405 >>> parsemap = {
406 ... '$1 or foo': ('or', ('symbol', '$1'), ('symbol', 'foo')),
406 ... '$1 or foo': ('or', ('symbol', '$1'), ('symbol', 'foo')),
407 ... '$1 or $bar': ('or', ('symbol', '$1'), ('symbol', '$bar')),
407 ... '$1 or $bar': ('or', ('symbol', '$1'), ('symbol', '$bar')),
408 ... '$10 or baz': ('or', ('symbol', '$10'), ('symbol', 'baz')),
408 ... '$10 or baz': ('or', ('symbol', '$10'), ('symbol', 'baz')),
409 ... '"$1" or "foo"': ('or', ('string', '$1'), ('string', 'foo')),
409 ... '"$1" or "foo"': ('or', ('string', '$1'), ('string', 'foo')),
410 ... }
410 ... }
411 >>> class aliasrules(basealiasrules):
411 >>> class aliasrules(basealiasrules):
412 ... _parse = staticmethod(parsemap.__getitem__)
412 ... _parse = staticmethod(parsemap.__getitem__)
413 ... _trygetfunc = staticmethod(lambda x: None)
413 ... _trygetfunc = staticmethod(lambda x: None)
414 >>> builddefn = aliasrules._builddefn
414 >>> builddefn = aliasrules._builddefn
415 >>> def pprint(tree):
415 >>> def pprint(tree):
416 ... print prettyformat(tree, ('_aliasarg', 'string', 'symbol'))
416 ... print prettyformat(tree, ('_aliasarg', 'string', 'symbol'))
417 >>> args = ['$1', '$2', 'foo']
417 >>> args = ['$1', '$2', 'foo']
418 >>> pprint(builddefn('$1 or foo', args))
418 >>> pprint(builddefn('$1 or foo', args))
419 (or
419 (or
420 ('_aliasarg', '$1')
420 ('_aliasarg', '$1')
421 ('_aliasarg', 'foo'))
421 ('_aliasarg', 'foo'))
422 >>> try:
422 >>> try:
423 ... builddefn('$1 or $bar', args)
423 ... builddefn('$1 or $bar', args)
424 ... except error.ParseError as inst:
424 ... except error.ParseError as inst:
425 ... print parseerrordetail(inst)
425 ... print parseerrordetail(inst)
426 invalid symbol '$bar'
426 invalid symbol '$bar'
427 >>> args = ['$1', '$10', 'foo']
427 >>> args = ['$1', '$10', 'foo']
428 >>> pprint(builddefn('$10 or baz', args))
428 >>> pprint(builddefn('$10 or baz', args))
429 (or
429 (or
430 ('_aliasarg', '$10')
430 ('_aliasarg', '$10')
431 ('symbol', 'baz'))
431 ('symbol', 'baz'))
432 >>> pprint(builddefn('"$1" or "foo"', args))
432 >>> pprint(builddefn('"$1" or "foo"', args))
433 (or
433 (or
434 ('string', '$1')
434 ('string', '$1')
435 ('string', 'foo'))
435 ('string', 'foo'))
436 """
436 """
437 tree = cls._parse(defn)
437 tree = cls._parse(defn)
438 if args:
438 if args:
439 args = set(args)
439 args = set(args)
440 else:
440 else:
441 args = set()
441 args = set()
442 return cls._relabelargs(tree, args)
442 return cls._relabelargs(tree, args)
443
443
444 @classmethod
444 @classmethod
445 def build(cls, decl, defn):
445 def build(cls, decl, defn):
446 """Parse an alias declaration and definition into an alias object"""
446 """Parse an alias declaration and definition into an alias object"""
447 repl = efmt = None
447 repl = efmt = None
448 name, args, err = cls._builddecl(decl)
448 name, args, err = cls._builddecl(decl)
449 if err:
449 if err:
450 efmt = _('bad declaration of %(section)s "%(name)s": %(error)s')
450 efmt = _('bad declaration of %(section)s "%(name)s": %(error)s')
451 else:
451 else:
452 try:
452 try:
453 repl = cls._builddefn(defn, args)
453 repl = cls._builddefn(defn, args)
454 except error.ParseError as inst:
454 except error.ParseError as inst:
455 err = parseerrordetail(inst)
455 err = parseerrordetail(inst)
456 efmt = _('bad definition of %(section)s "%(name)s": %(error)s')
456 efmt = _('bad definition of %(section)s "%(name)s": %(error)s')
457 if err:
457 if err:
458 err = efmt % {'section': cls._section, 'name': name, 'error': err}
458 err = efmt % {'section': cls._section, 'name': name, 'error': err}
459 return alias(name, args, err, repl)
459 return alias(name, args, err, repl)
460
460
461 @classmethod
461 @classmethod
462 def buildmap(cls, items):
462 def buildmap(cls, items):
463 """Parse a list of alias (name, replacement) pairs into a dict of
463 """Parse a list of alias (name, replacement) pairs into a dict of
464 alias objects"""
464 alias objects"""
465 aliases = {}
465 aliases = {}
466 for decl, defn in items:
466 for decl, defn in items:
467 a = cls.build(decl, defn)
467 a = cls.build(decl, defn)
468 aliases[a.name] = a
468 aliases[a.name] = a
469 return aliases
469 return aliases
470
470
471 @classmethod
471 @classmethod
472 def _getalias(cls, aliases, tree):
472 def _getalias(cls, aliases, tree):
473 """If tree looks like an unexpanded alias, return (alias, pattern-args)
473 """If tree looks like an unexpanded alias, return (alias, pattern-args)
474 pair. Return None otherwise.
474 pair. Return None otherwise.
475 """
475 """
476 if not isinstance(tree, tuple):
476 if not isinstance(tree, tuple):
477 return None
477 return None
478 if tree[0] == cls._symbolnode:
478 if tree[0] == cls._symbolnode:
479 name = tree[1]
479 name = tree[1]
480 a = aliases.get(name)
480 a = aliases.get(name)
481 if a and a.args is None:
481 if a and a.args is None:
482 return a, None
482 return a, None
483 func = cls._trygetfunc(tree)
483 func = cls._trygetfunc(tree)
484 if func:
484 if func:
485 name, args = func
485 name, args = func
486 a = aliases.get(name)
486 a = aliases.get(name)
487 if a and a.args is not None:
487 if a and a.args is not None:
488 return a, args
488 return a, args
489 return None
489 return None
490
490
491 @classmethod
491 @classmethod
492 def _expandargs(cls, tree, args):
492 def _expandargs(cls, tree, args):
493 """Replace _aliasarg instances with the substitution value of the
493 """Replace _aliasarg instances with the substitution value of the
494 same name in args, recursively.
494 same name in args, recursively.
495 """
495 """
496 if not isinstance(tree, tuple):
496 if not isinstance(tree, tuple):
497 return tree
497 return tree
498 if tree[0] == '_aliasarg':
498 if tree[0] == '_aliasarg':
499 sym = tree[1]
499 sym = tree[1]
500 return args[sym]
500 return args[sym]
501 return tuple(cls._expandargs(t, args) for t in tree)
501 return tuple(cls._expandargs(t, args) for t in tree)
502
502
503 @classmethod
503 @classmethod
504 def _expand(cls, aliases, tree, expanding, cache):
504 def _expand(cls, aliases, tree, expanding, cache):
505 if not isinstance(tree, tuple):
505 if not isinstance(tree, tuple):
506 return tree
506 return tree
507 r = cls._getalias(aliases, tree)
507 r = cls._getalias(aliases, tree)
508 if r is None:
508 if r is None:
509 return tuple(cls._expand(aliases, t, expanding, cache)
509 return tuple(cls._expand(aliases, t, expanding, cache)
510 for t in tree)
510 for t in tree)
511 a, l = r
511 a, l = r
512 if a.error:
512 if a.error:
513 raise error.Abort(a.error)
513 raise error.Abort(a.error)
514 if a in expanding:
514 if a in expanding:
515 raise error.ParseError(_('infinite expansion of %(section)s '
515 raise error.ParseError(_('infinite expansion of %(section)s '
516 '"%(name)s" detected')
516 '"%(name)s" detected')
517 % {'section': cls._section, 'name': a.name})
517 % {'section': cls._section, 'name': a.name})
518 # get cacheable replacement tree by expanding aliases recursively
518 # get cacheable replacement tree by expanding aliases recursively
519 expanding.append(a)
519 expanding.append(a)
520 if a.name not in cache:
520 if a.name not in cache:
521 cache[a.name] = cls._expand(aliases, a.replacement, expanding,
521 cache[a.name] = cls._expand(aliases, a.replacement, expanding,
522 cache)
522 cache)
523 result = cache[a.name]
523 result = cache[a.name]
524 expanding.pop()
524 expanding.pop()
525 if a.args is None:
525 if a.args is None:
526 return result
526 return result
527 # substitute function arguments in replacement tree
527 # substitute function arguments in replacement tree
528 if len(l) != len(a.args):
528 if len(l) != len(a.args):
529 raise error.ParseError(_('invalid number of arguments: %d')
529 raise error.ParseError(_('invalid number of arguments: %d')
530 % len(l))
530 % len(l))
531 l = [cls._expand(aliases, t, [], cache) for t in l]
531 l = [cls._expand(aliases, t, [], cache) for t in l]
532 return cls._expandargs(result, dict(zip(a.args, l)))
532 return cls._expandargs(result, dict(zip(a.args, l)))
533
533
534 @classmethod
534 @classmethod
535 def expand(cls, aliases, tree):
535 def expand(cls, aliases, tree):
536 """Expand aliases in tree, recursively.
536 """Expand aliases in tree, recursively.
537
537
538 'aliases' is a dictionary mapping user defined aliases to alias objects.
538 'aliases' is a dictionary mapping user defined aliases to alias objects.
539 """
539 """
540 return cls._expand(aliases, tree, [], {})
540 return cls._expand(aliases, tree, [], {})
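
As an illustration of the contract described in the header comments of this file (not part of the change), here is a minimal sketch of driving the parser class with a toy grammar; the toyelements table, the suffixbang node and the toytokens() helper are hypothetical:

    # element layout: token-type: (binding strength, primary, prefix, infix, suffix)
    # a suffix action is now just a tree-node name
    toyelements = {
        "symbol": (0, "symbol", None, None, None),
        "!": (10, None, None, None, "suffixbang"),
        "end": (0, None, None, None, None),
    }

    def toytokens(program):
        # toy tokenizer yielding (type, value, pos) tuples, terminated by 'end'
        for pos, c in enumerate(program):
            if c.isdigit():
                yield ('symbol', c, pos)
            elif c == '!':
                yield ('!', None, pos)
        yield ('end', None, len(program))

    p = parser(toyelements)
    tree, pos = p.parse(toytokens('3!'))
    # tree == ('suffixbang', ('symbol', '3')), pos == 2
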
mercurial/revset.py
@@ -1,3666 +1,3664 @@
1 # revset.py - revision set queries for mercurial
1 # revset.py - revision set queries for mercurial
2 #
2 #
3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import heapq
10 import heapq
11 import re
11 import re
12
12
13 from .i18n import _
13 from .i18n import _
14 from . import (
14 from . import (
15 destutil,
15 destutil,
16 encoding,
16 encoding,
17 error,
17 error,
18 hbisect,
18 hbisect,
19 match as matchmod,
19 match as matchmod,
20 node,
20 node,
21 obsolete as obsmod,
21 obsolete as obsmod,
22 parser,
22 parser,
23 pathutil,
23 pathutil,
24 phases,
24 phases,
25 registrar,
25 registrar,
26 repoview,
26 repoview,
27 util,
27 util,
28 )
28 )
29
29
30 def _revancestors(repo, revs, followfirst):
30 def _revancestors(repo, revs, followfirst):
31 """Like revlog.ancestors(), but supports followfirst."""
31 """Like revlog.ancestors(), but supports followfirst."""
32 if followfirst:
32 if followfirst:
33 cut = 1
33 cut = 1
34 else:
34 else:
35 cut = None
35 cut = None
36 cl = repo.changelog
36 cl = repo.changelog
37
37
38 def iterate():
38 def iterate():
39 revs.sort(reverse=True)
39 revs.sort(reverse=True)
40 irevs = iter(revs)
40 irevs = iter(revs)
41 h = []
41 h = []
42
42
43 inputrev = next(irevs, None)
43 inputrev = next(irevs, None)
44 if inputrev is not None:
44 if inputrev is not None:
45 heapq.heappush(h, -inputrev)
45 heapq.heappush(h, -inputrev)
46
46
47 seen = set()
47 seen = set()
48 while h:
48 while h:
49 current = -heapq.heappop(h)
49 current = -heapq.heappop(h)
50 if current == inputrev:
50 if current == inputrev:
51 inputrev = next(irevs, None)
51 inputrev = next(irevs, None)
52 if inputrev is not None:
52 if inputrev is not None:
53 heapq.heappush(h, -inputrev)
53 heapq.heappush(h, -inputrev)
54 if current not in seen:
54 if current not in seen:
55 seen.add(current)
55 seen.add(current)
56 yield current
56 yield current
57 for parent in cl.parentrevs(current)[:cut]:
57 for parent in cl.parentrevs(current)[:cut]:
58 if parent != node.nullrev:
58 if parent != node.nullrev:
59 heapq.heappush(h, -parent)
59 heapq.heappush(h, -parent)
60
60
61 return generatorset(iterate(), iterasc=False)
61 return generatorset(iterate(), iterasc=False)
62
62
63 def _revdescendants(repo, revs, followfirst):
63 def _revdescendants(repo, revs, followfirst):
64 """Like revlog.descendants() but supports followfirst."""
64 """Like revlog.descendants() but supports followfirst."""
65 if followfirst:
65 if followfirst:
66 cut = 1
66 cut = 1
67 else:
67 else:
68 cut = None
68 cut = None
69
69
70 def iterate():
70 def iterate():
71 cl = repo.changelog
71 cl = repo.changelog
72 # XXX this should be 'parentset.min()' assuming 'parentset' is a
72 # XXX this should be 'parentset.min()' assuming 'parentset' is a
73 # smartset (and if it is not, it should.)
73 # smartset (and if it is not, it should.)
74 first = min(revs)
74 first = min(revs)
75 nullrev = node.nullrev
75 nullrev = node.nullrev
76 if first == nullrev:
76 if first == nullrev:
77 # Are there nodes with a null first parent and a non-null
77 # Are there nodes with a null first parent and a non-null
78 # second one? Maybe. Do we care? Probably not.
78 # second one? Maybe. Do we care? Probably not.
79 for i in cl:
79 for i in cl:
80 yield i
80 yield i
81 else:
81 else:
82 seen = set(revs)
82 seen = set(revs)
83 for i in cl.revs(first + 1):
83 for i in cl.revs(first + 1):
84 for x in cl.parentrevs(i)[:cut]:
84 for x in cl.parentrevs(i)[:cut]:
85 if x != nullrev and x in seen:
85 if x != nullrev and x in seen:
86 seen.add(i)
86 seen.add(i)
87 yield i
87 yield i
88 break
88 break
89
89
90 return generatorset(iterate(), iterasc=True)
90 return generatorset(iterate(), iterasc=True)
91
91
92 def _reachablerootspure(repo, minroot, roots, heads, includepath):
92 def _reachablerootspure(repo, minroot, roots, heads, includepath):
93 """return (heads(::<roots> and ::<heads>))
93 """return (heads(::<roots> and ::<heads>))
94
94
95 If includepath is True, return (<roots>::<heads>)."""
95 If includepath is True, return (<roots>::<heads>)."""
96 if not roots:
96 if not roots:
97 return []
97 return []
98 parentrevs = repo.changelog.parentrevs
98 parentrevs = repo.changelog.parentrevs
99 roots = set(roots)
99 roots = set(roots)
100 visit = list(heads)
100 visit = list(heads)
101 reachable = set()
101 reachable = set()
102 seen = {}
102 seen = {}
103 # prefetch all the things! (because python is slow)
103 # prefetch all the things! (because python is slow)
104 reached = reachable.add
104 reached = reachable.add
105 dovisit = visit.append
105 dovisit = visit.append
106 nextvisit = visit.pop
106 nextvisit = visit.pop
107 # open-code the post-order traversal due to the tiny size of
107 # open-code the post-order traversal due to the tiny size of
108 # sys.getrecursionlimit()
108 # sys.getrecursionlimit()
109 while visit:
109 while visit:
110 rev = nextvisit()
110 rev = nextvisit()
111 if rev in roots:
111 if rev in roots:
112 reached(rev)
112 reached(rev)
113 if not includepath:
113 if not includepath:
114 continue
114 continue
115 parents = parentrevs(rev)
115 parents = parentrevs(rev)
116 seen[rev] = parents
116 seen[rev] = parents
117 for parent in parents:
117 for parent in parents:
118 if parent >= minroot and parent not in seen:
118 if parent >= minroot and parent not in seen:
119 dovisit(parent)
119 dovisit(parent)
120 if not reachable:
120 if not reachable:
121 return baseset()
121 return baseset()
122 if not includepath:
122 if not includepath:
123 return reachable
123 return reachable
124 for rev in sorted(seen):
124 for rev in sorted(seen):
125 for parent in seen[rev]:
125 for parent in seen[rev]:
126 if parent in reachable:
126 if parent in reachable:
127 reached(rev)
127 reached(rev)
128 return reachable
128 return reachable
129
129
130 def reachableroots(repo, roots, heads, includepath=False):
130 def reachableroots(repo, roots, heads, includepath=False):
131 """return (heads(::<roots> and ::<heads>))
131 """return (heads(::<roots> and ::<heads>))
132
132
133 If includepath is True, return (<roots>::<heads>)."""
133 If includepath is True, return (<roots>::<heads>)."""
134 if not roots:
134 if not roots:
135 return baseset()
135 return baseset()
136 minroot = roots.min()
136 minroot = roots.min()
137 roots = list(roots)
137 roots = list(roots)
138 heads = list(heads)
138 heads = list(heads)
139 try:
139 try:
140 revs = repo.changelog.reachableroots(minroot, heads, roots, includepath)
140 revs = repo.changelog.reachableroots(minroot, heads, roots, includepath)
141 except AttributeError:
141 except AttributeError:
142 revs = _reachablerootspure(repo, minroot, roots, heads, includepath)
142 revs = _reachablerootspure(repo, minroot, roots, heads, includepath)
143 revs = baseset(revs)
143 revs = baseset(revs)
144 revs.sort()
144 revs.sort()
145 return revs
145 return revs
146
146
147 elements = {
147 elements = {
148 # token-type: binding-strength, primary, prefix, infix, suffix
148 # token-type: binding-strength, primary, prefix, infix, suffix
149 "(": (21, None, ("group", 1, ")"), ("func", 1, ")"), None),
149 "(": (21, None, ("group", 1, ")"), ("func", 1, ")"), None),
150 "##": (20, None, None, ("_concat", 20), None),
150 "##": (20, None, None, ("_concat", 20), None),
151 "~": (18, None, None, ("ancestor", 18), None),
151 "~": (18, None, None, ("ancestor", 18), None),
152 "^": (18, None, None, ("parent", 18), ("parentpost", 18)),
152 "^": (18, None, None, ("parent", 18), "parentpost"),
153 "-": (5, None, ("negate", 19), ("minus", 5), None),
153 "-": (5, None, ("negate", 19), ("minus", 5), None),
154 "::": (17, None, ("dagrangepre", 17), ("dagrange", 17),
154 "::": (17, None, ("dagrangepre", 17), ("dagrange", 17), "dagrangepost"),
155 ("dagrangepost", 17)),
155 "..": (17, None, ("dagrangepre", 17), ("dagrange", 17), "dagrangepost"),
156 "..": (17, None, ("dagrangepre", 17), ("dagrange", 17),
156 ":": (15, "rangeall", ("rangepre", 15), ("range", 15), "rangepost"),
157 ("dagrangepost", 17)),
158 ":": (15, "rangeall", ("rangepre", 15), ("range", 15), ("rangepost", 15)),
159 "not": (10, None, ("not", 10), None, None),
157 "not": (10, None, ("not", 10), None, None),
160 "!": (10, None, ("not", 10), None, None),
158 "!": (10, None, ("not", 10), None, None),
161 "and": (5, None, None, ("and", 5), None),
159 "and": (5, None, None, ("and", 5), None),
162 "&": (5, None, None, ("and", 5), None),
160 "&": (5, None, None, ("and", 5), None),
163 "%": (5, None, None, ("only", 5), ("onlypost", 5)),
161 "%": (5, None, None, ("only", 5), "onlypost"),
164 "or": (4, None, None, ("or", 4), None),
162 "or": (4, None, None, ("or", 4), None),
165 "|": (4, None, None, ("or", 4), None),
163 "|": (4, None, None, ("or", 4), None),
166 "+": (4, None, None, ("or", 4), None),
164 "+": (4, None, None, ("or", 4), None),
167 "=": (3, None, None, ("keyvalue", 3), None),
165 "=": (3, None, None, ("keyvalue", 3), None),
168 ",": (2, None, None, ("list", 2), None),
166 ",": (2, None, None, ("list", 2), None),
169 ")": (0, None, None, None, None),
167 ")": (0, None, None, None, None),
170 "symbol": (0, "symbol", None, None, None),
168 "symbol": (0, "symbol", None, None, None),
171 "string": (0, "string", None, None, None),
169 "string": (0, "string", None, None, None),
172 "end": (0, None, None, None, None),
170 "end": (0, None, None, None, None),
173 }
171 }
174
172
175 keywords = set(['and', 'or', 'not'])
173 keywords = set(['and', 'or', 'not'])
176
174
177 # default set of valid characters for the initial letter of symbols
175 # default set of valid characters for the initial letter of symbols
178 _syminitletters = set(c for c in [chr(i) for i in xrange(256)]
176 _syminitletters = set(c for c in [chr(i) for i in xrange(256)]
179 if c.isalnum() or c in '._@' or ord(c) > 127)
177 if c.isalnum() or c in '._@' or ord(c) > 127)
180
178
181 # default set of valid characters for non-initial letters of symbols
179 # default set of valid characters for non-initial letters of symbols
182 _symletters = set(c for c in [chr(i) for i in xrange(256)]
180 _symletters = set(c for c in [chr(i) for i in xrange(256)]
183 if c.isalnum() or c in '-._/@' or ord(c) > 127)
181 if c.isalnum() or c in '-._/@' or ord(c) > 127)
184
182
185 def tokenize(program, lookup=None, syminitletters=None, symletters=None):
183 def tokenize(program, lookup=None, syminitletters=None, symletters=None):
186 '''
184 '''
187 Parse a revset statement into a stream of tokens
185 Parse a revset statement into a stream of tokens
188
186
189 ``syminitletters`` is the set of valid characters for the initial
187 ``syminitletters`` is the set of valid characters for the initial
190 letter of symbols.
188 letter of symbols.
191
189
192 By default, character ``c`` is recognized as valid for initial
190 By default, character ``c`` is recognized as valid for initial
193 letter of symbols, if ``c.isalnum() or c in '._@' or ord(c) > 127``.
191 letter of symbols, if ``c.isalnum() or c in '._@' or ord(c) > 127``.
194
192
195 ``symletters`` is the set of valid characters for non-initial
193 ``symletters`` is the set of valid characters for non-initial
196 letters of symbols.
194 letters of symbols.
197
195
198 By default, character ``c`` is recognized as valid for non-initial
196 By default, character ``c`` is recognized as valid for non-initial
199 letters of symbols, if ``c.isalnum() or c in '-._/@' or ord(c) > 127``.
197 letters of symbols, if ``c.isalnum() or c in '-._/@' or ord(c) > 127``.
200
198
201 Check that @ is a valid unquoted token character (issue3686):
199 Check that @ is a valid unquoted token character (issue3686):
202 >>> list(tokenize("@::"))
200 >>> list(tokenize("@::"))
203 [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]
201 [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]
204
202
205 '''
203 '''
206 if syminitletters is None:
204 if syminitletters is None:
207 syminitletters = _syminitletters
205 syminitletters = _syminitletters
208 if symletters is None:
206 if symletters is None:
209 symletters = _symletters
207 symletters = _symletters
210
208
211 if program and lookup:
209 if program and lookup:
212 # attempt to parse old-style ranges first to deal with
210 # attempt to parse old-style ranges first to deal with
213 # things like old-tag which contain query metacharacters
211 # things like old-tag which contain query metacharacters
214 parts = program.split(':', 1)
212 parts = program.split(':', 1)
215 if all(lookup(sym) for sym in parts if sym):
213 if all(lookup(sym) for sym in parts if sym):
216 if parts[0]:
214 if parts[0]:
217 yield ('symbol', parts[0], 0)
215 yield ('symbol', parts[0], 0)
218 if len(parts) > 1:
216 if len(parts) > 1:
219 s = len(parts[0])
217 s = len(parts[0])
220 yield (':', None, s)
218 yield (':', None, s)
221 if parts[1]:
219 if parts[1]:
222 yield ('symbol', parts[1], s + 1)
220 yield ('symbol', parts[1], s + 1)
223 yield ('end', None, len(program))
221 yield ('end', None, len(program))
224 return
222 return
225
223
226 pos, l = 0, len(program)
224 pos, l = 0, len(program)
227 while pos < l:
225 while pos < l:
228 c = program[pos]
226 c = program[pos]
229 if c.isspace(): # skip inter-token whitespace
227 if c.isspace(): # skip inter-token whitespace
230 pass
228 pass
231 elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
229 elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
232 yield ('::', None, pos)
230 yield ('::', None, pos)
233 pos += 1 # skip ahead
231 pos += 1 # skip ahead
234 elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
232 elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
235 yield ('..', None, pos)
233 yield ('..', None, pos)
236 pos += 1 # skip ahead
234 pos += 1 # skip ahead
237 elif c == '#' and program[pos:pos + 2] == '##': # look ahead carefully
235 elif c == '#' and program[pos:pos + 2] == '##': # look ahead carefully
238 yield ('##', None, pos)
236 yield ('##', None, pos)
239 pos += 1 # skip ahead
237 pos += 1 # skip ahead
240 elif c in "():=,-|&+!~^%": # handle simple operators
238 elif c in "():=,-|&+!~^%": # handle simple operators
241 yield (c, None, pos)
239 yield (c, None, pos)
242 elif (c in '"\'' or c == 'r' and
240 elif (c in '"\'' or c == 'r' and
243 program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
241 program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
244 if c == 'r':
242 if c == 'r':
245 pos += 1
243 pos += 1
246 c = program[pos]
244 c = program[pos]
247 decode = lambda x: x
245 decode = lambda x: x
248 else:
246 else:
249 decode = parser.unescapestr
247 decode = parser.unescapestr
250 pos += 1
248 pos += 1
251 s = pos
249 s = pos
252 while pos < l: # find closing quote
250 while pos < l: # find closing quote
253 d = program[pos]
251 d = program[pos]
254 if d == '\\': # skip over escaped characters
252 if d == '\\': # skip over escaped characters
255 pos += 2
253 pos += 2
256 continue
254 continue
257 if d == c:
255 if d == c:
258 yield ('string', decode(program[s:pos]), s)
256 yield ('string', decode(program[s:pos]), s)
259 break
257 break
260 pos += 1
258 pos += 1
261 else:
259 else:
262 raise error.ParseError(_("unterminated string"), s)
260 raise error.ParseError(_("unterminated string"), s)
263 # gather up a symbol/keyword
261 # gather up a symbol/keyword
264 elif c in syminitletters:
262 elif c in syminitletters:
265 s = pos
263 s = pos
266 pos += 1
264 pos += 1
267 while pos < l: # find end of symbol
265 while pos < l: # find end of symbol
268 d = program[pos]
266 d = program[pos]
269 if d not in symletters:
267 if d not in symletters:
270 break
268 break
271 if d == '.' and program[pos - 1] == '.': # special case for ..
269 if d == '.' and program[pos - 1] == '.': # special case for ..
272 pos -= 1
270 pos -= 1
273 break
271 break
274 pos += 1
272 pos += 1
275 sym = program[s:pos]
273 sym = program[s:pos]
276 if sym in keywords: # operator keywords
274 if sym in keywords: # operator keywords
277 yield (sym, None, s)
275 yield (sym, None, s)
278 elif '-' in sym:
276 elif '-' in sym:
279 # some jerk gave us foo-bar-baz, try to check if it's a symbol
277 # some jerk gave us foo-bar-baz, try to check if it's a symbol
280 if lookup and lookup(sym):
278 if lookup and lookup(sym):
281 # looks like a real symbol
279 # looks like a real symbol
282 yield ('symbol', sym, s)
280 yield ('symbol', sym, s)
283 else:
281 else:
284 # looks like an expression
282 # looks like an expression
285 parts = sym.split('-')
283 parts = sym.split('-')
286 for p in parts[:-1]:
284 for p in parts[:-1]:
287 if p: # possible consecutive -
285 if p: # possible consecutive -
288 yield ('symbol', p, s)
286 yield ('symbol', p, s)
289 s += len(p)
287 s += len(p)
290 yield ('-', None, pos)
288 yield ('-', None, pos)
291 s += 1
289 s += 1
292 if parts[-1]: # possible trailing -
290 if parts[-1]: # possible trailing -
293 yield ('symbol', parts[-1], s)
291 yield ('symbol', parts[-1], s)
294 else:
292 else:
295 yield ('symbol', sym, s)
293 yield ('symbol', sym, s)
296 pos -= 1
294 pos -= 1
297 else:
295 else:
298 raise error.ParseError(_("syntax error in revset '%s'") %
296 raise error.ParseError(_("syntax error in revset '%s'") %
299 program, pos)
297 program, pos)
300 pos += 1
298 pos += 1
301 yield ('end', None, pos)
299 yield ('end', None, pos)
302
300
303 # helpers
301 # helpers
304
302
305 def getsymbol(x):
303 def getsymbol(x):
306 if x and x[0] == 'symbol':
304 if x and x[0] == 'symbol':
307 return x[1]
305 return x[1]
308 raise error.ParseError(_('not a symbol'))
306 raise error.ParseError(_('not a symbol'))
309
307
310 def getstring(x, err):
308 def getstring(x, err):
311 if x and (x[0] == 'string' or x[0] == 'symbol'):
309 if x and (x[0] == 'string' or x[0] == 'symbol'):
312 return x[1]
310 return x[1]
313 raise error.ParseError(err)
311 raise error.ParseError(err)
314
312
315 def getlist(x):
313 def getlist(x):
316 if not x:
314 if not x:
317 return []
315 return []
318 if x[0] == 'list':
316 if x[0] == 'list':
319 return list(x[1:])
317 return list(x[1:])
320 return [x]
318 return [x]
321
319
322 def getargs(x, min, max, err):
320 def getargs(x, min, max, err):
323 l = getlist(x)
321 l = getlist(x)
324 if len(l) < min or (max >= 0 and len(l) > max):
322 if len(l) < min or (max >= 0 and len(l) > max):
325 raise error.ParseError(err)
323 raise error.ParseError(err)
326 return l
324 return l
327
325
328 def getargsdict(x, funcname, keys):
326 def getargsdict(x, funcname, keys):
329 return parser.buildargsdict(getlist(x), funcname, keys.split(),
327 return parser.buildargsdict(getlist(x), funcname, keys.split(),
330 keyvaluenode='keyvalue', keynode='symbol')
328 keyvaluenode='keyvalue', keynode='symbol')
331
329
332 def getset(repo, subset, x):
330 def getset(repo, subset, x):
333 if not x:
331 if not x:
334 raise error.ParseError(_("missing argument"))
332 raise error.ParseError(_("missing argument"))
335 s = methods[x[0]](repo, subset, *x[1:])
333 s = methods[x[0]](repo, subset, *x[1:])
336 if util.safehasattr(s, 'isascending'):
334 if util.safehasattr(s, 'isascending'):
337 return s
335 return s
338 # else case should not happen, because all non-func are internal,
336 # else case should not happen, because all non-func are internal,
339 # ignoring for now.
337 # ignoring for now.
340 if x[0] == 'func' and x[1][0] == 'symbol' and x[1][1] in symbols:
338 if x[0] == 'func' and x[1][0] == 'symbol' and x[1][1] in symbols:
341 repo.ui.deprecwarn('revset "%s" uses list instead of smartset'
339 repo.ui.deprecwarn('revset "%s" uses list instead of smartset'
342 % x[1][1],
340 % x[1][1],
343 '3.9')
341 '3.9')
344 return baseset(s)
342 return baseset(s)
345
343
346 def _getrevsource(repo, r):
344 def _getrevsource(repo, r):
347 extra = repo[r].extra()
345 extra = repo[r].extra()
348 for label in ('source', 'transplant_source', 'rebase_source'):
346 for label in ('source', 'transplant_source', 'rebase_source'):
349 if label in extra:
347 if label in extra:
350 try:
348 try:
351 return repo[extra[label]].rev()
349 return repo[extra[label]].rev()
352 except error.RepoLookupError:
350 except error.RepoLookupError:
353 pass
351 pass
354 return None
352 return None
355
353
356 # operator methods
354 # operator methods
357
355
358 def stringset(repo, subset, x):
356 def stringset(repo, subset, x):
359 x = repo[x].rev()
357 x = repo[x].rev()
360 if (x in subset
358 if (x in subset
361 or x == node.nullrev and isinstance(subset, fullreposet)):
359 or x == node.nullrev and isinstance(subset, fullreposet)):
362 return baseset([x])
360 return baseset([x])
363 return baseset()
361 return baseset()
364
362
365 def rangeset(repo, subset, x, y):
363 def rangeset(repo, subset, x, y):
366 m = getset(repo, fullreposet(repo), x)
364 m = getset(repo, fullreposet(repo), x)
367 n = getset(repo, fullreposet(repo), y)
365 n = getset(repo, fullreposet(repo), y)
368
366
369 if not m or not n:
367 if not m or not n:
370 return baseset()
368 return baseset()
371 m, n = m.first(), n.last()
369 m, n = m.first(), n.last()
372
370
373 if m == n:
371 if m == n:
374 r = baseset([m])
372 r = baseset([m])
375 elif n == node.wdirrev:
373 elif n == node.wdirrev:
376 r = spanset(repo, m, len(repo)) + baseset([n])
374 r = spanset(repo, m, len(repo)) + baseset([n])
377 elif m == node.wdirrev:
375 elif m == node.wdirrev:
378 r = baseset([m]) + spanset(repo, len(repo) - 1, n - 1)
376 r = baseset([m]) + spanset(repo, len(repo) - 1, n - 1)
379 elif m < n:
377 elif m < n:
380 r = spanset(repo, m, n + 1)
378 r = spanset(repo, m, n + 1)
381 else:
379 else:
382 r = spanset(repo, m, n - 1)
380 r = spanset(repo, m, n - 1)
383 # XXX We should combine with subset first: 'subset & baseset(...)'. This is
381 # XXX We should combine with subset first: 'subset & baseset(...)'. This is
384 # necessary to ensure we preserve the order in subset.
382 # necessary to ensure we preserve the order in subset.
385 #
383 #
386 # This has performance implications; carrying the sorting over when possible
384 # This has performance implications; carrying the sorting over when possible
387 # would be more efficient.
385 # would be more efficient.
388 return r & subset
386 return r & subset
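# Worked example (illustrative): for the range "4:2", m=4 and n=2, so the
# m > n branch builds spanset(repo, 4, 1), i.e. revisions 4, 3, 2 in
# descending order, which is then intersected with subset as noted above.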
389
387
390 def dagrange(repo, subset, x, y):
388 def dagrange(repo, subset, x, y):
391 r = fullreposet(repo)
389 r = fullreposet(repo)
392 xs = reachableroots(repo, getset(repo, r, x), getset(repo, r, y),
390 xs = reachableroots(repo, getset(repo, r, x), getset(repo, r, y),
393 includepath=True)
391 includepath=True)
394 return subset & xs
392 return subset & xs
395
393
396 def andset(repo, subset, x, y):
394 def andset(repo, subset, x, y):
397 return getset(repo, getset(repo, subset, x), y)
395 return getset(repo, getset(repo, subset, x), y)
398
396
399 def differenceset(repo, subset, x, y):
397 def differenceset(repo, subset, x, y):
400 return getset(repo, subset, x) - getset(repo, subset, y)
398 return getset(repo, subset, x) - getset(repo, subset, y)
401
399
402 def orset(repo, subset, *xs):
400 def orset(repo, subset, *xs):
403 assert xs
401 assert xs
404 if len(xs) == 1:
402 if len(xs) == 1:
405 return getset(repo, subset, xs[0])
403 return getset(repo, subset, xs[0])
406 p = len(xs) // 2
404 p = len(xs) // 2
407 a = orset(repo, subset, *xs[:p])
405 a = orset(repo, subset, *xs[:p])
408 b = orset(repo, subset, *xs[p:])
406 b = orset(repo, subset, *xs[p:])
409 return a + b
407 return a + b
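# Illustrative note: the split above combines the operands of a chained 'or'
# as a balanced tree, so 'a or b or c or d' is evaluated as (a + b) + (c + d)
# rather than ((a + b) + c) + d, keeping the recursion depth logarithmic in
# the number of operands.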
410
408
411 def notset(repo, subset, x):
409 def notset(repo, subset, x):
412 return subset - getset(repo, subset, x)
410 return subset - getset(repo, subset, x)
413
411
414 def listset(repo, subset, *xs):
412 def listset(repo, subset, *xs):
415 raise error.ParseError(_("can't use a list in this context"),
413 raise error.ParseError(_("can't use a list in this context"),
416 hint=_('see hg help "revsets.x or y"'))
414 hint=_('see hg help "revsets.x or y"'))
417
415
418 def keyvaluepair(repo, subset, k, v):
416 def keyvaluepair(repo, subset, k, v):
419 raise error.ParseError(_("can't use a key-value pair in this context"))
417 raise error.ParseError(_("can't use a key-value pair in this context"))
420
418
421 def func(repo, subset, a, b):
419 def func(repo, subset, a, b):
422 f = getsymbol(a)
420 f = getsymbol(a)
423 if f in symbols:
421 if f in symbols:
424 return symbols[f](repo, subset, b)
422 return symbols[f](repo, subset, b)
425
423
426 keep = lambda fn: getattr(fn, '__doc__', None) is not None
424 keep = lambda fn: getattr(fn, '__doc__', None) is not None
427
425
428 syms = [s for (s, fn) in symbols.items() if keep(fn)]
426 syms = [s for (s, fn) in symbols.items() if keep(fn)]
429 raise error.UnknownIdentifier(f, syms)
427 raise error.UnknownIdentifier(f, syms)
430
428
431 # functions
429 # functions
432
430
433 # symbols are callables like:
431 # symbols are callables like:
434 # fn(repo, subset, x)
432 # fn(repo, subset, x)
435 # with:
433 # with:
436 # repo - current repository instance
434 # repo - current repository instance
437 # subset - the subset of revisions to be examined
435 # subset - the subset of revisions to be examined
438 # x - argument in tree form
436 # x - argument in tree form
439 symbols = {}
437 symbols = {}
440
438
441 # symbols which can't be used for a DoS attack for any given input
439 # symbols which can't be used for a DoS attack for any given input
442 # (e.g. those which accept regexes as plain strings shouldn't be included)
440 # (e.g. those which accept regexes as plain strings shouldn't be included)
443 # functions that just return a lot of changesets (like all) don't count here
441 # functions that just return a lot of changesets (like all) don't count here
444 safesymbols = set()
442 safesymbols = set()
445
443
446 predicate = registrar.revsetpredicate()
444 predicate = registrar.revsetpredicate()
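# Illustrative sketch of how the registrar is used (the 'onlyroot' predicate
# below is hypothetical, not registered upstream; the real registrations
# follow):
#
#   @predicate('onlyroot()', safe=True)
#   def onlyroot(repo, subset, x):
#       """Changesets whose revision number is 0."""
#       getargs(x, 0, 0, _("onlyroot takes no arguments"))
#       return subset & baseset([0])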
447
445
448 @predicate('_destupdate')
446 @predicate('_destupdate')
449 def _destupdate(repo, subset, x):
447 def _destupdate(repo, subset, x):
450 # experimental revset for update destination
448 # experimental revset for update destination
451 args = getargsdict(x, '_destupdate', 'clean check')
449 args = getargsdict(x, '_destupdate', 'clean check')
452 return subset & baseset([destutil.destupdate(repo, **args)[0]])
450 return subset & baseset([destutil.destupdate(repo, **args)[0]])
453
451
454 @predicate('_destmerge')
452 @predicate('_destmerge')
455 def _destmerge(repo, subset, x):
453 def _destmerge(repo, subset, x):
456 # experimental revset for merge destination
454 # experimental revset for merge destination
457 sourceset = None
455 sourceset = None
458 if x is not None:
456 if x is not None:
459 sourceset = getset(repo, fullreposet(repo), x)
457 sourceset = getset(repo, fullreposet(repo), x)
460 return subset & baseset([destutil.destmerge(repo, sourceset=sourceset)])
458 return subset & baseset([destutil.destmerge(repo, sourceset=sourceset)])
461
459
462 @predicate('adds(pattern)', safe=True)
460 @predicate('adds(pattern)', safe=True)
463 def adds(repo, subset, x):
461 def adds(repo, subset, x):
464 """Changesets that add a file matching pattern.
462 """Changesets that add a file matching pattern.
465
463
466 The pattern without explicit kind like ``glob:`` is expected to be
464 The pattern without explicit kind like ``glob:`` is expected to be
467 relative to the current directory and match against a file or a
465 relative to the current directory and match against a file or a
468 directory.
466 directory.
469 """
467 """
470 # i18n: "adds" is a keyword
468 # i18n: "adds" is a keyword
471 pat = getstring(x, _("adds requires a pattern"))
469 pat = getstring(x, _("adds requires a pattern"))
472 return checkstatus(repo, subset, pat, 1)
470 return checkstatus(repo, subset, pat, 1)
473
471
474 @predicate('ancestor(*changeset)', safe=True)
472 @predicate('ancestor(*changeset)', safe=True)
475 def ancestor(repo, subset, x):
473 def ancestor(repo, subset, x):
476 """A greatest common ancestor of the changesets.
474 """A greatest common ancestor of the changesets.
477
475
478 Accepts 0 or more changesets.
476 Accepts 0 or more changesets.
479 Will return an empty list when passed no args.
477 Will return an empty list when passed no args.
480 The greatest common ancestor of a single changeset is that changeset.
478 The greatest common ancestor of a single changeset is that changeset.
481 """
479 """
482 # i18n: "ancestor" is a keyword
480 # i18n: "ancestor" is a keyword
483 l = getlist(x)
481 l = getlist(x)
484 rl = fullreposet(repo)
482 rl = fullreposet(repo)
485 anc = None
483 anc = None
486
484
487 # (getset(repo, rl, i) for i in l) generates a list of lists
485 # (getset(repo, rl, i) for i in l) generates a list of lists
488 for revs in (getset(repo, rl, i) for i in l):
486 for revs in (getset(repo, rl, i) for i in l):
489 for r in revs:
487 for r in revs:
490 if anc is None:
488 if anc is None:
491 anc = repo[r]
489 anc = repo[r]
492 else:
490 else:
493 anc = anc.ancestor(repo[r])
491 anc = anc.ancestor(repo[r])
494
492
495 if anc is not None and anc.rev() in subset:
493 if anc is not None and anc.rev() in subset:
496 return baseset([anc.rev()])
494 return baseset([anc.rev()])
497 return baseset()
495 return baseset()
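# Worked example (illustrative): ancestor(x, y, z) folds the greatest common
# ancestor pairwise, i.e. gca(gca(x, y), z); with a single argument it simply
# returns that changeset, and with no arguments the loop never runs and an
# empty baseset is returned.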
498
496
499 def _ancestors(repo, subset, x, followfirst=False):
497 def _ancestors(repo, subset, x, followfirst=False):
500 heads = getset(repo, fullreposet(repo), x)
498 heads = getset(repo, fullreposet(repo), x)
501 if not heads:
499 if not heads:
502 return baseset()
500 return baseset()
503 s = _revancestors(repo, heads, followfirst)
501 s = _revancestors(repo, heads, followfirst)
504 return subset & s
502 return subset & s
505
503
506 @predicate('ancestors(set)', safe=True)
504 @predicate('ancestors(set)', safe=True)
507 def ancestors(repo, subset, x):
505 def ancestors(repo, subset, x):
508 """Changesets that are ancestors of a changeset in set.
506 """Changesets that are ancestors of a changeset in set.
509 """
507 """
510 return _ancestors(repo, subset, x)
508 return _ancestors(repo, subset, x)
511
509
512 @predicate('_firstancestors', safe=True)
510 @predicate('_firstancestors', safe=True)
513 def _firstancestors(repo, subset, x):
511 def _firstancestors(repo, subset, x):
514 # ``_firstancestors(set)``
512 # ``_firstancestors(set)``
515 # Like ``ancestors(set)`` but follows only the first parents.
513 # Like ``ancestors(set)`` but follows only the first parents.
516 return _ancestors(repo, subset, x, followfirst=True)
514 return _ancestors(repo, subset, x, followfirst=True)
517
515
518 def ancestorspec(repo, subset, x, n):
516 def ancestorspec(repo, subset, x, n):
519 """``set~n``
517 """``set~n``
520 Changesets that are the Nth ancestor (first parents only) of a changeset
518 Changesets that are the Nth ancestor (first parents only) of a changeset
521 in set.
519 in set.
522 """
520 """
523 try:
521 try:
524 n = int(n[1])
522 n = int(n[1])
525 except (TypeError, ValueError):
523 except (TypeError, ValueError):
526 raise error.ParseError(_("~ expects a number"))
524 raise error.ParseError(_("~ expects a number"))
527 ps = set()
525 ps = set()
528 cl = repo.changelog
526 cl = repo.changelog
529 for r in getset(repo, fullreposet(repo), x):
527 for r in getset(repo, fullreposet(repo), x):
530 for i in range(n):
528 for i in range(n):
531 r = cl.parentrevs(r)[0]
529 r = cl.parentrevs(r)[0]
532 ps.add(r)
530 ps.add(r)
533 return subset & ps
531 return subset & ps
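# Worked example (illustrative): 'foo~2' resolves each revision in foo and
# walks parentrevs(r)[0] twice, i.e. the first parent of the first parent,
# so it behaves like 'p1(p1(foo))' restricted to subset.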
534
532
535 @predicate('author(string)', safe=True)
533 @predicate('author(string)', safe=True)
536 def author(repo, subset, x):
534 def author(repo, subset, x):
537 """Alias for ``user(string)``.
535 """Alias for ``user(string)``.
538 """
536 """
539 # i18n: "author" is a keyword
537 # i18n: "author" is a keyword
540 n = encoding.lower(getstring(x, _("author requires a string")))
538 n = encoding.lower(getstring(x, _("author requires a string")))
541 kind, pattern, matcher = _substringmatcher(n)
539 kind, pattern, matcher = _substringmatcher(n)
542 return subset.filter(lambda x: matcher(encoding.lower(repo[x].user())),
540 return subset.filter(lambda x: matcher(encoding.lower(repo[x].user())),
543 condrepr=('<user %r>', n))
541 condrepr=('<user %r>', n))
544
542
545 @predicate('bisect(string)', safe=True)
543 @predicate('bisect(string)', safe=True)
546 def bisect(repo, subset, x):
544 def bisect(repo, subset, x):
547 """Changesets marked in the specified bisect status:
545 """Changesets marked in the specified bisect status:
548
546
549 - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
547 - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
550 - ``goods``, ``bads`` : csets topologically good/bad
548 - ``goods``, ``bads`` : csets topologically good/bad
551 - ``range`` : csets taking part in the bisection
549 - ``range`` : csets taking part in the bisection
552 - ``pruned`` : csets that are goods, bads or skipped
550 - ``pruned`` : csets that are goods, bads or skipped
553 - ``untested`` : csets whose fate is yet unknown
551 - ``untested`` : csets whose fate is yet unknown
554 - ``ignored`` : csets ignored due to DAG topology
552 - ``ignored`` : csets ignored due to DAG topology
555 - ``current`` : the cset currently being bisected
553 - ``current`` : the cset currently being bisected
556 """
554 """
557 # i18n: "bisect" is a keyword
555 # i18n: "bisect" is a keyword
558 status = getstring(x, _("bisect requires a string")).lower()
556 status = getstring(x, _("bisect requires a string")).lower()
559 state = set(hbisect.get(repo, status))
557 state = set(hbisect.get(repo, status))
560 return subset & state
558 return subset & state
561
559
562 # Backward-compatibility
560 # Backward-compatibility
563 # - no help entry so that we do not advertise it any more
561 # - no help entry so that we do not advertise it any more
564 @predicate('bisected', safe=True)
562 @predicate('bisected', safe=True)
565 def bisected(repo, subset, x):
563 def bisected(repo, subset, x):
566 return bisect(repo, subset, x)
564 return bisect(repo, subset, x)
567
565
568 @predicate('bookmark([name])', safe=True)
566 @predicate('bookmark([name])', safe=True)
569 def bookmark(repo, subset, x):
567 def bookmark(repo, subset, x):
570 """The named bookmark or all bookmarks.
568 """The named bookmark or all bookmarks.
571
569
572 If `name` starts with `re:`, the remainder of the name is treated as
570 If `name` starts with `re:`, the remainder of the name is treated as
573 a regular expression. To match a bookmark that actually starts with `re:`,
571 a regular expression. To match a bookmark that actually starts with `re:`,
574 use the prefix `literal:`.
572 use the prefix `literal:`.
575 """
573 """
576 # i18n: "bookmark" is a keyword
574 # i18n: "bookmark" is a keyword
577 args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
575 args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
578 if args:
576 if args:
579 bm = getstring(args[0],
577 bm = getstring(args[0],
580 # i18n: "bookmark" is a keyword
578 # i18n: "bookmark" is a keyword
581 _('the argument to bookmark must be a string'))
579 _('the argument to bookmark must be a string'))
582 kind, pattern, matcher = util.stringmatcher(bm)
580 kind, pattern, matcher = util.stringmatcher(bm)
583 bms = set()
581 bms = set()
584 if kind == 'literal':
582 if kind == 'literal':
585 bmrev = repo._bookmarks.get(pattern, None)
583 bmrev = repo._bookmarks.get(pattern, None)
586 if not bmrev:
584 if not bmrev:
587 raise error.RepoLookupError(_("bookmark '%s' does not exist")
585 raise error.RepoLookupError(_("bookmark '%s' does not exist")
588 % pattern)
586 % pattern)
589 bms.add(repo[bmrev].rev())
587 bms.add(repo[bmrev].rev())
590 else:
588 else:
591 matchrevs = set()
589 matchrevs = set()
592 for name, bmrev in repo._bookmarks.iteritems():
590 for name, bmrev in repo._bookmarks.iteritems():
593 if matcher(name):
591 if matcher(name):
594 matchrevs.add(bmrev)
592 matchrevs.add(bmrev)
595 if not matchrevs:
593 if not matchrevs:
596 raise error.RepoLookupError(_("no bookmarks exist"
594 raise error.RepoLookupError(_("no bookmarks exist"
597 " that match '%s'") % pattern)
595 " that match '%s'") % pattern)
598 for bmrev in matchrevs:
596 for bmrev in matchrevs:
599 bms.add(repo[bmrev].rev())
597 bms.add(repo[bmrev].rev())
600 else:
598 else:
601 bms = set([repo[r].rev()
599 bms = set([repo[r].rev()
602 for r in repo._bookmarks.values()])
600 for r in repo._bookmarks.values()])
603 bms -= set([node.nullrev])
601 bms -= set([node.nullrev])
604 return subset & bms
602 return subset & bms
605
603
606 @predicate('branch(string or set)', safe=True)
604 @predicate('branch(string or set)', safe=True)
607 def branch(repo, subset, x):
605 def branch(repo, subset, x):
608 """
606 """
609 All changesets belonging to the given branch or the branches of the given
607 All changesets belonging to the given branch or the branches of the given
610 changesets.
608 changesets.
611
609
612 If `string` starts with `re:`, the remainder of the name is treated as
610 If `string` starts with `re:`, the remainder of the name is treated as
613 a regular expression. To match a branch that actually starts with `re:`,
611 a regular expression. To match a branch that actually starts with `re:`,
614 use the prefix `literal:`.
612 use the prefix `literal:`.
615 """
613 """
616 getbi = repo.revbranchcache().branchinfo
614 getbi = repo.revbranchcache().branchinfo
617
615
618 try:
616 try:
619 b = getstring(x, '')
617 b = getstring(x, '')
620 except error.ParseError:
618 except error.ParseError:
621 # not a string, but another revspec, e.g. tip()
619 # not a string, but another revspec, e.g. tip()
622 pass
620 pass
623 else:
621 else:
624 kind, pattern, matcher = util.stringmatcher(b)
622 kind, pattern, matcher = util.stringmatcher(b)
625 if kind == 'literal':
623 if kind == 'literal':
626 # note: falls through to the revspec case if no branch with
624 # note: falls through to the revspec case if no branch with
627 # this name exists and pattern kind is not specified explicitly
625 # this name exists and pattern kind is not specified explicitly
628 if pattern in repo.branchmap():
626 if pattern in repo.branchmap():
629 return subset.filter(lambda r: matcher(getbi(r)[0]),
627 return subset.filter(lambda r: matcher(getbi(r)[0]),
630 condrepr=('<branch %r>', b))
628 condrepr=('<branch %r>', b))
631 if b.startswith('literal:'):
629 if b.startswith('literal:'):
632 raise error.RepoLookupError(_("branch '%s' does not exist")
630 raise error.RepoLookupError(_("branch '%s' does not exist")
633 % pattern)
631 % pattern)
634 else:
632 else:
635 return subset.filter(lambda r: matcher(getbi(r)[0]),
633 return subset.filter(lambda r: matcher(getbi(r)[0]),
636 condrepr=('<branch %r>', b))
634 condrepr=('<branch %r>', b))
637
635
638 s = getset(repo, fullreposet(repo), x)
636 s = getset(repo, fullreposet(repo), x)
639 b = set()
637 b = set()
640 for r in s:
638 for r in s:
641 b.add(getbi(r)[0])
639 b.add(getbi(r)[0])
642 c = s.__contains__
640 c = s.__contains__
643 return subset.filter(lambda r: c(r) or getbi(r)[0] in b,
641 return subset.filter(lambda r: c(r) or getbi(r)[0] in b,
644 condrepr=lambda: '<branch %r>' % sorted(b))
642 condrepr=lambda: '<branch %r>' % sorted(b))
645
643
646 @predicate('bumped()', safe=True)
644 @predicate('bumped()', safe=True)
647 def bumped(repo, subset, x):
645 def bumped(repo, subset, x):
648 """Mutable changesets marked as successors of public changesets.
646 """Mutable changesets marked as successors of public changesets.
649
647
650 Only non-public and non-obsolete changesets can be `bumped`.
648 Only non-public and non-obsolete changesets can be `bumped`.
651 """
649 """
652 # i18n: "bumped" is a keyword
650 # i18n: "bumped" is a keyword
653 getargs(x, 0, 0, _("bumped takes no arguments"))
651 getargs(x, 0, 0, _("bumped takes no arguments"))
654 bumped = obsmod.getrevs(repo, 'bumped')
652 bumped = obsmod.getrevs(repo, 'bumped')
655 return subset & bumped
653 return subset & bumped
656
654
657 @predicate('bundle()', safe=True)
655 @predicate('bundle()', safe=True)
658 def bundle(repo, subset, x):
656 def bundle(repo, subset, x):
659 """Changesets in the bundle.
657 """Changesets in the bundle.
660
658
661 Bundle must be specified by the -R option."""
659 Bundle must be specified by the -R option."""
662
660
663 try:
661 try:
664 bundlerevs = repo.changelog.bundlerevs
662 bundlerevs = repo.changelog.bundlerevs
665 except AttributeError:
663 except AttributeError:
666 raise error.Abort(_("no bundle provided - specify with -R"))
664 raise error.Abort(_("no bundle provided - specify with -R"))
667 return subset & bundlerevs
665 return subset & bundlerevs
668
666
669 def checkstatus(repo, subset, pat, field):
667 def checkstatus(repo, subset, pat, field):
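# The 'field' argument presumably indexes the status tuple returned by
# repo.status() below (0 = modified, 1 = added, 2 = removed); adds() above
# passes 1 accordingly.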
670 hasset = matchmod.patkind(pat) == 'set'
668 hasset = matchmod.patkind(pat) == 'set'
671
669
672 mcache = [None]
670 mcache = [None]
673 def matches(x):
671 def matches(x):
674 c = repo[x]
672 c = repo[x]
675 if not mcache[0] or hasset:
673 if not mcache[0] or hasset:
676 mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
674 mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
677 m = mcache[0]
675 m = mcache[0]
678 fname = None
676 fname = None
679 if not m.anypats() and len(m.files()) == 1:
677 if not m.anypats() and len(m.files()) == 1:
680 fname = m.files()[0]
678 fname = m.files()[0]
681 if fname is not None:
679 if fname is not None:
682 if fname not in c.files():
680 if fname not in c.files():
683 return False
681 return False
684 else:
682 else:
685 for f in c.files():
683 for f in c.files():
686 if m(f):
684 if m(f):
687 break
685 break
688 else:
686 else:
689 return False
687 return False
690 files = repo.status(c.p1().node(), c.node())[field]
688 files = repo.status(c.p1().node(), c.node())[field]
691 if fname is not None:
689 if fname is not None:
692 if fname in files:
690 if fname in files:
693 return True
691 return True
694 else:
692 else:
695 for f in files:
693 for f in files:
696 if m(f):
694 if m(f):
697 return True
695 return True
698
696
699 return subset.filter(matches, condrepr=('<status[%r] %r>', field, pat))
697 return subset.filter(matches, condrepr=('<status[%r] %r>', field, pat))
700
698
701 def _children(repo, subset, parentset):
699 def _children(repo, subset, parentset):
702 if not parentset:
700 if not parentset:
703 return baseset()
701 return baseset()
704 cs = set()
702 cs = set()
705 pr = repo.changelog.parentrevs
703 pr = repo.changelog.parentrevs
706 minrev = parentset.min()
704 minrev = parentset.min()
707 for r in subset:
705 for r in subset:
708 if r <= minrev:
706 if r <= minrev:
709 continue
707 continue
710 for p in pr(r):
708 for p in pr(r):
711 if p in parentset:
709 if p in parentset:
712 cs.add(r)
710 cs.add(r)
713 return baseset(cs)
711 return baseset(cs)
714
712
715 @predicate('children(set)', safe=True)
713 @predicate('children(set)', safe=True)
716 def children(repo, subset, x):
714 def children(repo, subset, x):
717 """Child changesets of changesets in set.
715 """Child changesets of changesets in set.
718 """
716 """
719 s = getset(repo, fullreposet(repo), x)
717 s = getset(repo, fullreposet(repo), x)
720 cs = _children(repo, subset, s)
718 cs = _children(repo, subset, s)
721 return subset & cs
719 return subset & cs
722
720
723 @predicate('closed()', safe=True)
721 @predicate('closed()', safe=True)
724 def closed(repo, subset, x):
722 def closed(repo, subset, x):
725 """Changeset is closed.
723 """Changeset is closed.
726 """
724 """
727 # i18n: "closed" is a keyword
725 # i18n: "closed" is a keyword
728 getargs(x, 0, 0, _("closed takes no arguments"))
726 getargs(x, 0, 0, _("closed takes no arguments"))
729 return subset.filter(lambda r: repo[r].closesbranch(),
727 return subset.filter(lambda r: repo[r].closesbranch(),
730 condrepr='<branch closed>')
728 condrepr='<branch closed>')
731
729
732 @predicate('contains(pattern)')
730 @predicate('contains(pattern)')
733 def contains(repo, subset, x):
731 def contains(repo, subset, x):
734 """The revision's manifest contains a file matching pattern (but might not
732 """The revision's manifest contains a file matching pattern (but might not
735 modify it). See :hg:`help patterns` for information about file patterns.
733 modify it). See :hg:`help patterns` for information about file patterns.
736
734
737 The pattern without explicit kind like ``glob:`` is expected to be
735 The pattern without explicit kind like ``glob:`` is expected to be
738 relative to the current directory and match against a file exactly
736 relative to the current directory and match against a file exactly
739 for efficiency.
737 for efficiency.
740 """
738 """
741 # i18n: "contains" is a keyword
739 # i18n: "contains" is a keyword
742 pat = getstring(x, _("contains requires a pattern"))
740 pat = getstring(x, _("contains requires a pattern"))
743
741
744 def matches(x):
742 def matches(x):
745 if not matchmod.patkind(pat):
743 if not matchmod.patkind(pat):
746 pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
744 pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
747 if pats in repo[x]:
745 if pats in repo[x]:
748 return True
746 return True
749 else:
747 else:
750 c = repo[x]
748 c = repo[x]
751 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
749 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
752 for f in c.manifest():
750 for f in c.manifest():
753 if m(f):
751 if m(f):
754 return True
752 return True
755 return False
753 return False
756
754
757 return subset.filter(matches, condrepr=('<contains %r>', pat))
755 return subset.filter(matches, condrepr=('<contains %r>', pat))
758
756
759 @predicate('converted([id])', safe=True)
757 @predicate('converted([id])', safe=True)
760 def converted(repo, subset, x):
758 def converted(repo, subset, x):
761 """Changesets converted from the given identifier in the old repository if
759 """Changesets converted from the given identifier in the old repository if
762 present, or all converted changesets if no identifier is specified.
760 present, or all converted changesets if no identifier is specified.
763 """
761 """
764
762
765 # There is exactly no chance of resolving the revision, so do a simple
763 # There is exactly no chance of resolving the revision, so do a simple
766 # string compare and hope for the best
764 # string compare and hope for the best
767
765
768 rev = None
766 rev = None
769 # i18n: "converted" is a keyword
767 # i18n: "converted" is a keyword
770 l = getargs(x, 0, 1, _('converted takes one or no arguments'))
768 l = getargs(x, 0, 1, _('converted takes one or no arguments'))
771 if l:
769 if l:
772 # i18n: "converted" is a keyword
770 # i18n: "converted" is a keyword
773 rev = getstring(l[0], _('converted requires a revision'))
771 rev = getstring(l[0], _('converted requires a revision'))
774
772
775 def _matchvalue(r):
773 def _matchvalue(r):
776 source = repo[r].extra().get('convert_revision', None)
774 source = repo[r].extra().get('convert_revision', None)
777 return source is not None and (rev is None or source.startswith(rev))
775 return source is not None and (rev is None or source.startswith(rev))
778
776
779 return subset.filter(lambda r: _matchvalue(r),
777 return subset.filter(lambda r: _matchvalue(r),
780 condrepr=('<converted %r>', rev))
778 condrepr=('<converted %r>', rev))
781
779
782 @predicate('date(interval)', safe=True)
780 @predicate('date(interval)', safe=True)
783 def date(repo, subset, x):
781 def date(repo, subset, x):
784 """Changesets within the interval, see :hg:`help dates`.
782 """Changesets within the interval, see :hg:`help dates`.
785 """
783 """
786 # i18n: "date" is a keyword
784 # i18n: "date" is a keyword
787 ds = getstring(x, _("date requires a string"))
785 ds = getstring(x, _("date requires a string"))
788 dm = util.matchdate(ds)
786 dm = util.matchdate(ds)
789 return subset.filter(lambda x: dm(repo[x].date()[0]),
787 return subset.filter(lambda x: dm(repo[x].date()[0]),
790 condrepr=('<date %r>', ds))
788 condrepr=('<date %r>', ds))
791
789
792 @predicate('desc(string)', safe=True)
790 @predicate('desc(string)', safe=True)
793 def desc(repo, subset, x):
791 def desc(repo, subset, x):
794 """Search commit message for string. The match is case-insensitive.
792 """Search commit message for string. The match is case-insensitive.
795 """
793 """
796 # i18n: "desc" is a keyword
794 # i18n: "desc" is a keyword
797 ds = encoding.lower(getstring(x, _("desc requires a string")))
795 ds = encoding.lower(getstring(x, _("desc requires a string")))
798
796
799 def matches(x):
797 def matches(x):
800 c = repo[x]
798 c = repo[x]
801 return ds in encoding.lower(c.description())
799 return ds in encoding.lower(c.description())
802
800
803 return subset.filter(matches, condrepr=('<desc %r>', ds))
801 return subset.filter(matches, condrepr=('<desc %r>', ds))
804
802
805 def _descendants(repo, subset, x, followfirst=False):
803 def _descendants(repo, subset, x, followfirst=False):
806 roots = getset(repo, fullreposet(repo), x)
804 roots = getset(repo, fullreposet(repo), x)
807 if not roots:
805 if not roots:
808 return baseset()
806 return baseset()
809 s = _revdescendants(repo, roots, followfirst)
807 s = _revdescendants(repo, roots, followfirst)
810
808
811 # Both sets need to be ascending in order to lazily return the union
809 # Both sets need to be ascending in order to lazily return the union
812 # in the correct order.
810 # in the correct order.
813 base = subset & roots
811 base = subset & roots
814 desc = subset & s
812 desc = subset & s
815 result = base + desc
813 result = base + desc
816 if subset.isascending():
814 if subset.isascending():
817 result.sort()
815 result.sort()
818 elif subset.isdescending():
816 elif subset.isdescending():
819 result.sort(reverse=True)
817 result.sort(reverse=True)
820 else:
818 else:
821 result = subset & result
819 result = subset & result
822 return result
820 return result
823
821
824 @predicate('descendants(set)', safe=True)
822 @predicate('descendants(set)', safe=True)
825 def descendants(repo, subset, x):
823 def descendants(repo, subset, x):
826 """Changesets which are descendants of changesets in set.
824 """Changesets which are descendants of changesets in set.
827 """
825 """
828 return _descendants(repo, subset, x)
826 return _descendants(repo, subset, x)
829
827
830 @predicate('_firstdescendants', safe=True)
828 @predicate('_firstdescendants', safe=True)
831 def _firstdescendants(repo, subset, x):
829 def _firstdescendants(repo, subset, x):
832 # ``_firstdescendants(set)``
830 # ``_firstdescendants(set)``
833 # Like ``descendants(set)`` but follows only the first parents.
831 # Like ``descendants(set)`` but follows only the first parents.
834 return _descendants(repo, subset, x, followfirst=True)
832 return _descendants(repo, subset, x, followfirst=True)
835
833
836 @predicate('destination([set])', safe=True)
834 @predicate('destination([set])', safe=True)
837 def destination(repo, subset, x):
835 def destination(repo, subset, x):
838 """Changesets that were created by a graft, transplant or rebase operation,
836 """Changesets that were created by a graft, transplant or rebase operation,
839 with the given revisions specified as the source. Omitting the optional set
837 with the given revisions specified as the source. Omitting the optional set
840 is the same as passing all().
838 is the same as passing all().
841 """
839 """
842 if x is not None:
840 if x is not None:
843 sources = getset(repo, fullreposet(repo), x)
841 sources = getset(repo, fullreposet(repo), x)
844 else:
842 else:
845 sources = fullreposet(repo)
843 sources = fullreposet(repo)
846
844
847 dests = set()
845 dests = set()
848
846
849 # subset contains all of the possible destinations that can be returned, so
847 # subset contains all of the possible destinations that can be returned, so
850 # iterate over them and see if their source(s) were provided in the arg set.
848 # iterate over them and see if their source(s) were provided in the arg set.
851 # Even if the immediate src of r is not in the arg set, src's source (or
849 # Even if the immediate src of r is not in the arg set, src's source (or
852 # further back) may be. Scanning back further than the immediate src allows
850 # further back) may be. Scanning back further than the immediate src allows
853 # transitive transplants and rebases to yield the same results as transitive
851 # transitive transplants and rebases to yield the same results as transitive
854 # grafts.
852 # grafts.
855 for r in subset:
853 for r in subset:
856 src = _getrevsource(repo, r)
854 src = _getrevsource(repo, r)
857 lineage = None
855 lineage = None
858
856
859 while src is not None:
857 while src is not None:
860 if lineage is None:
858 if lineage is None:
861 lineage = list()
859 lineage = list()
862
860
863 lineage.append(r)
861 lineage.append(r)
864
862
865 # The visited lineage is a match if the current source is in the arg
863 # The visited lineage is a match if the current source is in the arg
866 # set. Since every candidate dest is visited by way of iterating
864 # set. Since every candidate dest is visited by way of iterating
867 # subset, any dests further back in the lineage will be tested by a
865 # subset, any dests further back in the lineage will be tested by a
868 # different iteration over subset. Likewise, if the src was already
866 # different iteration over subset. Likewise, if the src was already
869 # selected, the current lineage can be selected without going back
867 # selected, the current lineage can be selected without going back
870 # further.
868 # further.
871 if src in sources or src in dests:
869 if src in sources or src in dests:
872 dests.update(lineage)
870 dests.update(lineage)
873 break
871 break
874
872
875 r = src
873 r = src
876 src = _getrevsource(repo, r)
874 src = _getrevsource(repo, r)
877
875
878 return subset.filter(dests.__contains__,
876 return subset.filter(dests.__contains__,
879 condrepr=lambda: '<destination %r>' % sorted(dests))
877 condrepr=lambda: '<destination %r>' % sorted(dests))
880
878
881 @predicate('divergent()', safe=True)
879 @predicate('divergent()', safe=True)
882 def divergent(repo, subset, x):
880 def divergent(repo, subset, x):
883 """
881 """
884 Final successors of changesets with an alternative set of final successors.
882 Final successors of changesets with an alternative set of final successors.
885 """
883 """
886 # i18n: "divergent" is a keyword
884 # i18n: "divergent" is a keyword
887 getargs(x, 0, 0, _("divergent takes no arguments"))
885 getargs(x, 0, 0, _("divergent takes no arguments"))
888 divergent = obsmod.getrevs(repo, 'divergent')
886 divergent = obsmod.getrevs(repo, 'divergent')
889 return subset & divergent
887 return subset & divergent
890
888
891 @predicate('extinct()', safe=True)
889 @predicate('extinct()', safe=True)
892 def extinct(repo, subset, x):
890 def extinct(repo, subset, x):
893 """Obsolete changesets with obsolete descendants only.
891 """Obsolete changesets with obsolete descendants only.
894 """
892 """
895 # i18n: "extinct" is a keyword
893 # i18n: "extinct" is a keyword
896 getargs(x, 0, 0, _("extinct takes no arguments"))
894 getargs(x, 0, 0, _("extinct takes no arguments"))
897 extincts = obsmod.getrevs(repo, 'extinct')
895 extincts = obsmod.getrevs(repo, 'extinct')
898 return subset & extincts
896 return subset & extincts
899
897
900 @predicate('extra(label, [value])', safe=True)
898 @predicate('extra(label, [value])', safe=True)
901 def extra(repo, subset, x):
899 def extra(repo, subset, x):
902 """Changesets with the given label in the extra metadata, with the given
900 """Changesets with the given label in the extra metadata, with the given
903 optional value.
901 optional value.
904
902
905 If `value` starts with `re:`, the remainder of the value is treated as
903 If `value` starts with `re:`, the remainder of the value is treated as
906 a regular expression. To match a value that actually starts with `re:`,
904 a regular expression. To match a value that actually starts with `re:`,
907 use the prefix `literal:`.
905 use the prefix `literal:`.
908 """
906 """
909 args = getargsdict(x, 'extra', 'label value')
907 args = getargsdict(x, 'extra', 'label value')
910 if 'label' not in args:
908 if 'label' not in args:
911 # i18n: "extra" is a keyword
909 # i18n: "extra" is a keyword
912 raise error.ParseError(_('extra takes at least 1 argument'))
910 raise error.ParseError(_('extra takes at least 1 argument'))
913 # i18n: "extra" is a keyword
911 # i18n: "extra" is a keyword
914 label = getstring(args['label'], _('first argument to extra must be '
912 label = getstring(args['label'], _('first argument to extra must be '
915 'a string'))
913 'a string'))
916 value = None
914 value = None
917
915
918 if 'value' in args:
916 if 'value' in args:
919 # i18n: "extra" is a keyword
917 # i18n: "extra" is a keyword
920 value = getstring(args['value'], _('second argument to extra must be '
918 value = getstring(args['value'], _('second argument to extra must be '
921 'a string'))
919 'a string'))
922 kind, value, matcher = util.stringmatcher(value)
920 kind, value, matcher = util.stringmatcher(value)
923
921
924 def _matchvalue(r):
922 def _matchvalue(r):
925 extra = repo[r].extra()
923 extra = repo[r].extra()
926 return label in extra and (value is None or matcher(extra[label]))
924 return label in extra and (value is None or matcher(extra[label]))
927
925
928 return subset.filter(lambda r: _matchvalue(r),
926 return subset.filter(lambda r: _matchvalue(r),
929 condrepr=('<extra[%r] %r>', label, value))
927 condrepr=('<extra[%r] %r>', label, value))
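# Illustrative usage (the labels are examples, not defined by this file):
#
#   repo.revs("extra('rebase_source')")          # any value for the label
#   repo.revs("extra('branch', 're:^stable')")   # value matched as a regex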
930
928
931 @predicate('filelog(pattern)', safe=True)
929 @predicate('filelog(pattern)', safe=True)
932 def filelog(repo, subset, x):
930 def filelog(repo, subset, x):
933 """Changesets connected to the specified filelog.
931 """Changesets connected to the specified filelog.
934
932
935 For performance reasons, visits only revisions mentioned in the file-level
933 For performance reasons, visits only revisions mentioned in the file-level
936 filelog, rather than filtering through all changesets (much faster, but
934 filelog, rather than filtering through all changesets (much faster, but
937 doesn't include deletes or duplicate changes). For a slower, more accurate
935 doesn't include deletes or duplicate changes). For a slower, more accurate
938 result, use ``file()``.
936 result, use ``file()``.
939
937
940 The pattern without explicit kind like ``glob:`` is expected to be
938 The pattern without explicit kind like ``glob:`` is expected to be
941 relative to the current directory and match against a file exactly
939 relative to the current directory and match against a file exactly
942 for efficiency.
940 for efficiency.
943
941
944 If some linkrev points to revisions filtered by the current repoview, we'll
942 If some linkrev points to revisions filtered by the current repoview, we'll
945 work around it to return a non-filtered value.
943 work around it to return a non-filtered value.
946 """
944 """
947
945
948 # i18n: "filelog" is a keyword
946 # i18n: "filelog" is a keyword
949 pat = getstring(x, _("filelog requires a pattern"))
947 pat = getstring(x, _("filelog requires a pattern"))
950 s = set()
948 s = set()
951 cl = repo.changelog
949 cl = repo.changelog
952
950
953 if not matchmod.patkind(pat):
951 if not matchmod.patkind(pat):
954 f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
952 f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
955 files = [f]
953 files = [f]
956 else:
954 else:
957 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
955 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
958 files = (f for f in repo[None] if m(f))
956 files = (f for f in repo[None] if m(f))
959
957
960 for f in files:
958 for f in files:
961 fl = repo.file(f)
959 fl = repo.file(f)
962 known = {}
960 known = {}
963 scanpos = 0
961 scanpos = 0
964 for fr in list(fl):
962 for fr in list(fl):
965 fn = fl.node(fr)
963 fn = fl.node(fr)
966 if fn in known:
964 if fn in known:
967 s.add(known[fn])
965 s.add(known[fn])
968 continue
966 continue
969
967
970 lr = fl.linkrev(fr)
968 lr = fl.linkrev(fr)
971 if lr in cl:
969 if lr in cl:
972 s.add(lr)
970 s.add(lr)
973 elif scanpos is not None:
971 elif scanpos is not None:
974 # lowest matching changeset is filtered, scan further
972 # lowest matching changeset is filtered, scan further
975 # ahead in changelog
973 # ahead in changelog
976 start = max(lr, scanpos) + 1
974 start = max(lr, scanpos) + 1
977 scanpos = None
975 scanpos = None
978 for r in cl.revs(start):
976 for r in cl.revs(start):
979 # minimize parsing of non-matching entries
977 # minimize parsing of non-matching entries
980 if f in cl.revision(r) and f in cl.readfiles(r):
978 if f in cl.revision(r) and f in cl.readfiles(r):
981 try:
979 try:
982 # try to use manifest delta fastpath
980 # try to use manifest delta fastpath
983 n = repo[r].filenode(f)
981 n = repo[r].filenode(f)
984 if n not in known:
982 if n not in known:
985 if n == fn:
983 if n == fn:
986 s.add(r)
984 s.add(r)
987 scanpos = r
985 scanpos = r
988 break
986 break
989 else:
987 else:
990 known[n] = r
988 known[n] = r
991 except error.ManifestLookupError:
989 except error.ManifestLookupError:
992 # deletion in changelog
990 # deletion in changelog
993 continue
991 continue
994
992
995 return subset & s
993 return subset & s
996
994
997 @predicate('first(set, [n])', safe=True)
995 @predicate('first(set, [n])', safe=True)
998 def first(repo, subset, x):
996 def first(repo, subset, x):
999 """An alias for limit().
997 """An alias for limit().
1000 """
998 """
1001 return limit(repo, subset, x)
999 return limit(repo, subset, x)
1002
1000
1003 def _follow(repo, subset, x, name, followfirst=False):
1001 def _follow(repo, subset, x, name, followfirst=False):
1004 l = getargs(x, 0, 1, _("%s takes no arguments or a pattern") % name)
1002 l = getargs(x, 0, 1, _("%s takes no arguments or a pattern") % name)
1005 c = repo['.']
1003 c = repo['.']
1006 if l:
1004 if l:
1007 x = getstring(l[0], _("%s expected a pattern") % name)
1005 x = getstring(l[0], _("%s expected a pattern") % name)
1008 matcher = matchmod.match(repo.root, repo.getcwd(), [x],
1006 matcher = matchmod.match(repo.root, repo.getcwd(), [x],
1009 ctx=repo[None], default='path')
1007 ctx=repo[None], default='path')
1010
1008
1011 files = c.manifest().walk(matcher)
1009 files = c.manifest().walk(matcher)
1012
1010
1013 s = set()
1011 s = set()
1014 for fname in files:
1012 for fname in files:
1015 fctx = c[fname]
1013 fctx = c[fname]
1016 s = s.union(set(c.rev() for c in fctx.ancestors(followfirst)))
1014 s = s.union(set(c.rev() for c in fctx.ancestors(followfirst)))
1017 # include the revision responsible for the most recent version
1015 # include the revision responsible for the most recent version
1018 s.add(fctx.introrev())
1016 s.add(fctx.introrev())
1019 else:
1017 else:
1020 s = _revancestors(repo, baseset([c.rev()]), followfirst)
1018 s = _revancestors(repo, baseset([c.rev()]), followfirst)
1021
1019
1022 return subset & s
1020 return subset & s
1023
1021
1024 @predicate('follow([pattern])', safe=True)
1022 @predicate('follow([pattern])', safe=True)
1025 def follow(repo, subset, x):
1023 def follow(repo, subset, x):
1026 """
1024 """
1027 An alias for ``::.`` (ancestors of the working directory's first parent).
1025 An alias for ``::.`` (ancestors of the working directory's first parent).
1028 If a pattern is specified, the histories of files matching the given
1026 If a pattern is specified, the histories of files matching the given
1029 pattern are followed, including copies.
1027 pattern are followed, including copies.
1030 """
1028 """
1031 return _follow(repo, subset, x, 'follow')
1029 return _follow(repo, subset, x, 'follow')
1032
1030
1033 @predicate('_followfirst', safe=True)
1031 @predicate('_followfirst', safe=True)
1034 def _followfirst(repo, subset, x):
1032 def _followfirst(repo, subset, x):
1035 # ``followfirst([pattern])``
1033 # ``followfirst([pattern])``
1036 # Like ``follow([pattern])`` but follows only the first parent of
1034 # Like ``follow([pattern])`` but follows only the first parent of
1037 # every revision or file revision.
1035 # every revision or file revision.
1038 return _follow(repo, subset, x, '_followfirst', followfirst=True)
1036 return _follow(repo, subset, x, '_followfirst', followfirst=True)
1039
1037
1040 @predicate('all()', safe=True)
1038 @predicate('all()', safe=True)
1041 def getall(repo, subset, x):
1039 def getall(repo, subset, x):
1042 """All changesets, the same as ``0:tip``.
1040 """All changesets, the same as ``0:tip``.
1043 """
1041 """
1044 # i18n: "all" is a keyword
1042 # i18n: "all" is a keyword
1045 getargs(x, 0, 0, _("all takes no arguments"))
1043 getargs(x, 0, 0, _("all takes no arguments"))
1046 return subset & spanset(repo) # drop "null" if any
1044 return subset & spanset(repo) # drop "null" if any
1047
1045
1048 @predicate('grep(regex)')
1046 @predicate('grep(regex)')
1049 def grep(repo, subset, x):
1047 def grep(repo, subset, x):
1050 """Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
1048 """Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
1051 to ensure special escape characters are handled correctly. Unlike
1049 to ensure special escape characters are handled correctly. Unlike
1052 ``keyword(string)``, the match is case-sensitive.
1050 ``keyword(string)``, the match is case-sensitive.
1053 """
1051 """
1054 try:
1052 try:
1055 # i18n: "grep" is a keyword
1053 # i18n: "grep" is a keyword
1056 gr = re.compile(getstring(x, _("grep requires a string")))
1054 gr = re.compile(getstring(x, _("grep requires a string")))
1057 except re.error as e:
1055 except re.error as e:
1058 raise error.ParseError(_('invalid match pattern: %s') % e)
1056 raise error.ParseError(_('invalid match pattern: %s') % e)
1059
1057
1060 def matches(x):
1058 def matches(x):
1061 c = repo[x]
1059 c = repo[x]
1062 for e in c.files() + [c.user(), c.description()]:
1060 for e in c.files() + [c.user(), c.description()]:
1063 if gr.search(e):
1061 if gr.search(e):
1064 return True
1062 return True
1065 return False
1063 return False
1066
1064
1067 return subset.filter(matches, condrepr=('<grep %r>', gr.pattern))
1065 return subset.filter(matches, condrepr=('<grep %r>', gr.pattern))
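# Illustrative usage: unlike keyword(), the match is a case-sensitive regular
# expression, e.g.
#
#   repo.revs('grep(r"issue\d+")')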
1068
1066
1069 @predicate('_matchfiles', safe=True)
1067 @predicate('_matchfiles', safe=True)
1070 def _matchfiles(repo, subset, x):
1068 def _matchfiles(repo, subset, x):
1071 # _matchfiles takes a revset list of prefixed arguments:
1069 # _matchfiles takes a revset list of prefixed arguments:
1072 #
1070 #
1073 # [p:foo, i:bar, x:baz]
1071 # [p:foo, i:bar, x:baz]
1074 #
1072 #
1075 # builds a match object from them and filters subset. Allowed
1073 # builds a match object from them and filters subset. Allowed
1076 # prefixes are 'p:' for regular patterns, 'i:' for include
1074 # prefixes are 'p:' for regular patterns, 'i:' for include
1077 # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
1075 # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
1078 # a revision identifier, or the empty string to reference the
1076 # a revision identifier, or the empty string to reference the
1079 # working directory, from which the match object is
1077 # working directory, from which the match object is
1080 # initialized. Use 'd:' to set the default matching mode, default
1078 # initialized. Use 'd:' to set the default matching mode, default
1081 # to 'glob'. At most one 'r:' and 'd:' argument can be passed.
1079 # to 'glob'. At most one 'r:' and 'd:' argument can be passed.
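# For example (illustrative), file('*.py') below ends up calling
#   _matchfiles(repo, subset, ('string', 'p:*.py'))
# with no 'd:' argument, so the pattern is matched with the default
# 'glob' kind.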
1082
1080
1083 l = getargs(x, 1, -1, "_matchfiles requires at least one argument")
1081 l = getargs(x, 1, -1, "_matchfiles requires at least one argument")
1084 pats, inc, exc = [], [], []
1082 pats, inc, exc = [], [], []
1085 rev, default = None, None
1083 rev, default = None, None
1086 for arg in l:
1084 for arg in l:
1087 s = getstring(arg, "_matchfiles requires string arguments")
1085 s = getstring(arg, "_matchfiles requires string arguments")
1088 prefix, value = s[:2], s[2:]
1086 prefix, value = s[:2], s[2:]
1089 if prefix == 'p:':
1087 if prefix == 'p:':
1090 pats.append(value)
1088 pats.append(value)
1091 elif prefix == 'i:':
1089 elif prefix == 'i:':
1092 inc.append(value)
1090 inc.append(value)
1093 elif prefix == 'x:':
1091 elif prefix == 'x:':
1094 exc.append(value)
1092 exc.append(value)
1095 elif prefix == 'r:':
1093 elif prefix == 'r:':
1096 if rev is not None:
1094 if rev is not None:
1097 raise error.ParseError('_matchfiles expected at most one '
1095 raise error.ParseError('_matchfiles expected at most one '
1098 'revision')
1096 'revision')
1099 if value != '': # empty means working directory; leave rev as None
1097 if value != '': # empty means working directory; leave rev as None
1100 rev = value
1098 rev = value
1101 elif prefix == 'd:':
1099 elif prefix == 'd:':
1102 if default is not None:
1100 if default is not None:
1103 raise error.ParseError('_matchfiles expected at most one '
1101 raise error.ParseError('_matchfiles expected at most one '
1104 'default mode')
1102 'default mode')
1105 default = value
1103 default = value
1106 else:
1104 else:
1107 raise error.ParseError('invalid _matchfiles prefix: %s' % prefix)
1105 raise error.ParseError('invalid _matchfiles prefix: %s' % prefix)
1108 if not default:
1106 if not default:
1109 default = 'glob'
1107 default = 'glob'
1110
1108
1111 m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
1109 m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
1112 exclude=exc, ctx=repo[rev], default=default)
1110 exclude=exc, ctx=repo[rev], default=default)
1113
1111
1114 # This directly reads the changelog data, as creating a changectx for all
1112 # This directly reads the changelog data, as creating a changectx for all
1115 # revisions is quite expensive.
1113 # revisions is quite expensive.
1116 getfiles = repo.changelog.readfiles
1114 getfiles = repo.changelog.readfiles
1117 wdirrev = node.wdirrev
1115 wdirrev = node.wdirrev
1118 def matches(x):
1116 def matches(x):
1119 if x == wdirrev:
1117 if x == wdirrev:
1120 files = repo[x].files()
1118 files = repo[x].files()
1121 else:
1119 else:
1122 files = getfiles(x)
1120 files = getfiles(x)
1123 for f in files:
1121 for f in files:
1124 if m(f):
1122 if m(f):
1125 return True
1123 return True
1126 return False
1124 return False
1127
1125
1128 return subset.filter(matches,
1126 return subset.filter(matches,
1129 condrepr=('<matchfiles patterns=%r, include=%r '
1127 condrepr=('<matchfiles patterns=%r, include=%r '
1130 'exclude=%r, default=%r, rev=%r>',
1128 'exclude=%r, default=%r, rev=%r>',
1131 pats, inc, exc, default, rev))
1129 pats, inc, exc, default, rev))
1132
1130
1133 @predicate('file(pattern)', safe=True)
1131 @predicate('file(pattern)', safe=True)
1134 def hasfile(repo, subset, x):
1132 def hasfile(repo, subset, x):
1135 """Changesets affecting files matched by pattern.
1133 """Changesets affecting files matched by pattern.
1136
1134
1137 For a faster but less accurate result, consider using ``filelog()``
1135 For a faster but less accurate result, consider using ``filelog()``
1138 instead.
1136 instead.
1139
1137
1140 This predicate uses ``glob:`` as the default kind of pattern.
1138 This predicate uses ``glob:`` as the default kind of pattern.
1141 """
1139 """
1142 # i18n: "file" is a keyword
1140 # i18n: "file" is a keyword
1143 pat = getstring(x, _("file requires a pattern"))
1141 pat = getstring(x, _("file requires a pattern"))
1144 return _matchfiles(repo, subset, ('string', 'p:' + pat))
1142 return _matchfiles(repo, subset, ('string', 'p:' + pat))
1145
1143
1146 @predicate('head()', safe=True)
1144 @predicate('head()', safe=True)
1147 def head(repo, subset, x):
1145 def head(repo, subset, x):
1148 """Changeset is a named branch head.
1146 """Changeset is a named branch head.
1149 """
1147 """
1150 # i18n: "head" is a keyword
1148 # i18n: "head" is a keyword
1151 getargs(x, 0, 0, _("head takes no arguments"))
1149 getargs(x, 0, 0, _("head takes no arguments"))
1152 hs = set()
1150 hs = set()
1153 cl = repo.changelog
1151 cl = repo.changelog
1154 for ls in repo.branchmap().itervalues():
1152 for ls in repo.branchmap().itervalues():
1155 hs.update(cl.rev(h) for h in ls)
1153 hs.update(cl.rev(h) for h in ls)
1156 return subset & baseset(hs)
1154 return subset & baseset(hs)
1157
1155
1158 @predicate('heads(set)', safe=True)
1156 @predicate('heads(set)', safe=True)
1159 def heads(repo, subset, x):
1157 def heads(repo, subset, x):
1160 """Members of set with no children in set.
1158 """Members of set with no children in set.
1161 """
1159 """
1162 s = getset(repo, subset, x)
1160 s = getset(repo, subset, x)
1163 ps = parents(repo, subset, x)
1161 ps = parents(repo, subset, x)
1164 return s - ps
1162 return s - ps
1165
1163
1166 @predicate('hidden()', safe=True)
1164 @predicate('hidden()', safe=True)
1167 def hidden(repo, subset, x):
1165 def hidden(repo, subset, x):
1168 """Hidden changesets.
1166 """Hidden changesets.
1169 """
1167 """
1170 # i18n: "hidden" is a keyword
1168 # i18n: "hidden" is a keyword
1171 getargs(x, 0, 0, _("hidden takes no arguments"))
1169 getargs(x, 0, 0, _("hidden takes no arguments"))
1172 hiddenrevs = repoview.filterrevs(repo, 'visible')
1170 hiddenrevs = repoview.filterrevs(repo, 'visible')
1173 return subset & hiddenrevs
1171 return subset & hiddenrevs
1174
1172
1175 @predicate('keyword(string)', safe=True)
1173 @predicate('keyword(string)', safe=True)
1176 def keyword(repo, subset, x):
1174 def keyword(repo, subset, x):
1177 """Search commit message, user name, and names of changed files for
1175 """Search commit message, user name, and names of changed files for
1178 string. The match is case-insensitive.
1176 string. The match is case-insensitive.
1179 """
1177 """
1180 # i18n: "keyword" is a keyword
1178 # i18n: "keyword" is a keyword
1181 kw = encoding.lower(getstring(x, _("keyword requires a string")))
1179 kw = encoding.lower(getstring(x, _("keyword requires a string")))
1182
1180
1183 def matches(r):
1181 def matches(r):
1184 c = repo[r]
1182 c = repo[r]
1185 return any(kw in encoding.lower(t)
1183 return any(kw in encoding.lower(t)
1186 for t in c.files() + [c.user(), c.description()])
1184 for t in c.files() + [c.user(), c.description()])
1187
1185
1188 return subset.filter(matches, condrepr=('<keyword %r>', kw))
1186 return subset.filter(matches, condrepr=('<keyword %r>', kw))
1189
1187
1190 @predicate('limit(set[, n[, offset]])', safe=True)
1188 @predicate('limit(set[, n[, offset]])', safe=True)
1191 def limit(repo, subset, x):
1189 def limit(repo, subset, x):
1192 """First n members of set, defaulting to 1, starting from offset.
1190 """First n members of set, defaulting to 1, starting from offset.
1193 """
1191 """
1194 args = getargsdict(x, 'limit', 'set n offset')
1192 args = getargsdict(x, 'limit', 'set n offset')
1195 if 'set' not in args:
1193 if 'set' not in args:
1196 # i18n: "limit" is a keyword
1194 # i18n: "limit" is a keyword
1197 raise error.ParseError(_("limit requires one to three arguments"))
1195 raise error.ParseError(_("limit requires one to three arguments"))
1198 try:
1196 try:
1199 lim, ofs = 1, 0
1197 lim, ofs = 1, 0
1200 if 'n' in args:
1198 if 'n' in args:
1201 # i18n: "limit" is a keyword
1199 # i18n: "limit" is a keyword
1202 lim = int(getstring(args['n'], _("limit requires a number")))
1200 lim = int(getstring(args['n'], _("limit requires a number")))
1203 if 'offset' in args:
1201 if 'offset' in args:
1204 # i18n: "limit" is a keyword
1202 # i18n: "limit" is a keyword
1205 ofs = int(getstring(args['offset'], _("limit requires a number")))
1203 ofs = int(getstring(args['offset'], _("limit requires a number")))
1206 if ofs < 0:
1204 if ofs < 0:
1207 raise error.ParseError(_("negative offset"))
1205 raise error.ParseError(_("negative offset"))
1208 except (TypeError, ValueError):
1206 except (TypeError, ValueError):
1209 # i18n: "limit" is a keyword
1207 # i18n: "limit" is a keyword
1210 raise error.ParseError(_("limit expects a number"))
1208 raise error.ParseError(_("limit expects a number"))
1211 os = getset(repo, fullreposet(repo), args['set'])
1209 os = getset(repo, fullreposet(repo), args['set'])
1212 result = []
1210 result = []
1213 it = iter(os)
1211 it = iter(os)
1214 for x in xrange(ofs):
1212 for x in xrange(ofs):
1215 y = next(it, None)
1213 y = next(it, None)
1216 if y is None:
1214 if y is None:
1217 break
1215 break
1218 for x in xrange(lim):
1216 for x in xrange(lim):
1219 y = next(it, None)
1217 y = next(it, None)
1220 if y is None:
1218 if y is None:
1221 break
1219 break
1222 elif y in subset:
1220 elif y in subset:
1223 result.append(y)
1221 result.append(y)
1224 return baseset(result, datarepr=('<limit n=%d, offset=%d, %r, %r>',
1222 return baseset(result, datarepr=('<limit n=%d, offset=%d, %r, %r>',
1225 lim, ofs, subset, os))
1223 lim, ofs, subset, os))
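# Worked example (illustrative): limit(all(), 2, 3) skips the first three
# members of all() and then yields the next two that are also in subset,
# so over revisions 0..9 it evaluates to [3, 4], assuming those revisions
# are in subset.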
1226
1224
1227 @predicate('last(set, [n])', safe=True)
1225 @predicate('last(set, [n])', safe=True)
1228 def last(repo, subset, x):
1226 def last(repo, subset, x):
1229 """Last n members of set, defaulting to 1.
1227 """Last n members of set, defaulting to 1.
1230 """
1228 """
1231 # i18n: "last" is a keyword
1229 # i18n: "last" is a keyword
1232 l = getargs(x, 1, 2, _("last requires one or two arguments"))
1230 l = getargs(x, 1, 2, _("last requires one or two arguments"))
1233 try:
1231 try:
1234 lim = 1
1232 lim = 1
1235 if len(l) == 2:
1233 if len(l) == 2:
1236 # i18n: "last" is a keyword
1234 # i18n: "last" is a keyword
1237 lim = int(getstring(l[1], _("last requires a number")))
1235 lim = int(getstring(l[1], _("last requires a number")))
1238 except (TypeError, ValueError):
1236 except (TypeError, ValueError):
1239 # i18n: "last" is a keyword
1237 # i18n: "last" is a keyword
1240 raise error.ParseError(_("last expects a number"))
1238 raise error.ParseError(_("last expects a number"))
1241 os = getset(repo, fullreposet(repo), l[0])
1239 os = getset(repo, fullreposet(repo), l[0])
1242 os.reverse()
1240 os.reverse()
1243 result = []
1241 result = []
1244 it = iter(os)
1242 it = iter(os)
1245 for x in xrange(lim):
1243 for x in xrange(lim):
1246 y = next(it, None)
1244 y = next(it, None)
1247 if y is None:
1245 if y is None:
1248 break
1246 break
1249 elif y in subset:
1247 elif y in subset:
1250 result.append(y)
1248 result.append(y)
1251 return baseset(result, datarepr=('<last n=%d, %r, %r>', lim, subset, os))
1249 return baseset(result, datarepr=('<last n=%d, %r, %r>', lim, subset, os))
1252
1250
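A similarly hedged sketch of last(): reverse the ordered input, then keep up to lim leading members that are also in the subset (last_members is hypothetical, not part of revset.py).

# Illustrative sketch only.
def last_members(values, subset, lim=1):
    result = []
    it = iter(reversed(values))
    for _ in range(lim):
        y = next(it, None)
        if y is None:
            break
        if y in subset:
            result.append(y)
    return result

assert last_members([0, 1, 2, 3], {0, 1, 2, 3}, lim=2) == [3, 2]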
1253 @predicate('max(set)', safe=True)
1251 @predicate('max(set)', safe=True)
1254 def maxrev(repo, subset, x):
1252 def maxrev(repo, subset, x):
1255 """Changeset with highest revision number in set.
1253 """Changeset with highest revision number in set.
1256 """
1254 """
1257 os = getset(repo, fullreposet(repo), x)
1255 os = getset(repo, fullreposet(repo), x)
1258 try:
1256 try:
1259 m = os.max()
1257 m = os.max()
1260 if m in subset:
1258 if m in subset:
1261 return baseset([m], datarepr=('<max %r, %r>', subset, os))
1259 return baseset([m], datarepr=('<max %r, %r>', subset, os))
1262 except ValueError:
1260 except ValueError:
1263 # os.max() throws a ValueError when the collection is empty.
1261 # os.max() throws a ValueError when the collection is empty.
1264 # Same as python's max().
1262 # Same as python's max().
1265 pass
1263 pass
1266 return baseset(datarepr=('<max %r, %r>', subset, os))
1264 return baseset(datarepr=('<max %r, %r>', subset, os))
1267
1265
1268 @predicate('merge()', safe=True)
1266 @predicate('merge()', safe=True)
1269 def merge(repo, subset, x):
1267 def merge(repo, subset, x):
1270 """Changeset is a merge changeset.
1268 """Changeset is a merge changeset.
1271 """
1269 """
1272 # i18n: "merge" is a keyword
1270 # i18n: "merge" is a keyword
1273 getargs(x, 0, 0, _("merge takes no arguments"))
1271 getargs(x, 0, 0, _("merge takes no arguments"))
1274 cl = repo.changelog
1272 cl = repo.changelog
1275 return subset.filter(lambda r: cl.parentrevs(r)[1] != -1,
1273 return subset.filter(lambda r: cl.parentrevs(r)[1] != -1,
1276 condrepr='<merge>')
1274 condrepr='<merge>')
1277
1275
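The merge test above boils down to "the second parent is not the null revision". A minimal sketch, assuming a made-up parentrevs mapping in place of the changelog:

# Illustrative sketch only: rev 3 is the only merge in this toy graph.
parentrevs = {0: (-1, -1), 1: (0, -1), 2: (0, -1), 3: (1, 2)}
merges = [r for r, ps in sorted(parentrevs.items()) if ps[1] != -1]
assert merges == [3]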
1278 @predicate('branchpoint()', safe=True)
1276 @predicate('branchpoint()', safe=True)
1279 def branchpoint(repo, subset, x):
1277 def branchpoint(repo, subset, x):
1280 """Changesets with more than one child.
1278 """Changesets with more than one child.
1281 """
1279 """
1282 # i18n: "branchpoint" is a keyword
1280 # i18n: "branchpoint" is a keyword
1283 getargs(x, 0, 0, _("branchpoint takes no arguments"))
1281 getargs(x, 0, 0, _("branchpoint takes no arguments"))
1284 cl = repo.changelog
1282 cl = repo.changelog
1285 if not subset:
1283 if not subset:
1286 return baseset()
1284 return baseset()
1287 # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
1285 # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
1288 # (and if it is not, it should be.)
1286 # (and if it is not, it should be.)
1289 baserev = min(subset)
1287 baserev = min(subset)
1290 parentscount = [0]*(len(repo) - baserev)
1288 parentscount = [0]*(len(repo) - baserev)
1291 for r in cl.revs(start=baserev + 1):
1289 for r in cl.revs(start=baserev + 1):
1292 for p in cl.parentrevs(r):
1290 for p in cl.parentrevs(r):
1293 if p >= baserev:
1291 if p >= baserev:
1294 parentscount[p - baserev] += 1
1292 parentscount[p - baserev] += 1
1295 return subset.filter(lambda r: parentscount[r - baserev] > 1,
1293 return subset.filter(lambda r: parentscount[r - baserev] > 1,
1296 condrepr='<branchpoint>')
1294 condrepr='<branchpoint>')
1297
1295
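The child-counting pass above can be sketched in isolation (hypothetical parentrevs data, not the real changelog API): every rev at or above the smallest rev of interest gets a counter, and each parent link increments its parent's counter.

# Illustrative sketch only: rev 0 has two children (1 and 2), so it is the
# only branch point in this toy graph.
parentrevs = {0: (-1, -1), 1: (0, -1), 2: (0, -1), 3: (1, -1)}
baserev = 0
counts = [0] * (len(parentrevs) - baserev)
for r in sorted(parentrevs):
    if r <= baserev:
        continue
    for p in parentrevs[r]:
        if p >= baserev:
            counts[p - baserev] += 1
branchpoints = [r for r in sorted(parentrevs) if counts[r - baserev] > 1]
assert branchpoints == [0]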
1298 @predicate('min(set)', safe=True)
1296 @predicate('min(set)', safe=True)
1299 def minrev(repo, subset, x):
1297 def minrev(repo, subset, x):
1300 """Changeset with lowest revision number in set.
1298 """Changeset with lowest revision number in set.
1301 """
1299 """
1302 os = getset(repo, fullreposet(repo), x)
1300 os = getset(repo, fullreposet(repo), x)
1303 try:
1301 try:
1304 m = os.min()
1302 m = os.min()
1305 if m in subset:
1303 if m in subset:
1306 return baseset([m], datarepr=('<min %r, %r>', subset, os))
1304 return baseset([m], datarepr=('<min %r, %r>', subset, os))
1307 except ValueError:
1305 except ValueError:
1308 # os.min() throws a ValueError when the collection is empty.
1306 # os.min() throws a ValueError when the collection is empty.
1309 # Same as python's min().
1307 # Same as python's min().
1310 pass
1308 pass
1311 return baseset(datarepr=('<min %r, %r>', subset, os))
1309 return baseset(datarepr=('<min %r, %r>', subset, os))
1312
1310
1313 @predicate('modifies(pattern)', safe=True)
1311 @predicate('modifies(pattern)', safe=True)
1314 def modifies(repo, subset, x):
1312 def modifies(repo, subset, x):
1315 """Changesets modifying files matched by pattern.
1313 """Changesets modifying files matched by pattern.
1316
1314
1317 The pattern without explicit kind like ``glob:`` is expected to be
1315 The pattern without explicit kind like ``glob:`` is expected to be
1318 relative to the current directory and match against a file or a
1316 relative to the current directory and match against a file or a
1319 directory.
1317 directory.
1320 """
1318 """
1321 # i18n: "modifies" is a keyword
1319 # i18n: "modifies" is a keyword
1322 pat = getstring(x, _("modifies requires a pattern"))
1320 pat = getstring(x, _("modifies requires a pattern"))
1323 return checkstatus(repo, subset, pat, 0)
1321 return checkstatus(repo, subset, pat, 0)
1324
1322
1325 @predicate('named(namespace)')
1323 @predicate('named(namespace)')
1326 def named(repo, subset, x):
1324 def named(repo, subset, x):
1327 """The changesets in a given namespace.
1325 """The changesets in a given namespace.
1328
1326
1329 If `namespace` starts with `re:`, the remainder of the string is treated as
1327 If `namespace` starts with `re:`, the remainder of the string is treated as
1330 a regular expression. To match a namespace that actually starts with `re:`,
1328 a regular expression. To match a namespace that actually starts with `re:`,
1331 use the prefix `literal:`.
1329 use the prefix `literal:`.
1332 """
1330 """
1333 # i18n: "named" is a keyword
1331 # i18n: "named" is a keyword
1334 args = getargs(x, 1, 1, _('named requires a namespace argument'))
1332 args = getargs(x, 1, 1, _('named requires a namespace argument'))
1335
1333
1336 ns = getstring(args[0],
1334 ns = getstring(args[0],
1337 # i18n: "named" is a keyword
1335 # i18n: "named" is a keyword
1338 _('the argument to named must be a string'))
1336 _('the argument to named must be a string'))
1339 kind, pattern, matcher = util.stringmatcher(ns)
1337 kind, pattern, matcher = util.stringmatcher(ns)
1340 namespaces = set()
1338 namespaces = set()
1341 if kind == 'literal':
1339 if kind == 'literal':
1342 if pattern not in repo.names:
1340 if pattern not in repo.names:
1343 raise error.RepoLookupError(_("namespace '%s' does not exist")
1341 raise error.RepoLookupError(_("namespace '%s' does not exist")
1344 % ns)
1342 % ns)
1345 namespaces.add(repo.names[pattern])
1343 namespaces.add(repo.names[pattern])
1346 else:
1344 else:
1347 for name, ns in repo.names.iteritems():
1345 for name, ns in repo.names.iteritems():
1348 if matcher(name):
1346 if matcher(name):
1349 namespaces.add(ns)
1347 namespaces.add(ns)
1350 if not namespaces:
1348 if not namespaces:
1351 raise error.RepoLookupError(_("no namespace exists"
1349 raise error.RepoLookupError(_("no namespace exists"
1352 " that matches '%s'") % pattern)
1350 " that matches '%s'") % pattern)
1353
1351
1354 names = set()
1352 names = set()
1355 for ns in namespaces:
1353 for ns in namespaces:
1356 for name in ns.listnames(repo):
1354 for name in ns.listnames(repo):
1357 if name not in ns.deprecated:
1355 if name not in ns.deprecated:
1358 names.update(repo[n].rev() for n in ns.nodes(repo, name))
1356 names.update(repo[n].rev() for n in ns.nodes(repo, name))
1359
1357
1360 names -= set([node.nullrev])
1358 names -= set([node.nullrev])
1361 return subset & names
1359 return subset & names
1362
1360
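The 're:' / 'literal:' prefix handling relies on util.stringmatcher; a rough standalone approximation (simple_stringmatcher is hypothetical and far less featureful than the real helper) looks like this:

import re

def simple_stringmatcher(pattern):
    # 're:<regex>' -> regex search; 'literal:<name>' or a bare name -> equality
    if pattern.startswith('re:'):
        regex = re.compile(pattern[3:])
        return 're', pattern[3:], lambda s: regex.search(s) is not None
    if pattern.startswith('literal:'):
        pattern = pattern[len('literal:'):]
    return 'literal', pattern, lambda s: s == pattern

kind, pat, matcher = simple_stringmatcher('re:tag.*')
assert kind == 're' and matcher('tags') and not matcher('bookmarks')
kind, pat, matcher = simple_stringmatcher('literal:re:odd')
assert matcher('re:odd') and not matcher('odd')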
1363 @predicate('id(string)', safe=True)
1361 @predicate('id(string)', safe=True)
1364 def node_(repo, subset, x):
1362 def node_(repo, subset, x):
1365 """Revision non-ambiguously specified by the given hex string prefix.
1363 """Revision non-ambiguously specified by the given hex string prefix.
1366 """
1364 """
1367 # i18n: "id" is a keyword
1365 # i18n: "id" is a keyword
1368 l = getargs(x, 1, 1, _("id requires one argument"))
1366 l = getargs(x, 1, 1, _("id requires one argument"))
1369 # i18n: "id" is a keyword
1367 # i18n: "id" is a keyword
1370 n = getstring(l[0], _("id requires a string"))
1368 n = getstring(l[0], _("id requires a string"))
1371 if len(n) == 40:
1369 if len(n) == 40:
1372 try:
1370 try:
1373 rn = repo.changelog.rev(node.bin(n))
1371 rn = repo.changelog.rev(node.bin(n))
1374 except (LookupError, TypeError):
1372 except (LookupError, TypeError):
1375 rn = None
1373 rn = None
1376 else:
1374 else:
1377 rn = None
1375 rn = None
1378 pm = repo.changelog._partialmatch(n)
1376 pm = repo.changelog._partialmatch(n)
1379 if pm is not None:
1377 if pm is not None:
1380 rn = repo.changelog.rev(pm)
1378 rn = repo.changelog.rev(pm)
1381
1379
1382 if rn is None:
1380 if rn is None:
1383 return baseset()
1381 return baseset()
1384 result = baseset([rn])
1382 result = baseset([rn])
1385 return result & subset
1383 return result & subset
1386
1384
1387 @predicate('obsolete()', safe=True)
1385 @predicate('obsolete()', safe=True)
1388 def obsolete(repo, subset, x):
1386 def obsolete(repo, subset, x):
1389 """Mutable changeset with a newer version."""
1387 """Mutable changeset with a newer version."""
1390 # i18n: "obsolete" is a keyword
1388 # i18n: "obsolete" is a keyword
1391 getargs(x, 0, 0, _("obsolete takes no arguments"))
1389 getargs(x, 0, 0, _("obsolete takes no arguments"))
1392 obsoletes = obsmod.getrevs(repo, 'obsolete')
1390 obsoletes = obsmod.getrevs(repo, 'obsolete')
1393 return subset & obsoletes
1391 return subset & obsoletes
1394
1392
1395 @predicate('only(set, [set])', safe=True)
1393 @predicate('only(set, [set])', safe=True)
1396 def only(repo, subset, x):
1394 def only(repo, subset, x):
1397 """Changesets that are ancestors of the first set that are not ancestors
1395 """Changesets that are ancestors of the first set that are not ancestors
1398 of any other head in the repo. If a second set is specified, the result
1396 of any other head in the repo. If a second set is specified, the result
1399 is ancestors of the first set that are not ancestors of the second set
1397 is ancestors of the first set that are not ancestors of the second set
1400 (i.e. ::<set1> - ::<set2>).
1398 (i.e. ::<set1> - ::<set2>).
1401 """
1399 """
1402 cl = repo.changelog
1400 cl = repo.changelog
1403 # i18n: "only" is a keyword
1401 # i18n: "only" is a keyword
1404 args = getargs(x, 1, 2, _('only takes one or two arguments'))
1402 args = getargs(x, 1, 2, _('only takes one or two arguments'))
1405 include = getset(repo, fullreposet(repo), args[0])
1403 include = getset(repo, fullreposet(repo), args[0])
1406 if len(args) == 1:
1404 if len(args) == 1:
1407 if not include:
1405 if not include:
1408 return baseset()
1406 return baseset()
1409
1407
1410 descendants = set(_revdescendants(repo, include, False))
1408 descendants = set(_revdescendants(repo, include, False))
1411 exclude = [rev for rev in cl.headrevs()
1409 exclude = [rev for rev in cl.headrevs()
1412 if rev not in descendants and rev not in include]
1410 if rev not in descendants and rev not in include]
1413 else:
1411 else:
1414 exclude = getset(repo, fullreposet(repo), args[1])
1412 exclude = getset(repo, fullreposet(repo), args[1])
1415
1413
1416 results = set(cl.findmissingrevs(common=exclude, heads=include))
1414 results = set(cl.findmissingrevs(common=exclude, heads=include))
1417 # XXX we should turn this into a baseset instead of a set, smartset may do
1415 # XXX we should turn this into a baseset instead of a set, smartset may do
1418 # some optimisations from the fact this is a baseset.
1416 # some optimisations from the fact this is a baseset.
1419 return subset & results
1417 return subset & results
1420
1418
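Conceptually, only(a, b) is the ancestor closure of a minus the ancestor closure of b (::a - ::b). A minimal sketch over a hypothetical parent mapping:

# Illustrative sketch only.
parents = {0: [], 1: [0], 2: [1], 3: [1], 4: [3]}

def ancestors(revs):
    seen, stack = set(), list(revs)
    while stack:
        r = stack.pop()
        if r not in seen:
            seen.add(r)
            stack.extend(parents[r])
    return seen

# ::4 - ::2 keeps only the revs reachable from 4 but not from 2.
assert ancestors([4]) - ancestors([2]) == {3, 4}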
1421 @predicate('origin([set])', safe=True)
1419 @predicate('origin([set])', safe=True)
1422 def origin(repo, subset, x):
1420 def origin(repo, subset, x):
1423 """
1421 """
1424 Changesets that were specified as a source for the grafts, transplants or
1422 Changesets that were specified as a source for the grafts, transplants or
1425 rebases that created the given revisions. Omitting the optional set is the
1423 rebases that created the given revisions. Omitting the optional set is the
1426 same as passing all(). If a changeset created by these operations is itself
1424 same as passing all(). If a changeset created by these operations is itself
1427 specified as a source for one of these operations, only the source changeset
1425 specified as a source for one of these operations, only the source changeset
1428 for the first operation is selected.
1426 for the first operation is selected.
1429 """
1427 """
1430 if x is not None:
1428 if x is not None:
1431 dests = getset(repo, fullreposet(repo), x)
1429 dests = getset(repo, fullreposet(repo), x)
1432 else:
1430 else:
1433 dests = fullreposet(repo)
1431 dests = fullreposet(repo)
1434
1432
1435 def _firstsrc(rev):
1433 def _firstsrc(rev):
1436 src = _getrevsource(repo, rev)
1434 src = _getrevsource(repo, rev)
1437 if src is None:
1435 if src is None:
1438 return None
1436 return None
1439
1437
1440 while True:
1438 while True:
1441 prev = _getrevsource(repo, src)
1439 prev = _getrevsource(repo, src)
1442
1440
1443 if prev is None:
1441 if prev is None:
1444 return src
1442 return src
1445 src = prev
1443 src = prev
1446
1444
1447 o = set([_firstsrc(r) for r in dests])
1445 o = set([_firstsrc(r) for r in dests])
1448 o -= set([None])
1446 o -= set([None])
1449 # XXX we should turn this into a baseset instead of a set, smartset may do
1447 # XXX we should turn this into a baseset instead of a set, smartset may do
1450 # some optimisations from the fact this is a baseset.
1448 # some optimisations from the fact this is a baseset.
1451 return subset & o
1449 return subset & o
1452
1450
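The _firstsrc() walk follows recorded sources back to the first one. A sketch under the assumption of a made-up rev-to-source mapping (the real data comes from _getrevsource()):

# Illustrative sketch only: 5 was grafted from 3, which was grafted from 1.
sources = {5: 3, 3: 1, 1: None, 4: None}

def firstsrc(rev):
    src = sources.get(rev)
    if src is None:
        return None               # rev was not created by graft/rebase/transplant
    while sources.get(src) is not None:
        src = sources[src]
    return src

assert firstsrc(5) == 1
assert firstsrc(4) is None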
1453 @predicate('outgoing([path])', safe=True)
1451 @predicate('outgoing([path])', safe=True)
1454 def outgoing(repo, subset, x):
1452 def outgoing(repo, subset, x):
1455 """Changesets not found in the specified destination repository, or the
1453 """Changesets not found in the specified destination repository, or the
1456 default push location.
1454 default push location.
1457 """
1455 """
1458 # Avoid cycles.
1456 # Avoid cycles.
1459 from . import (
1457 from . import (
1460 discovery,
1458 discovery,
1461 hg,
1459 hg,
1462 )
1460 )
1463 # i18n: "outgoing" is a keyword
1461 # i18n: "outgoing" is a keyword
1464 l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
1462 l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
1465 # i18n: "outgoing" is a keyword
1463 # i18n: "outgoing" is a keyword
1466 dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
1464 dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
1467 dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
1465 dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
1468 dest, branches = hg.parseurl(dest)
1466 dest, branches = hg.parseurl(dest)
1469 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1467 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1470 if revs:
1468 if revs:
1471 revs = [repo.lookup(rev) for rev in revs]
1469 revs = [repo.lookup(rev) for rev in revs]
1472 other = hg.peer(repo, {}, dest)
1470 other = hg.peer(repo, {}, dest)
1473 repo.ui.pushbuffer()
1471 repo.ui.pushbuffer()
1474 outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
1472 outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
1475 repo.ui.popbuffer()
1473 repo.ui.popbuffer()
1476 cl = repo.changelog
1474 cl = repo.changelog
1477 o = set([cl.rev(r) for r in outgoing.missing])
1475 o = set([cl.rev(r) for r in outgoing.missing])
1478 return subset & o
1476 return subset & o
1479
1477
1480 @predicate('p1([set])', safe=True)
1478 @predicate('p1([set])', safe=True)
1481 def p1(repo, subset, x):
1479 def p1(repo, subset, x):
1482 """First parent of changesets in set, or the working directory.
1480 """First parent of changesets in set, or the working directory.
1483 """
1481 """
1484 if x is None:
1482 if x is None:
1485 p = repo[x].p1().rev()
1483 p = repo[x].p1().rev()
1486 if p >= 0:
1484 if p >= 0:
1487 return subset & baseset([p])
1485 return subset & baseset([p])
1488 return baseset()
1486 return baseset()
1489
1487
1490 ps = set()
1488 ps = set()
1491 cl = repo.changelog
1489 cl = repo.changelog
1492 for r in getset(repo, fullreposet(repo), x):
1490 for r in getset(repo, fullreposet(repo), x):
1493 ps.add(cl.parentrevs(r)[0])
1491 ps.add(cl.parentrevs(r)[0])
1494 ps -= set([node.nullrev])
1492 ps -= set([node.nullrev])
1495 # XXX we should turn this into a baseset instead of a set, smartset may do
1493 # XXX we should turn this into a baseset instead of a set, smartset may do
1496 # some optimisations from the fact this is a baseset.
1494 # some optimisations from the fact this is a baseset.
1497 return subset & ps
1495 return subset & ps
1498
1496
1499 @predicate('p2([set])', safe=True)
1497 @predicate('p2([set])', safe=True)
1500 def p2(repo, subset, x):
1498 def p2(repo, subset, x):
1501 """Second parent of changesets in set, or the working directory.
1499 """Second parent of changesets in set, or the working directory.
1502 """
1500 """
1503 if x is None:
1501 if x is None:
1504 ps = repo[x].parents()
1502 ps = repo[x].parents()
1505 try:
1503 try:
1506 p = ps[1].rev()
1504 p = ps[1].rev()
1507 if p >= 0:
1505 if p >= 0:
1508 return subset & baseset([p])
1506 return subset & baseset([p])
1509 return baseset()
1507 return baseset()
1510 except IndexError:
1508 except IndexError:
1511 return baseset()
1509 return baseset()
1512
1510
1513 ps = set()
1511 ps = set()
1514 cl = repo.changelog
1512 cl = repo.changelog
1515 for r in getset(repo, fullreposet(repo), x):
1513 for r in getset(repo, fullreposet(repo), x):
1516 ps.add(cl.parentrevs(r)[1])
1514 ps.add(cl.parentrevs(r)[1])
1517 ps -= set([node.nullrev])
1515 ps -= set([node.nullrev])
1518 # XXX we should turn this into a baseset instead of a set, smartset may do
1516 # XXX we should turn this into a baseset instead of a set, smartset may do
1519 # some optimisations from the fact this is a baseset.
1517 # some optimisations from the fact this is a baseset.
1520 return subset & ps
1518 return subset & ps
1521
1519
1522 @predicate('parents([set])', safe=True)
1520 @predicate('parents([set])', safe=True)
1523 def parents(repo, subset, x):
1521 def parents(repo, subset, x):
1524 """
1522 """
1525 The set of all parents for all changesets in set, or the working directory.
1523 The set of all parents for all changesets in set, or the working directory.
1526 """
1524 """
1527 if x is None:
1525 if x is None:
1528 ps = set(p.rev() for p in repo[x].parents())
1526 ps = set(p.rev() for p in repo[x].parents())
1529 else:
1527 else:
1530 ps = set()
1528 ps = set()
1531 cl = repo.changelog
1529 cl = repo.changelog
1532 up = ps.update
1530 up = ps.update
1533 parentrevs = cl.parentrevs
1531 parentrevs = cl.parentrevs
1534 for r in getset(repo, fullreposet(repo), x):
1532 for r in getset(repo, fullreposet(repo), x):
1535 if r == node.wdirrev:
1533 if r == node.wdirrev:
1536 up(p.rev() for p in repo[r].parents())
1534 up(p.rev() for p in repo[r].parents())
1537 else:
1535 else:
1538 up(parentrevs(r))
1536 up(parentrevs(r))
1539 ps -= set([node.nullrev])
1537 ps -= set([node.nullrev])
1540 return subset & ps
1538 return subset & ps
1541
1539
1542 def _phase(repo, subset, target):
1540 def _phase(repo, subset, target):
1543 """helper to select all revs in phase <target>"""
1541 """helper to select all revs in phase <target>"""
1544 repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
1542 repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
1545 if repo._phasecache._phasesets:
1543 if repo._phasecache._phasesets:
1546 s = repo._phasecache._phasesets[target] - repo.changelog.filteredrevs
1544 s = repo._phasecache._phasesets[target] - repo.changelog.filteredrevs
1547 s = baseset(s)
1545 s = baseset(s)
1548 s.sort() # sets are unordered, so we enforce ascending order
1546 s.sort() # sets are unordered, so we enforce ascending order
1549 return subset & s
1547 return subset & s
1550 else:
1548 else:
1551 phase = repo._phasecache.phase
1549 phase = repo._phasecache.phase
1552 condition = lambda r: phase(repo, r) == target
1550 condition = lambda r: phase(repo, r) == target
1553 return subset.filter(condition, condrepr=('<phase %r>', target),
1551 return subset.filter(condition, condrepr=('<phase %r>', target),
1554 cache=False)
1552 cache=False)
1555
1553
1556 @predicate('draft()', safe=True)
1554 @predicate('draft()', safe=True)
1557 def draft(repo, subset, x):
1555 def draft(repo, subset, x):
1558 """Changeset in draft phase."""
1556 """Changeset in draft phase."""
1559 # i18n: "draft" is a keyword
1557 # i18n: "draft" is a keyword
1560 getargs(x, 0, 0, _("draft takes no arguments"))
1558 getargs(x, 0, 0, _("draft takes no arguments"))
1561 target = phases.draft
1559 target = phases.draft
1562 return _phase(repo, subset, target)
1560 return _phase(repo, subset, target)
1563
1561
1564 @predicate('secret()', safe=True)
1562 @predicate('secret()', safe=True)
1565 def secret(repo, subset, x):
1563 def secret(repo, subset, x):
1566 """Changeset in secret phase."""
1564 """Changeset in secret phase."""
1567 # i18n: "secret" is a keyword
1565 # i18n: "secret" is a keyword
1568 getargs(x, 0, 0, _("secret takes no arguments"))
1566 getargs(x, 0, 0, _("secret takes no arguments"))
1569 target = phases.secret
1567 target = phases.secret
1570 return _phase(repo, subset, target)
1568 return _phase(repo, subset, target)
1571
1569
1572 def parentspec(repo, subset, x, n):
1570 def parentspec(repo, subset, x, n):
1573 """``set^0``
1571 """``set^0``
1574 The set.
1572 The set.
1575 ``set^1`` (or ``set^``), ``set^2``
1573 ``set^1`` (or ``set^``), ``set^2``
1576 First or second parent, respectively, of all changesets in set.
1574 First or second parent, respectively, of all changesets in set.
1577 """
1575 """
1578 try:
1576 try:
1579 n = int(n[1])
1577 n = int(n[1])
1580 if n not in (0, 1, 2):
1578 if n not in (0, 1, 2):
1581 raise ValueError
1579 raise ValueError
1582 except (TypeError, ValueError):
1580 except (TypeError, ValueError):
1583 raise error.ParseError(_("^ expects a number 0, 1, or 2"))
1581 raise error.ParseError(_("^ expects a number 0, 1, or 2"))
1584 ps = set()
1582 ps = set()
1585 cl = repo.changelog
1583 cl = repo.changelog
1586 for r in getset(repo, fullreposet(repo), x):
1584 for r in getset(repo, fullreposet(repo), x):
1587 if n == 0:
1585 if n == 0:
1588 ps.add(r)
1586 ps.add(r)
1589 elif n == 1:
1587 elif n == 1:
1590 ps.add(cl.parentrevs(r)[0])
1588 ps.add(cl.parentrevs(r)[0])
1591 elif n == 2:
1589 elif n == 2:
1592 parents = cl.parentrevs(r)
1590 parents = cl.parentrevs(r)
1593 if len(parents) > 1:
1591 if len(parents) > 1:
1594 ps.add(parents[1])
1592 ps.add(parents[1])
1595 return subset & ps
1593 return subset & ps
1596
1594
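A hedged sketch of the ^0/^1/^2 selection above, using a hypothetical parentrevs mapping instead of the changelog:

# Illustrative sketch only.
parentrevs = {0: (-1, -1), 1: (0, -1), 2: (0, -1), 3: (1, 2)}

def parentspec_of(revs, n):
    ps = set()
    for r in revs:
        if n == 0:
            ps.add(r)
        elif n == 1:
            ps.add(parentrevs[r][0])
        else:
            ps.add(parentrevs[r][1])
    return ps - {-1}              # drop the null revision

assert parentspec_of({3}, 0) == {3}
assert parentspec_of({3}, 2) == {2}
assert parentspec_of({1}, 2) == set()   # rev 1 has no second parent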
1597 @predicate('present(set)', safe=True)
1595 @predicate('present(set)', safe=True)
1598 def present(repo, subset, x):
1596 def present(repo, subset, x):
1599 """An empty set, if any revision in set isn't found; otherwise,
1597 """An empty set, if any revision in set isn't found; otherwise,
1600 all revisions in set.
1598 all revisions in set.
1601
1599
1602 If any of the specified revisions is not present in the local repository,
1600 If any of the specified revisions is not present in the local repository,
1603 the query is normally aborted. But this predicate allows the query
1601 the query is normally aborted. But this predicate allows the query
1604 to continue even in such cases.
1602 to continue even in such cases.
1605 """
1603 """
1606 try:
1604 try:
1607 return getset(repo, subset, x)
1605 return getset(repo, subset, x)
1608 except error.RepoLookupError:
1606 except error.RepoLookupError:
1609 return baseset()
1607 return baseset()
1610
1608
1611 # for internal use
1609 # for internal use
1612 @predicate('_notpublic', safe=True)
1610 @predicate('_notpublic', safe=True)
1613 def _notpublic(repo, subset, x):
1611 def _notpublic(repo, subset, x):
1614 getargs(x, 0, 0, "_notpublic takes no arguments")
1612 getargs(x, 0, 0, "_notpublic takes no arguments")
1615 repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
1613 repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
1616 if repo._phasecache._phasesets:
1614 if repo._phasecache._phasesets:
1617 s = set()
1615 s = set()
1618 for u in repo._phasecache._phasesets[1:]:
1616 for u in repo._phasecache._phasesets[1:]:
1619 s.update(u)
1617 s.update(u)
1620 s = baseset(s - repo.changelog.filteredrevs)
1618 s = baseset(s - repo.changelog.filteredrevs)
1621 s.sort()
1619 s.sort()
1622 return subset & s
1620 return subset & s
1623 else:
1621 else:
1624 phase = repo._phasecache.phase
1622 phase = repo._phasecache.phase
1625 target = phases.public
1623 target = phases.public
1626 condition = lambda r: phase(repo, r) != target
1624 condition = lambda r: phase(repo, r) != target
1627 return subset.filter(condition, condrepr=('<phase %r>', target),
1625 return subset.filter(condition, condrepr=('<phase %r>', target),
1628 cache=False)
1626 cache=False)
1629
1627
1630 @predicate('public()', safe=True)
1628 @predicate('public()', safe=True)
1631 def public(repo, subset, x):
1629 def public(repo, subset, x):
1632 """Changeset in public phase."""
1630 """Changeset in public phase."""
1633 # i18n: "public" is a keyword
1631 # i18n: "public" is a keyword
1634 getargs(x, 0, 0, _("public takes no arguments"))
1632 getargs(x, 0, 0, _("public takes no arguments"))
1635 phase = repo._phasecache.phase
1633 phase = repo._phasecache.phase
1636 target = phases.public
1634 target = phases.public
1637 condition = lambda r: phase(repo, r) == target
1635 condition = lambda r: phase(repo, r) == target
1638 return subset.filter(condition, condrepr=('<phase %r>', target),
1636 return subset.filter(condition, condrepr=('<phase %r>', target),
1639 cache=False)
1637 cache=False)
1640
1638
1641 @predicate('remote([id [,path]])', safe=True)
1639 @predicate('remote([id [,path]])', safe=True)
1642 def remote(repo, subset, x):
1640 def remote(repo, subset, x):
1643 """Local revision that corresponds to the given identifier in a
1641 """Local revision that corresponds to the given identifier in a
1644 remote repository, if present. Here, the '.' identifier is a
1642 remote repository, if present. Here, the '.' identifier is a
1645 synonym for the current local branch.
1643 synonym for the current local branch.
1646 """
1644 """
1647
1645
1648 from . import hg # avoid start-up nasties
1646 from . import hg # avoid start-up nasties
1649 # i18n: "remote" is a keyword
1647 # i18n: "remote" is a keyword
1650 l = getargs(x, 0, 2, _("remote takes zero, one, or two arguments"))
1648 l = getargs(x, 0, 2, _("remote takes zero, one, or two arguments"))
1651
1649
1652 q = '.'
1650 q = '.'
1653 if len(l) > 0:
1651 if len(l) > 0:
1654 # i18n: "remote" is a keyword
1652 # i18n: "remote" is a keyword
1655 q = getstring(l[0], _("remote requires a string id"))
1653 q = getstring(l[0], _("remote requires a string id"))
1656 if q == '.':
1654 if q == '.':
1657 q = repo['.'].branch()
1655 q = repo['.'].branch()
1658
1656
1659 dest = ''
1657 dest = ''
1660 if len(l) > 1:
1658 if len(l) > 1:
1661 # i18n: "remote" is a keyword
1659 # i18n: "remote" is a keyword
1662 dest = getstring(l[1], _("remote requires a repository path"))
1660 dest = getstring(l[1], _("remote requires a repository path"))
1663 dest = repo.ui.expandpath(dest or 'default')
1661 dest = repo.ui.expandpath(dest or 'default')
1664 dest, branches = hg.parseurl(dest)
1662 dest, branches = hg.parseurl(dest)
1665 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1663 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1666 if revs:
1664 if revs:
1667 revs = [repo.lookup(rev) for rev in revs]
1665 revs = [repo.lookup(rev) for rev in revs]
1668 other = hg.peer(repo, {}, dest)
1666 other = hg.peer(repo, {}, dest)
1669 n = other.lookup(q)
1667 n = other.lookup(q)
1670 if n in repo:
1668 if n in repo:
1671 r = repo[n].rev()
1669 r = repo[n].rev()
1672 if r in subset:
1670 if r in subset:
1673 return baseset([r])
1671 return baseset([r])
1674 return baseset()
1672 return baseset()
1675
1673
1676 @predicate('removes(pattern)', safe=True)
1674 @predicate('removes(pattern)', safe=True)
1677 def removes(repo, subset, x):
1675 def removes(repo, subset, x):
1678 """Changesets which remove files matching pattern.
1676 """Changesets which remove files matching pattern.
1679
1677
1680 The pattern without explicit kind like ``glob:`` is expected to be
1678 The pattern without explicit kind like ``glob:`` is expected to be
1681 relative to the current directory and match against a file or a
1679 relative to the current directory and match against a file or a
1682 directory.
1680 directory.
1683 """
1681 """
1684 # i18n: "removes" is a keyword
1682 # i18n: "removes" is a keyword
1685 pat = getstring(x, _("removes requires a pattern"))
1683 pat = getstring(x, _("removes requires a pattern"))
1686 return checkstatus(repo, subset, pat, 2)
1684 return checkstatus(repo, subset, pat, 2)
1687
1685
1688 @predicate('rev(number)', safe=True)
1686 @predicate('rev(number)', safe=True)
1689 def rev(repo, subset, x):
1687 def rev(repo, subset, x):
1690 """Revision with the given numeric identifier.
1688 """Revision with the given numeric identifier.
1691 """
1689 """
1692 # i18n: "rev" is a keyword
1690 # i18n: "rev" is a keyword
1693 l = getargs(x, 1, 1, _("rev requires one argument"))
1691 l = getargs(x, 1, 1, _("rev requires one argument"))
1694 try:
1692 try:
1695 # i18n: "rev" is a keyword
1693 # i18n: "rev" is a keyword
1696 l = int(getstring(l[0], _("rev requires a number")))
1694 l = int(getstring(l[0], _("rev requires a number")))
1697 except (TypeError, ValueError):
1695 except (TypeError, ValueError):
1698 # i18n: "rev" is a keyword
1696 # i18n: "rev" is a keyword
1699 raise error.ParseError(_("rev expects a number"))
1697 raise error.ParseError(_("rev expects a number"))
1700 if l not in repo.changelog and l != node.nullrev:
1698 if l not in repo.changelog and l != node.nullrev:
1701 return baseset()
1699 return baseset()
1702 return subset & baseset([l])
1700 return subset & baseset([l])
1703
1701
1704 @predicate('matching(revision [, field])', safe=True)
1702 @predicate('matching(revision [, field])', safe=True)
1705 def matching(repo, subset, x):
1703 def matching(repo, subset, x):
1706 """Changesets in which a given set of fields match the set of fields in the
1704 """Changesets in which a given set of fields match the set of fields in the
1707 selected revision or set.
1705 selected revision or set.
1708
1706
1709 To match more than one field pass the list of fields to match separated
1707 To match more than one field pass the list of fields to match separated
1710 by spaces (e.g. ``author description``).
1708 by spaces (e.g. ``author description``).
1711
1709
1712 Valid fields are most regular revision fields and some special fields.
1710 Valid fields are most regular revision fields and some special fields.
1713
1711
1714 Regular revision fields are ``description``, ``author``, ``branch``,
1712 Regular revision fields are ``description``, ``author``, ``branch``,
1715 ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
1713 ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
1716 and ``diff``.
1714 and ``diff``.
1717 Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
1715 Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
1718 contents of the revision. Two revisions matching their ``diff`` will
1716 contents of the revision. Two revisions matching their ``diff`` will
1719 also match their ``files``.
1717 also match their ``files``.
1720
1718
1721 Special fields are ``summary`` and ``metadata``:
1719 Special fields are ``summary`` and ``metadata``:
1722 ``summary`` matches the first line of the description.
1720 ``summary`` matches the first line of the description.
1723 ``metadata`` is equivalent to matching ``description user date``
1721 ``metadata`` is equivalent to matching ``description user date``
1724 (i.e. it matches the main metadata fields).
1722 (i.e. it matches the main metadata fields).
1725
1723
1726 ``metadata`` is the default field which is used when no fields are
1724 ``metadata`` is the default field which is used when no fields are
1727 specified. You can match more than one field at a time.
1725 specified. You can match more than one field at a time.
1728 """
1726 """
1729 # i18n: "matching" is a keyword
1727 # i18n: "matching" is a keyword
1730 l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))
1728 l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))
1731
1729
1732 revs = getset(repo, fullreposet(repo), l[0])
1730 revs = getset(repo, fullreposet(repo), l[0])
1733
1731
1734 fieldlist = ['metadata']
1732 fieldlist = ['metadata']
1735 if len(l) > 1:
1733 if len(l) > 1:
1736 fieldlist = getstring(l[1],
1734 fieldlist = getstring(l[1],
1737 # i18n: "matching" is a keyword
1735 # i18n: "matching" is a keyword
1738 _("matching requires a string "
1736 _("matching requires a string "
1739 "as its second argument")).split()
1737 "as its second argument")).split()
1740
1738
1741 # Make sure that there are no repeated fields,
1739 # Make sure that there are no repeated fields,
1742 # expand the 'special' 'metadata' field type
1740 # expand the 'special' 'metadata' field type
1743 # and check the 'files' whenever we check the 'diff'
1741 # and check the 'files' whenever we check the 'diff'
1744 fields = []
1742 fields = []
1745 for field in fieldlist:
1743 for field in fieldlist:
1746 if field == 'metadata':
1744 if field == 'metadata':
1747 fields += ['user', 'description', 'date']
1745 fields += ['user', 'description', 'date']
1748 elif field == 'diff':
1746 elif field == 'diff':
1749 # a revision matching the diff must also match the files
1747 # a revision matching the diff must also match the files
1750 # since matching the diff is very costly, make sure to
1748 # since matching the diff is very costly, make sure to
1751 # also match the files first
1749 # also match the files first
1752 fields += ['files', 'diff']
1750 fields += ['files', 'diff']
1753 else:
1751 else:
1754 if field == 'author':
1752 if field == 'author':
1755 field = 'user'
1753 field = 'user'
1756 fields.append(field)
1754 fields.append(field)
1757 fields = set(fields)
1755 fields = set(fields)
1758 if 'summary' in fields and 'description' in fields:
1756 if 'summary' in fields and 'description' in fields:
1759 # If a revision matches its description it also matches its summary
1757 # If a revision matches its description it also matches its summary
1760 fields.discard('summary')
1758 fields.discard('summary')
1761
1759
1762 # We may want to match more than one field
1760 # We may want to match more than one field
1763 # Not all fields take the same amount of time to be matched
1761 # Not all fields take the same amount of time to be matched
1764 # Sort the selected fields in order of increasing matching cost
1762 # Sort the selected fields in order of increasing matching cost
1765 fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
1763 fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
1766 'files', 'description', 'substate', 'diff']
1764 'files', 'description', 'substate', 'diff']
1767 def fieldkeyfunc(f):
1765 def fieldkeyfunc(f):
1768 try:
1766 try:
1769 return fieldorder.index(f)
1767 return fieldorder.index(f)
1770 except ValueError:
1768 except ValueError:
1771 # assume an unknown field is very costly
1769 # assume an unknown field is very costly
1772 return len(fieldorder)
1770 return len(fieldorder)
1773 fields = list(fields)
1771 fields = list(fields)
1774 fields.sort(key=fieldkeyfunc)
1772 fields.sort(key=fieldkeyfunc)
1775
1773
1776 # Each field will be matched with its own "getfield" function
1774 # Each field will be matched with its own "getfield" function
1777 # which will be added to the getfieldfuncs array of functions
1775 # which will be added to the getfieldfuncs array of functions
1778 getfieldfuncs = []
1776 getfieldfuncs = []
1779 _funcs = {
1777 _funcs = {
1780 'user': lambda r: repo[r].user(),
1778 'user': lambda r: repo[r].user(),
1781 'branch': lambda r: repo[r].branch(),
1779 'branch': lambda r: repo[r].branch(),
1782 'date': lambda r: repo[r].date(),
1780 'date': lambda r: repo[r].date(),
1783 'description': lambda r: repo[r].description(),
1781 'description': lambda r: repo[r].description(),
1784 'files': lambda r: repo[r].files(),
1782 'files': lambda r: repo[r].files(),
1785 'parents': lambda r: repo[r].parents(),
1783 'parents': lambda r: repo[r].parents(),
1786 'phase': lambda r: repo[r].phase(),
1784 'phase': lambda r: repo[r].phase(),
1787 'substate': lambda r: repo[r].substate,
1785 'substate': lambda r: repo[r].substate,
1788 'summary': lambda r: repo[r].description().splitlines()[0],
1786 'summary': lambda r: repo[r].description().splitlines()[0],
1789 'diff': lambda r: list(repo[r].diff(git=True))
1787 'diff': lambda r: list(repo[r].diff(git=True))
1790 }
1788 }
1791 for info in fields:
1789 for info in fields:
1792 getfield = _funcs.get(info, None)
1790 getfield = _funcs.get(info, None)
1793 if getfield is None:
1791 if getfield is None:
1794 raise error.ParseError(
1792 raise error.ParseError(
1795 # i18n: "matching" is a keyword
1793 # i18n: "matching" is a keyword
1796 _("unexpected field name passed to matching: %s") % info)
1794 _("unexpected field name passed to matching: %s") % info)
1797 getfieldfuncs.append(getfield)
1795 getfieldfuncs.append(getfield)
1798 # convert the getfield array of functions into a "getinfo" function
1796 # convert the getfield array of functions into a "getinfo" function
1799 # which returns an array of field values (or a single value if there
1797 # which returns an array of field values (or a single value if there
1800 # is only one field to match)
1798 # is only one field to match)
1801 getinfo = lambda r: [f(r) for f in getfieldfuncs]
1799 getinfo = lambda r: [f(r) for f in getfieldfuncs]
1802
1800
1803 def matches(x):
1801 def matches(x):
1804 for rev in revs:
1802 for rev in revs:
1805 target = getinfo(rev)
1803 target = getinfo(rev)
1806 match = True
1804 match = True
1807 for n, f in enumerate(getfieldfuncs):
1805 for n, f in enumerate(getfieldfuncs):
1808 if target[n] != f(x):
1806 if target[n] != f(x):
1809 match = False
1807 match = False
1810 if match:
1808 if match:
1811 return True
1809 return True
1812 return False
1810 return False
1813
1811
1814 return subset.filter(matches, condrepr=('<matching%r %r>', fields, revs))
1812 return subset.filter(matches, condrepr=('<matching%r %r>', fields, revs))
1815
1813
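The core of matching() is "compare the selected fields of each candidate with those of the reference revisions, cheapest field first". A standalone sketch with made-up dict-based revisions (not ctx objects):

# Illustrative sketch only.
revisions = {
    0: {'user': 'alice', 'branch': 'default', 'description': 'fix parser'},
    1: {'user': 'bob',   'branch': 'stable',  'description': 'fix parser'},
    2: {'user': 'alice', 'branch': 'default', 'description': 'add tests'},
}
fieldorder = ['branch', 'user', 'description']       # increasing matching cost
fields = sorted({'description', 'user'}, key=fieldorder.index)

def matches(candidate, reference):
    return all(revisions[candidate][f] == revisions[reference][f]
               for f in fields)

assert [r for r in sorted(revisions) if matches(r, 0)] == [0]
assert [r for r in sorted(revisions) if matches(r, 2)] == [2]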
1816 @predicate('reverse(set)', safe=True)
1814 @predicate('reverse(set)', safe=True)
1817 def reverse(repo, subset, x):
1815 def reverse(repo, subset, x):
1818 """Reverse order of set.
1816 """Reverse order of set.
1819 """
1817 """
1820 l = getset(repo, subset, x)
1818 l = getset(repo, subset, x)
1821 l.reverse()
1819 l.reverse()
1822 return l
1820 return l
1823
1821
1824 @predicate('roots(set)', safe=True)
1822 @predicate('roots(set)', safe=True)
1825 def roots(repo, subset, x):
1823 def roots(repo, subset, x):
1826 """Changesets in set with no parent changeset in set.
1824 """Changesets in set with no parent changeset in set.
1827 """
1825 """
1828 s = getset(repo, fullreposet(repo), x)
1826 s = getset(repo, fullreposet(repo), x)
1829 parents = repo.changelog.parentrevs
1827 parents = repo.changelog.parentrevs
1830 def filter(r):
1828 def filter(r):
1831 for p in parents(r):
1829 for p in parents(r):
1832 if 0 <= p and p in s:
1830 if 0 <= p and p in s:
1833 return False
1831 return False
1834 return True
1832 return True
1835 return subset & s.filter(filter, condrepr='<roots>')
1833 return subset & s.filter(filter, condrepr='<roots>')
1836
1834
1837 _sortkeyfuncs = {
1835 _sortkeyfuncs = {
1838 'rev': lambda c: c.rev(),
1836 'rev': lambda c: c.rev(),
1839 'branch': lambda c: c.branch(),
1837 'branch': lambda c: c.branch(),
1840 'desc': lambda c: c.description(),
1838 'desc': lambda c: c.description(),
1841 'user': lambda c: c.user(),
1839 'user': lambda c: c.user(),
1842 'author': lambda c: c.user(),
1840 'author': lambda c: c.user(),
1843 'date': lambda c: c.date()[0],
1841 'date': lambda c: c.date()[0],
1844 }
1842 }
1845
1843
1846 def _getsortargs(x):
1844 def _getsortargs(x):
1847 """Parse sort options into (set, [(key, reverse)], opts)"""
1845 """Parse sort options into (set, [(key, reverse)], opts)"""
1848 args = getargsdict(x, 'sort', 'set keys topo.firstbranch')
1846 args = getargsdict(x, 'sort', 'set keys topo.firstbranch')
1849 if 'set' not in args:
1847 if 'set' not in args:
1850 # i18n: "sort" is a keyword
1848 # i18n: "sort" is a keyword
1851 raise error.ParseError(_('sort requires one or two arguments'))
1849 raise error.ParseError(_('sort requires one or two arguments'))
1852 keys = "rev"
1850 keys = "rev"
1853 if 'keys' in args:
1851 if 'keys' in args:
1854 # i18n: "sort" is a keyword
1852 # i18n: "sort" is a keyword
1855 keys = getstring(args['keys'], _("sort spec must be a string"))
1853 keys = getstring(args['keys'], _("sort spec must be a string"))
1856
1854
1857 keyflags = []
1855 keyflags = []
1858 for k in keys.split():
1856 for k in keys.split():
1859 fk = k
1857 fk = k
1860 reverse = (k[0] == '-')
1858 reverse = (k[0] == '-')
1861 if reverse:
1859 if reverse:
1862 k = k[1:]
1860 k = k[1:]
1863 if k not in _sortkeyfuncs and k != 'topo':
1861 if k not in _sortkeyfuncs and k != 'topo':
1864 raise error.ParseError(_("unknown sort key %r") % fk)
1862 raise error.ParseError(_("unknown sort key %r") % fk)
1865 keyflags.append((k, reverse))
1863 keyflags.append((k, reverse))
1866
1864
1867 if len(keyflags) > 1 and any(k == 'topo' for k, reverse in keyflags):
1865 if len(keyflags) > 1 and any(k == 'topo' for k, reverse in keyflags):
1868 # i18n: "topo" is a keyword
1866 # i18n: "topo" is a keyword
1869 raise error.ParseError(_('topo sort order cannot be combined '
1867 raise error.ParseError(_('topo sort order cannot be combined '
1870 'with other sort keys'))
1868 'with other sort keys'))
1871
1869
1872 opts = {}
1870 opts = {}
1873 if 'topo.firstbranch' in args:
1871 if 'topo.firstbranch' in args:
1874 if any(k == 'topo' for k, reverse in keyflags):
1872 if any(k == 'topo' for k, reverse in keyflags):
1875 opts['topo.firstbranch'] = args['topo.firstbranch']
1873 opts['topo.firstbranch'] = args['topo.firstbranch']
1876 else:
1874 else:
1877 # i18n: "topo" and "topo.firstbranch" are keywords
1875 # i18n: "topo" and "topo.firstbranch" are keywords
1878 raise error.ParseError(_('topo.firstbranch can only be used '
1876 raise error.ParseError(_('topo.firstbranch can only be used '
1879 'when using the topo sort key'))
1877 'when using the topo sort key'))
1880
1878
1881 return args['set'], keyflags, opts
1879 return args['set'], keyflags, opts
1882
1880
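The key-spec parsing above can be condensed into a short sketch (parsekeys is hypothetical; the real code also enforces the 'topo' restrictions shown above):

known_keys = {'rev', 'branch', 'desc', 'user', 'author', 'date', 'topo'}

def parsekeys(spec):
    keyflags = []
    for k in spec.split():
        reverse = k.startswith('-')   # a leading '-' means descending order
        if reverse:
            k = k[1:]
        if k not in known_keys:
            raise ValueError('unknown sort key %r' % k)
        keyflags.append((k, reverse))
    return keyflags

assert parsekeys('-date branch') == [('date', True), ('branch', False)]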
1883 @predicate('sort(set[, [-]key... [, ...]])', safe=True)
1881 @predicate('sort(set[, [-]key... [, ...]])', safe=True)
1884 def sort(repo, subset, x):
1882 def sort(repo, subset, x):
1885 """Sort set by keys. The default sort order is ascending, specify a key
1883 """Sort set by keys. The default sort order is ascending, specify a key
1886 as ``-key`` to sort in descending order.
1884 as ``-key`` to sort in descending order.
1887
1885
1888 The keys can be:
1886 The keys can be:
1889
1887
1890 - ``rev`` for the revision number,
1888 - ``rev`` for the revision number,
1891 - ``branch`` for the branch name,
1889 - ``branch`` for the branch name,
1892 - ``desc`` for the commit message (description),
1890 - ``desc`` for the commit message (description),
1893 - ``user`` for user name (``author`` can be used as an alias),
1891 - ``user`` for user name (``author`` can be used as an alias),
1894 - ``date`` for the commit date
1892 - ``date`` for the commit date
1895 - ``topo`` for a reverse topological sort
1893 - ``topo`` for a reverse topological sort
1896
1894
1897 The ``topo`` sort order cannot be combined with other sort keys. This sort
1895 The ``topo`` sort order cannot be combined with other sort keys. This sort
1898 takes one optional argument, ``topo.firstbranch``, which takes a revset that
1896 takes one optional argument, ``topo.firstbranch``, which takes a revset that
1899 specifies what topological branches to prioritize in the sort.
1897 specifies what topological branches to prioritize in the sort.
1900
1898
1901 """
1899 """
1902 s, keyflags, opts = _getsortargs(x)
1900 s, keyflags, opts = _getsortargs(x)
1903 revs = getset(repo, subset, s)
1901 revs = getset(repo, subset, s)
1904
1902
1905 if not keyflags:
1903 if not keyflags:
1906 return revs
1904 return revs
1907 if len(keyflags) == 1 and keyflags[0][0] == "rev":
1905 if len(keyflags) == 1 and keyflags[0][0] == "rev":
1908 revs.sort(reverse=keyflags[0][1])
1906 revs.sort(reverse=keyflags[0][1])
1909 return revs
1907 return revs
1910 elif keyflags[0][0] == "topo":
1908 elif keyflags[0][0] == "topo":
1911 firstbranch = ()
1909 firstbranch = ()
1912 if 'topo.firstbranch' in opts:
1910 if 'topo.firstbranch' in opts:
1913 firstbranch = getset(repo, subset, opts['topo.firstbranch'])
1911 firstbranch = getset(repo, subset, opts['topo.firstbranch'])
1914 revs = baseset(_toposort(revs, repo.changelog.parentrevs, firstbranch),
1912 revs = baseset(_toposort(revs, repo.changelog.parentrevs, firstbranch),
1915 istopo=True)
1913 istopo=True)
1916 if keyflags[0][1]:
1914 if keyflags[0][1]:
1917 revs.reverse()
1915 revs.reverse()
1918 return revs
1916 return revs
1919
1917
1920 # sort() is guaranteed to be stable
1918 # sort() is guaranteed to be stable
1921 ctxs = [repo[r] for r in revs]
1919 ctxs = [repo[r] for r in revs]
1922 for k, reverse in reversed(keyflags):
1920 for k, reverse in reversed(keyflags):
1923 ctxs.sort(key=_sortkeyfuncs[k], reverse=reverse)
1921 ctxs.sort(key=_sortkeyfuncs[k], reverse=reverse)
1924 return baseset([c.rev() for c in ctxs])
1922 return baseset([c.rev() for c in ctxs])
1925
1923
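The multi-key sort relies on Python's stable sort: one pass per key, least significant key first. A minimal sketch with made-up (branch, rev) rows standing in for changectx objects:

# Illustrative sketch only: sort by branch ascending, then by rev descending.
rows = [('stable', 3), ('default', 1), ('default', 2)]
keyflags = [('branch', False), ('rev', True)]
keyfuncs = {'branch': lambda r: r[0], 'rev': lambda r: r[1]}
for key, reverse in reversed(keyflags):
    rows.sort(key=keyfuncs[key], reverse=reverse)
assert rows == [('default', 2), ('default', 1), ('stable', 3)]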
1926 def _toposort(revs, parentsfunc, firstbranch=()):
1924 def _toposort(revs, parentsfunc, firstbranch=()):
1927 """Yield revisions from heads to roots one (topo) branch at a time.
1925 """Yield revisions from heads to roots one (topo) branch at a time.
1928
1926
1929 This function aims to be used by a graph generator that wishes to minimize
1927 This function aims to be used by a graph generator that wishes to minimize
1930 the number of parallel branches and their interleaving.
1928 the number of parallel branches and their interleaving.
1931
1929
1932 Example iteration order (numbers show the "true" order in a changelog):
1930 Example iteration order (numbers show the "true" order in a changelog):
1933
1931
1934 o 4
1932 o 4
1935 |
1933 |
1936 o 1
1934 o 1
1937 |
1935 |
1938 | o 3
1936 | o 3
1939 | |
1937 | |
1940 | o 2
1938 | o 2
1941 |/
1939 |/
1942 o 0
1940 o 0
1943
1941
1944 Note that the ancestors of merges are understood by the current
1942 Note that the ancestors of merges are understood by the current
1945 algorithm to be on the same branch. This means no reordering will
1943 algorithm to be on the same branch. This means no reordering will
1946 occur behind a merge.
1944 occur behind a merge.
1947 """
1945 """
1948
1946
1949 ### Quick summary of the algorithm
1947 ### Quick summary of the algorithm
1950 #
1948 #
1951 # This function is based around a "retention" principle. We keep revisions
1949 # This function is based around a "retention" principle. We keep revisions
1952 # in memory until we are ready to emit a whole branch that immediately
1950 # in memory until we are ready to emit a whole branch that immediately
1953 # "merges" into an existing one. This reduces the number of parallel
1951 # "merges" into an existing one. This reduces the number of parallel
1954 # branches with interleaved revisions.
1952 # branches with interleaved revisions.
1955 #
1953 #
1956 # During iteration revs are split into two groups:
1954 # During iteration revs are split into two groups:
1957 # A) revision already emitted
1955 # A) revision already emitted
1958 # B) revision in "retention". They are stored as different subgroups.
1956 # B) revision in "retention". They are stored as different subgroups.
1959 #
1957 #
1960 # for each REV, we do the following logic:
1958 # for each REV, we do the following logic:
1961 #
1959 #
1962 # 1) if REV is a parent of (A), we will emit it. If there is a
1960 # 1) if REV is a parent of (A), we will emit it. If there is a
1963 # retention group ((B) above) that is blocked on REV being
1961 # retention group ((B) above) that is blocked on REV being
1964 # available, we emit all the revisions out of that retention
1962 # available, we emit all the revisions out of that retention
1965 # group first.
1963 # group first.
1966 #
1964 #
1967 # 2) else, we'll search for a subgroup in (B) waiting for REV to become
1965 # 2) else, we'll search for a subgroup in (B) waiting for REV to become
1968 # available; if such a subgroup exists, we add REV to it and the subgroup is
1966 # available; if such a subgroup exists, we add REV to it and the subgroup is
1969 # now waiting for REV.parents() to become available.
1967 # now waiting for REV.parents() to become available.
1970 #
1968 #
1971 # 3) finally if no such group existed in (B), we create a new subgroup.
1969 # 3) finally if no such group existed in (B), we create a new subgroup.
1972 #
1970 #
1973 #
1971 #
1974 # To bootstrap the algorithm, we emit the tipmost revision (which
1972 # To bootstrap the algorithm, we emit the tipmost revision (which
1975 # puts it in group (A) from above).
1973 # puts it in group (A) from above).
1976
1974
1977 revs.sort(reverse=True)
1975 revs.sort(reverse=True)
1978
1976
1979 # Set of parents of revision that have been emitted. They can be considered
1977 # Set of parents of revision that have been emitted. They can be considered
1980 # unblocked as the graph generator is already aware of them so there is no
1978 # unblocked as the graph generator is already aware of them so there is no
1981 # need to delay the revisions that reference them.
1979 # need to delay the revisions that reference them.
1982 #
1980 #
1983 # If someone wants to prioritize a branch over the others, pre-filling this
1981 # If someone wants to prioritize a branch over the others, pre-filling this
1984 # set will force all other branches to wait until this branch is ready to be
1982 # set will force all other branches to wait until this branch is ready to be
1985 # emitted.
1983 # emitted.
1986 unblocked = set(firstbranch)
1984 unblocked = set(firstbranch)
1987
1985
1988 # list of groups waiting to be displayed, each group is defined by:
1986 # list of groups waiting to be displayed, each group is defined by:
1989 #
1987 #
1990 # (revs: list of revs waiting to be displayed,
1988 # (revs: list of revs waiting to be displayed,
1991 # blocked: set of revs that cannot be displayed before those in 'revs')
1989 # blocked: set of revs that cannot be displayed before those in 'revs')
1992 #
1990 #
1993 # The second value ('blocked') corresponds to parents of any revision in the
1991 # The second value ('blocked') corresponds to parents of any revision in the
1994 # group ('revs') that are not themselves contained in the group. The main idea
1992 # group ('revs') that are not themselves contained in the group. The main idea
1995 # of this algorithm is to delay as much as possible the emission of any
1993 # of this algorithm is to delay as much as possible the emission of any
1996 # revision. This means waiting for the moment we are about to display
1994 # revision. This means waiting for the moment we are about to display
1997 # these parents to display the revs in a group.
1995 # these parents to display the revs in a group.
1998 #
1996 #
1999 # This first implementation is smart until it encounters a merge: it will
1997 # This first implementation is smart until it encounters a merge: it will
2000 # emit revs as soon as any parent is about to be emitted and can grow an
1998 # emit revs as soon as any parent is about to be emitted and can grow an
2001 # arbitrary number of revs in 'blocked'. In practice this means we properly
1999 # arbitrary number of revs in 'blocked'. In practice this means we properly
2002 # retain new branches but give up on any special ordering for ancestors
2000 # retain new branches but give up on any special ordering for ancestors
2003 # of merges. The implementation can be improved to handle this better.
2001 # of merges. The implementation can be improved to handle this better.
2004 #
2002 #
2005 # The first subgroup is special. It corresponds to all the revisions that
2003 # The first subgroup is special. It corresponds to all the revisions that
2006 # were already emitted. The 'revs' list is expected to be empty and the
2004 # were already emitted. The 'revs' list is expected to be empty and the
2007 # 'blocked' set contains the parent revisions of already emitted revisions.
2005 # 'blocked' set contains the parent revisions of already emitted revisions.
2008 #
2006 #
2009 # You could pre-seed the <parents> set of groups[0] with specific
2007 # You could pre-seed the <parents> set of groups[0] with specific
2010 # changesets to select what the first emitted branch should be.
2008 # changesets to select what the first emitted branch should be.
2011 groups = [([], unblocked)]
2009 groups = [([], unblocked)]
2012 pendingheap = []
2010 pendingheap = []
2013 pendingset = set()
2011 pendingset = set()
2014
2012
2015 heapq.heapify(pendingheap)
2013 heapq.heapify(pendingheap)
2016 heappop = heapq.heappop
2014 heappop = heapq.heappop
2017 heappush = heapq.heappush
2015 heappush = heapq.heappush
2018 for currentrev in revs:
2016 for currentrev in revs:
2019 # Heap works with smallest element, we want highest so we invert
2017 # Heap works with smallest element, we want highest so we invert
2020 if currentrev not in pendingset:
2018 if currentrev not in pendingset:
2021 heappush(pendingheap, -currentrev)
2019 heappush(pendingheap, -currentrev)
2022 pendingset.add(currentrev)
2020 pendingset.add(currentrev)
2023 # iterate on pending revs until the current rev has been
2021 # iterate on pending revs until the current rev has been
2024 # processed.
2022 # processed.
2025 rev = None
2023 rev = None
2026 while rev != currentrev:
2024 while rev != currentrev:
2027 rev = -heappop(pendingheap)
2025 rev = -heappop(pendingheap)
2028 pendingset.remove(rev)
2026 pendingset.remove(rev)
2029
2027
2030 # Look for a blocked subgroup waiting for the current revision.
2028 # Look for a blocked subgroup waiting for the current revision.
2031 matching = [i for i, g in enumerate(groups) if rev in g[1]]
2029 matching = [i for i, g in enumerate(groups) if rev in g[1]]
2032
2030
2033 if matching:
2031 if matching:
2034 # The main idea is to gather together all sets that are blocked
2032 # The main idea is to gather together all sets that are blocked
2035 # on the same revision.
2033 # on the same revision.
2036 #
2034 #
2037 # Groups are merged when a common blocking ancestor is
2035 # Groups are merged when a common blocking ancestor is
2038 # observed. For example, given two groups:
2036 # observed. For example, given two groups:
2039 #
2037 #
2040 # revs [5, 4] waiting for 1
2038 # revs [5, 4] waiting for 1
2041 # revs [3, 2] waiting for 1
2039 # revs [3, 2] waiting for 1
2042 #
2040 #
2043 # These two groups will be merged when we process
2041 # These two groups will be merged when we process
2044 # 1. In theory, we could have merged the groups when
2042 # 1. In theory, we could have merged the groups when
2045 # we added 2 to the group it is now in (we could have
2043 # we added 2 to the group it is now in (we could have
2046 # noticed the groups were both blocked on 1 then), but
2044 # noticed the groups were both blocked on 1 then), but
2047 # the way it works now makes the algorithm simpler.
2045 # the way it works now makes the algorithm simpler.
2048 #
2046 #
2049 # We also always keep the oldest subgroup first. We can
2047 # We also always keep the oldest subgroup first. We can
2050 # probably improve the behavior by having the longest set
2048 # probably improve the behavior by having the longest set
2051 # first. That way, graph algorithms could minimise the length
2049 # first. That way, graph algorithms could minimise the length
2052 # of parallel lines their drawing. This is currently not done.
2050 # of parallel lines their drawing. This is currently not done.
2053 targetidx = matching.pop(0)
2051 targetidx = matching.pop(0)
2054 trevs, tparents = groups[targetidx]
2052 trevs, tparents = groups[targetidx]
2055 for i in matching:
2053 for i in matching:
2056 gr = groups[i]
2054 gr = groups[i]
2057 trevs.extend(gr[0])
2055 trevs.extend(gr[0])
2058 tparents |= gr[1]
2056 tparents |= gr[1]
2059 # delete all merged subgroups (except the one we kept)
2057 # delete all merged subgroups (except the one we kept)
2060 # (starting from the last subgroup for performance and
2058 # (starting from the last subgroup for performance and
2061 # sanity reasons)
2059 # sanity reasons)
2062 for i in reversed(matching):
2060 for i in reversed(matching):
2063 del groups[i]
2061 del groups[i]
2064 else:
2062 else:
2065 # This is a new head. We create a new subgroup for it.
2063 # This is a new head. We create a new subgroup for it.
2066 targetidx = len(groups)
2064 targetidx = len(groups)
2067 groups.append(([], set([rev])))
2065 groups.append(([], set([rev])))
2068
2066
2069 gr = groups[targetidx]
2067 gr = groups[targetidx]
2070
2068
2071 # We now add the current nodes to this subgroups. This is done
2069 # We now add the current nodes to this subgroups. This is done
2072 # after the subgroup merging because all elements from a subgroup
2070 # after the subgroup merging because all elements from a subgroup
2073 # that relied on this rev must precede it.
2071 # that relied on this rev must precede it.
2074 #
2072 #
2075 # we also update the <parents> set to include the parents of the
2073 # we also update the <parents> set to include the parents of the
2076 # new nodes.
2074 # new nodes.
2077 if rev == currentrev: # only display stuff in rev
2075 if rev == currentrev: # only display stuff in rev
2078 gr[0].append(rev)
2076 gr[0].append(rev)
2079 gr[1].remove(rev)
2077 gr[1].remove(rev)
2080 parents = [p for p in parentsfunc(rev) if p > node.nullrev]
2078 parents = [p for p in parentsfunc(rev) if p > node.nullrev]
2081 gr[1].update(parents)
2079 gr[1].update(parents)
2082 for p in parents:
2080 for p in parents:
2083 if p not in pendingset:
2081 if p not in pendingset:
2084 pendingset.add(p)
2082 pendingset.add(p)
2085 heappush(pendingheap, -p)
2083 heappush(pendingheap, -p)
2086
2084
2087 # Look for a subgroup to display
2085 # Look for a subgroup to display
2088 #
2086 #
2089 # When unblocked is empty (if clause), we were not waiting for any
2087 # When unblocked is empty (if clause), we were not waiting for any
2090 # revisions during the first iteration (if no priority was given) or
2088 # revisions during the first iteration (if no priority was given) or
2091 # if we emitted a whole disconnected set of the graph (reached a
2089 # if we emitted a whole disconnected set of the graph (reached a
2092 # root). In that case we arbitrarily take the oldest known
2090 # root). In that case we arbitrarily take the oldest known
2093 # subgroup. The heuristic could probably be better.
2091 # subgroup. The heuristic could probably be better.
2094 #
2092 #
2095 # Otherwise (elif clause) if the subgroup is blocked on
2093 # Otherwise (elif clause) if the subgroup is blocked on
2096 # a revision we just emitted, we can safely emit it as
2094 # a revision we just emitted, we can safely emit it as
2097 # well.
2095 # well.
2098 if not unblocked:
2096 if not unblocked:
2099 if len(groups) > 1: # display other subset
2097 if len(groups) > 1: # display other subset
2100 targetidx = 1
2098 targetidx = 1
2101 gr = groups[1]
2099 gr = groups[1]
2102 elif not gr[1] & unblocked:
2100 elif not gr[1] & unblocked:
2103 gr = None
2101 gr = None
2104
2102
2105 if gr is not None:
2103 if gr is not None:
2106 # update the set of awaited revisions with the one from the
2104 # update the set of awaited revisions with the one from the
2107 # subgroup
2105 # subgroup
2108 unblocked |= gr[1]
2106 unblocked |= gr[1]
2109 # output all revisions in the subgroup
2107 # output all revisions in the subgroup
2110 for r in gr[0]:
2108 for r in gr[0]:
2111 yield r
2109 yield r
2112 # delete the subgroup that you just output
2110 # delete the subgroup that you just output
2113 # unless it is groups[0] in which case you just empty it.
2111 # unless it is groups[0] in which case you just empty it.
2114 if targetidx:
2112 if targetidx:
2115 del groups[targetidx]
2113 del groups[targetidx]
2116 else:
2114 else:
2117 gr[0][:] = []
2115 gr[0][:] = []
2118 # Check if we have some subgroup waiting for revisions we are not going to
2116 # Check if we have some subgroup waiting for revisions we are not going to
2119 # iterate over
2117 # iterate over
2120 for g in groups:
2118 for g in groups:
2121 for r in g[0]:
2119 for r in g[0]:
2122 yield r
2120 yield r
2123
2121
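# Illustrative sketch, not from the original code: the loop above turns
# Python's min-heap into a max-heap by pushing negated revision numbers, so
# the highest pending revision is always processed first. A minimal
# standalone example of the same trick:
#
#     >>> import heapq
#     >>> heap = []
#     >>> for rev in [2, 7, 5]:
#     ...     heapq.heappush(heap, -rev)
#     >>> -heapq.heappop(heap)   # highest revision comes out first
#     7
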
@predicate('subrepo([pattern])')
def subrepo(repo, subset, x):
    """Changesets that add, modify or remove the given subrepo. If no subrepo
    pattern is named, any subrepo changes are returned.
    """
    # i18n: "subrepo" is a keyword
    args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
    pat = None
    if len(args) != 0:
        pat = getstring(args[0], _("subrepo requires a pattern"))

    m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])

    def submatches(names):
        k, p, m = util.stringmatcher(pat)
        for name in names:
            if m(name):
                yield name

    def matches(x):
        c = repo[x]
        s = repo.status(c.p1().node(), c.node(), match=m)

        if pat is None:
            return s.added or s.modified or s.removed

        if s.added:
            return any(submatches(c.substate.keys()))

        if s.modified:
            subs = set(c.p1().substate.keys())
            subs.update(c.substate.keys())

            for path in submatches(subs):
                if c.p1().substate.get(path) != c.substate.get(path):
                    return True

        if s.removed:
            return any(submatches(c.p1().substate.keys()))

        return False

    return subset.filter(matches, condrepr=('<subrepo %r>', pat))

def _substringmatcher(pattern):
    kind, pattern, matcher = util.stringmatcher(pattern)
    if kind == 'literal':
        matcher = lambda s: pattern in s
    return kind, pattern, matcher

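# Illustrative sketch, not from the original code: _substringmatcher builds on
# util.stringmatcher, which dispatches on an optional prefix. A literal
# pattern becomes a substring test, while a 're:' pattern keeps regular
# expression semantics (the exact return values below are an assumption based
# on the prefixes documented in this module):
#
#     >>> kind, pat, m = _substringmatcher('bob')
#     >>> kind, m('bob@example.com')
#     ('literal', True)
#     >>> kind, pat, m = _substringmatcher('re:bob|alice')
#     >>> kind, bool(m('alice'))
#     ('re', True)
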
@predicate('tag([name])', safe=True)
def tag(repo, subset, x):
    """The specified tag by name, or all tagged revisions if no name is given.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a tag that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "tag" is a keyword
    args = getargs(x, 0, 1, _("tag takes one or no arguments"))
    cl = repo.changelog
    if args:
        pattern = getstring(args[0],
                            # i18n: "tag" is a keyword
                            _('the argument to tag must be a string'))
        kind, pattern, matcher = util.stringmatcher(pattern)
        if kind == 'literal':
            # avoid resolving all tags
            tn = repo._tagscache.tags.get(pattern, None)
            if tn is None:
                raise error.RepoLookupError(_("tag '%s' does not exist")
                                            % pattern)
            s = set([repo[tn].rev()])
        else:
            s = set([cl.rev(n) for t, n in repo.tagslist() if matcher(t)])
    else:
        s = set([cl.rev(n) for t, n in repo.tagslist() if t != 'tip'])
    return subset & s

@predicate('tagged', safe=True)
def tagged(repo, subset, x):
    return tag(repo, subset, x)

@predicate('unstable()', safe=True)
def unstable(repo, subset, x):
    """Non-obsolete changesets with obsolete ancestors.
    """
    # i18n: "unstable" is a keyword
    getargs(x, 0, 0, _("unstable takes no arguments"))
    unstables = obsmod.getrevs(repo, 'unstable')
    return subset & unstables


@predicate('user(string)', safe=True)
def user(repo, subset, x):
    """User name contains string. The match is case-insensitive.

    If `string` starts with `re:`, the remainder of the string is treated as
    a regular expression. To match a user that actually contains `re:`, use
    the prefix `literal:`.
    """
    return author(repo, subset, x)

# experimental
@predicate('wdir', safe=True)
def wdir(repo, subset, x):
    # i18n: "wdir" is a keyword
    getargs(x, 0, 0, _("wdir takes no arguments"))
    if node.wdirrev in subset or isinstance(subset, fullreposet):
        return baseset([node.wdirrev])
    return baseset()

# for internal use
@predicate('_list', safe=True)
def _list(repo, subset, x):
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    # remove duplicates here. it's difficult for caller to deduplicate sets
    # because different symbols can point to the same rev.
    cl = repo.changelog
    ls = []
    seen = set()
    for t in s.split('\0'):
        try:
            # fast path for integer revision
            r = int(t)
            if str(r) != t or r not in cl:
                raise ValueError
            revs = [r]
        except ValueError:
            revs = stringset(repo, subset, t)

        for r in revs:
            if r in seen:
                continue
            if (r in subset
                or r == node.nullrev and isinstance(subset, fullreposet)):
                ls.append(r)
            seen.add(r)
    return baseset(ls)

# for internal use
@predicate('_intlist', safe=True)
def _intlist(repo, subset, x):
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    ls = [int(r) for r in s.split('\0')]
    s = subset
    return baseset([r for r in ls if r in s])

# for internal use
@predicate('_hexlist', safe=True)
def _hexlist(repo, subset, x):
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    cl = repo.changelog
    ls = [cl.rev(node.bin(r)) for r in s.split('\0')]
    s = subset
    return baseset([r for r in ls if r in s])

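# Illustrative sketch, not from the original code: the internal _list,
# _intlist and _hexlist predicates all consume a single '\0'-separated string
# argument, which is how formatspec() (defined further below) serialises
# Python lists such as '%ld' or '%ls' into a revset expression:
#
#     >>> formatspec('%ld', [10, 2, 7])
#     "_intlist('10\x002\x007')"
#     >>> formatspec('%ls', ['tip', '.'])
#     "_list('tip\x00.')"
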
methods = {
    "range": rangeset,
    "dagrange": dagrange,
    "string": stringset,
    "symbol": stringset,
    "and": andset,
    "or": orset,
    "not": notset,
    "difference": differenceset,
    "list": listset,
    "keyvalue": keyvaluepair,
    "func": func,
    "ancestor": ancestorspec,
    "parent": parentspec,
    "parentpost": p1,
}

def _matchonly(revs, bases):
    """
    >>> f = lambda *args: _matchonly(*map(parse, args))
    >>> f('ancestors(A)', 'not ancestors(B)')
    ('list', ('symbol', 'A'), ('symbol', 'B'))
    """
    if (revs is not None
        and revs[0] == 'func'
        and getsymbol(revs[1]) == 'ancestors'
        and bases is not None
        and bases[0] == 'not'
        and bases[1][0] == 'func'
        and getsymbol(bases[1][1]) == 'ancestors'):
        return ('list', revs[2], bases[1][2])

def _optimize(x, small):
    if x is None:
        return 0, x

    smallbonus = 1
    if small:
        smallbonus = .5

    op = x[0]
    if op == 'minus':
        return _optimize(('and', x[1], ('not', x[2])), small)
    elif op == 'only':
        t = ('func', ('symbol', 'only'), ('list', x[1], x[2]))
        return _optimize(t, small)
    elif op == 'onlypost':
        return _optimize(('func', ('symbol', 'only'), x[1]), small)
    elif op == 'dagrangepre':
        return _optimize(('func', ('symbol', 'ancestors'), x[1]), small)
    elif op == 'dagrangepost':
        return _optimize(('func', ('symbol', 'descendants'), x[1]), small)
    elif op == 'rangeall':
        return _optimize(('range', ('string', '0'), ('string', 'tip')), small)
    elif op == 'rangepre':
        return _optimize(('range', ('string', '0'), x[1]), small)
    elif op == 'rangepost':
        return _optimize(('range', x[1], ('string', 'tip')), small)
    elif op == 'negate':
        s = getstring(x[1], _("can't negate that"))
        return _optimize(('string', '-' + s), small)
    elif op in 'string symbol negate':
        return smallbonus, x # single revisions are small
    elif op == 'and':
        wa, ta = _optimize(x[1], True)
        wb, tb = _optimize(x[2], True)
        w = min(wa, wb)

        # (::x and not ::y)/(not ::y and ::x) have a fast path
        tm = _matchonly(ta, tb) or _matchonly(tb, ta)
        if tm:
            return w, ('func', ('symbol', 'only'), tm)

        if tb is not None and tb[0] == 'not':
            return wa, ('difference', ta, tb[1])

        if wa > wb:
            return w, (op, tb, ta)
        return w, (op, ta, tb)
    elif op == 'or':
        # fast path for machine-generated expression, that is likely to have
        # lots of trivial revisions: 'a + b + c()' to '_list(a b) + c()'
        ws, ts, ss = [], [], []
        def flushss():
            if not ss:
                return
            if len(ss) == 1:
                w, t = ss[0]
            else:
                s = '\0'.join(t[1] for w, t in ss)
                y = ('func', ('symbol', '_list'), ('string', s))
                w, t = _optimize(y, False)
            ws.append(w)
            ts.append(t)
            del ss[:]
        for y in x[1:]:
            w, t = _optimize(y, False)
            if t is not None and (t[0] == 'string' or t[0] == 'symbol'):
                ss.append((w, t))
                continue
            flushss()
            ws.append(w)
            ts.append(t)
        flushss()
        if len(ts) == 1:
            return ws[0], ts[0] # 'or' operation is fully optimized out
        # we can't reorder trees by weight because it would change the order.
        # ("sort(a + b)" == "sort(b + a)", but "a + b" != "b + a")
        # ts = tuple(t for w, t in sorted(zip(ws, ts), key=lambda wt: wt[0]))
        return max(ws), (op,) + tuple(ts)
    elif op == 'not':
        # Optimize not public() to _notpublic() because we have a fast version
        if x[1] == ('func', ('symbol', 'public'), None):
            newsym = ('func', ('symbol', '_notpublic'), None)
            o = _optimize(newsym, not small)
            return o[0], o[1]
        else:
            o = _optimize(x[1], not small)
            return o[0], (op, o[1])
    elif op == 'parentpost':
        o = _optimize(x[1], small)
        return o[0], (op, o[1])
    elif op == 'group':
        return _optimize(x[1], small)
    elif op in 'dagrange range parent ancestorspec':
        if op == 'parent':
            # x^:y means (x^) : y, not x ^ (:y)
            post = ('parentpost', x[1])
            if x[2][0] == 'dagrangepre':
                return _optimize(('dagrange', post, x[2][1]), small)
            elif x[2][0] == 'rangepre':
                return _optimize(('range', post, x[2][1]), small)

        wa, ta = _optimize(x[1], small)
        wb, tb = _optimize(x[2], small)
        return wa + wb, (op, ta, tb)
    elif op == 'list':
        ws, ts = zip(*(_optimize(y, small) for y in x[1:]))
        return sum(ws), (op,) + ts
    elif op == 'func':
        f = getsymbol(x[1])
        wa, ta = _optimize(x[2], small)
        if f in ("author branch closed date desc file grep keyword "
                 "outgoing user"):
            w = 10 # slow
        elif f in "modifies adds removes":
            w = 30 # slower
        elif f == "contains":
            w = 100 # very slow
        elif f == "ancestor":
            w = 1 * smallbonus
        elif f in "reverse limit first _intlist":
            w = 0
        elif f in "sort":
            w = 10 # assume most sorts look at changelog
        else:
            w = 1
        return w + wa, (op, x[1], ta)
    return 1, x

def optimize(tree):
    _weight, newtree = _optimize(tree, small=True)
    return newtree

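# Illustrative sketch, not from the original code: two of the rewrites
# performed by _optimize()/optimize() on parsed trees. 'x and not y' becomes a
# 'difference' node, and the '::x and not ::y' pattern is funnelled into the
# faster only() revset:
#
#     >>> optimize(parse('a and not b'))
#     ('difference', ('symbol', 'a'), ('symbol', 'b'))
#     >>> optimize(parse('::a and not ::b'))
#     ('func', ('symbol', 'only'), ('list', ('symbol', 'a'), ('symbol', 'b')))
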
# the set of valid characters for the initial letter of symbols in
# alias declarations and definitions
_aliassyminitletters = set(c for c in [chr(i) for i in xrange(256)]
                           if c.isalnum() or c in '._@$' or ord(c) > 127)

def _parsewith(spec, lookup=None, syminitletters=None):
    """Generate a parse tree of given spec with given tokenizing options

    >>> _parsewith('foo($1)', syminitletters=_aliassyminitletters)
    ('func', ('symbol', 'foo'), ('symbol', '$1'))
    >>> _parsewith('$1')
    Traceback (most recent call last):
      ...
    ParseError: ("syntax error in revset '$1'", 0)
    >>> _parsewith('foo bar')
    Traceback (most recent call last):
      ...
    ParseError: ('invalid token', 4)
    """
    p = parser.parser(elements)
    tree, pos = p.parse(tokenize(spec, lookup=lookup,
                                 syminitletters=syminitletters))
    if pos != len(spec):
        raise error.ParseError(_('invalid token'), pos)
    return parser.simplifyinfixops(tree, ('list', 'or'))

class _aliasrules(parser.basealiasrules):
    """Parsing and expansion rule set of revset aliases"""
    _section = _('revset alias')

    @staticmethod
    def _parse(spec):
        """Parse alias declaration/definition ``spec``

        This allows symbol names to use also ``$`` as an initial letter
        (for backward compatibility), and callers of this function should
        examine whether ``$`` is used also for unexpected symbols or not.
        """
        return _parsewith(spec, syminitletters=_aliassyminitletters)

    @staticmethod
    def _trygetfunc(tree):
        if tree[0] == 'func' and tree[1][0] == 'symbol':
            return tree[1][1], getlist(tree[2])

def expandaliases(ui, tree, showwarning=None):
    aliases = _aliasrules.buildmap(ui.configitems('revsetalias'))
    tree = _aliasrules.expand(aliases, tree)
    if showwarning:
        # warn about problematic (but not referred) aliases
        for name, alias in sorted(aliases.iteritems()):
            if alias.error and not alias.warned:
                showwarning(_('warning: %s\n') % (alias.error))
                alias.warned = True
    return tree

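# Illustrative sketch, not from the original code: expandaliases() rewrites
# symbols and function calls that match entries of the [revsetalias] config
# section. For example, with a hypothetical configuration such as
#
#     [revsetalias]
#     h = heads(default)
#
# the parsed node ('symbol', 'h') would be replaced by the parsed definition,
# roughly ('func', ('symbol', 'heads'), ('symbol', 'default')), before the
# tree is folded, optimized and evaluated.
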
def foldconcat(tree):
    """Fold elements to be concatenated by `##`
    """
    if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
        return tree
    if tree[0] == '_concat':
        pending = [tree]
        l = []
        while pending:
            e = pending.pop()
            if e[0] == '_concat':
                pending.extend(reversed(e[1:]))
            elif e[0] in ('string', 'symbol'):
                l.append(e[1])
            else:
                msg = _("\"##\" can't concatenate \"%s\" element") % (e[0])
                raise error.ParseError(msg)
        return ('string', ''.join(l))
    else:
        return tuple(foldconcat(t) for t in tree)

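# Illustrative sketch, not from the original code: foldconcat() collapses a
# '_concat' node produced by the '##' operator into a single string node,
# while leaving other node types untouched:
#
#     >>> foldconcat(('_concat', ('string', 'foo'), ('symbol', 'bar')))
#     ('string', 'foobar')
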
def parse(spec, lookup=None):
    return _parsewith(spec, lookup=lookup)

def posttreebuilthook(tree, repo):
    # hook for extensions to execute code on the optimized tree
    pass

def match(ui, spec, repo=None):
    """Create a matcher for a single revision spec."""
    return matchany(ui, [spec], repo=repo)

def matchany(ui, specs, repo=None):
    """Create a matcher that will include any revisions matching one of the
    given specs"""
    if not specs:
        def mfunc(repo, subset=None):
            return baseset()
        return mfunc
    if not all(specs):
        raise error.ParseError(_("empty query"))
    lookup = None
    if repo:
        lookup = repo.__contains__
    if len(specs) == 1:
        tree = parse(specs[0], lookup)
    else:
        tree = ('or',) + tuple(parse(s, lookup) for s in specs)
    return _makematcher(ui, tree, repo)

def _makematcher(ui, tree, repo):
    if ui:
        tree = expandaliases(ui, tree, showwarning=ui.warn)
    tree = foldconcat(tree)
    tree = optimize(tree)
    posttreebuilthook(tree, repo)
    def mfunc(repo, subset=None):
        if subset is None:
            subset = fullreposet(repo)
        if util.safehasattr(subset, 'isascending'):
            result = getset(repo, subset, tree)
        else:
            result = getset(repo, baseset(subset), tree)
        return result
    return mfunc

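# Illustrative sketch, not from the original code: match()/matchany() return a
# matcher function that is applied to a repository later on, so a caller
# (here with hypothetical 'ui' and 'repo' objects) would do something like:
#
#     m = matchany(ui, ['heads(default)', 'bookmark()'])
#     revs = m(repo)            # a smartset of matching revisions
#     revs = m(repo, subset)    # or restricted to an existing subset
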
def formatspec(expr, *args):
    '''
    This is a convenience function for using revsets internally, and
    escapes arguments appropriately. Aliases are intentionally ignored
    so that intended expression behavior isn't accidentally subverted.

    Supported arguments:

    %r = revset expression, parenthesized
    %d = int(arg), no quoting
    %s = string(arg), escaped and single-quoted
    %b = arg.branch(), escaped and single-quoted
    %n = hex(arg), single-quoted
    %% = a literal '%'

    Prefixing the type with 'l' specifies a parenthesized list of that type.

    >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
    '(10 or 11):: and ((this()) or (that()))'
    >>> formatspec('%d:: and not %d::', 10, 20)
    '10:: and not 20::'
    >>> formatspec('%ld or %ld', [], [1])
    "_list('') or 1"
    >>> formatspec('keyword(%s)', 'foo\\xe9')
    "keyword('foo\\\\xe9')"
    >>> b = lambda: 'default'
    >>> b.branch = b
    >>> formatspec('branch(%b)', b)
    "branch('default')"
    >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
    "root(_list('a\\x00b\\x00c\\x00d'))"
    '''

    def quote(s):
        return repr(str(s))

    def argtype(c, arg):
        if c == 'd':
            return str(int(arg))
        elif c == 's':
            return quote(arg)
        elif c == 'r':
            parse(arg) # make sure syntax errors are confined
            return '(%s)' % arg
        elif c == 'n':
            return quote(node.hex(arg))
        elif c == 'b':
            return quote(arg.branch())

    def listexp(s, t):
        l = len(s)
        if l == 0:
            return "_list('')"
        elif l == 1:
            return argtype(t, s[0])
        elif t == 'd':
            return "_intlist('%s')" % "\0".join(str(int(a)) for a in s)
        elif t == 's':
            return "_list('%s')" % "\0".join(s)
        elif t == 'n':
            return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
        elif t == 'b':
            return "_list('%s')" % "\0".join(a.branch() for a in s)

        m = l // 2
        return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))

    ret = ''
    pos = 0
    arg = 0
    while pos < len(expr):
        c = expr[pos]
        if c == '%':
            pos += 1
            d = expr[pos]
            if d == '%':
                ret += d
            elif d in 'dsnbr':
                ret += argtype(d, args[arg])
                arg += 1
            elif d == 'l':
                # a list of some type
                pos += 1
                d = expr[pos]
                ret += listexp(list(args[arg]), d)
                arg += 1
            else:
                raise error.Abort(_('unexpected revspec format character %s')
                                  % d)
        else:
            ret += c
        pos += 1

    return ret

def prettyformat(tree):
    return parser.prettyformat(tree, ('string', 'symbol'))

def depth(tree):
    if isinstance(tree, tuple):
        return max(map(depth, tree)) + 1
    else:
        return 0

def funcsused(tree):
    if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
        return set()
    else:
        funcs = set()
        for s in tree[1:]:
            funcs |= funcsused(s)
        if tree[0] == 'func':
            funcs.add(tree[1][1])
        return funcs

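# Illustrative sketch, not from the original code: depth() and funcsused() are
# small helpers for inspecting parsed revset trees:
#
#     >>> tree = parse('keyword(bug) or ancestors(tip)')
#     >>> depth(tree)
#     3
#     >>> sorted(funcsused(tree))
#     ['ancestors', 'keyword']
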
def _formatsetrepr(r):
    """Format an optional printable representation of a set

    ======== =================================
    type(r)  example
    ======== =================================
    tuple    ('<not %r>', other)
    str      '<branch closed>'
    callable lambda: '<branch %r>' % sorted(b)
    object   other
    ======== =================================
    """
    if r is None:
        return ''
    elif isinstance(r, tuple):
        return r[0] % r[1:]
    elif isinstance(r, str):
        return r
    elif callable(r):
        return r()
    else:
        return repr(r)

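# Illustrative sketch, not from the original code: the condrepr/datarepr
# flavours accepted by _formatsetrepr():
#
#     >>> _formatsetrepr(('<not %r>', 42))
#     '<not 42>'
#     >>> _formatsetrepr('<branch closed>')
#     '<branch closed>'
#     >>> _formatsetrepr(lambda: '<branch %r>' % ['default'])
#     "<branch ['default']>"
#     >>> _formatsetrepr(None)
#     ''
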
class abstractsmartset(object):

    def __nonzero__(self):
        """True if the smartset is not empty"""
        raise NotImplementedError()

    def __contains__(self, rev):
        """provide fast membership testing"""
        raise NotImplementedError()

    def __iter__(self):
        """iterate the set in the order it is supposed to be iterated"""
        raise NotImplementedError()

    # Attributes containing a function to perform a fast iteration in a given
    # direction. A smartset can have none, one, or both defined.
    #
    # Default value is None instead of a function returning None to avoid
    # initializing an iterator just for testing if a fast method exists.
    fastasc = None
    fastdesc = None

    def isascending(self):
        """True if the set will iterate in ascending order"""
        raise NotImplementedError()

    def isdescending(self):
        """True if the set will iterate in descending order"""
        raise NotImplementedError()

    def istopo(self):
        """True if the set will iterate in topological order"""
        raise NotImplementedError()

    @util.cachefunc
    def min(self):
        """return the minimum element in the set"""
        if self.fastasc is not None:
            for r in self.fastasc():
                return r
            raise ValueError('arg is an empty sequence')
        return min(self)

    @util.cachefunc
    def max(self):
        """return the maximum element in the set"""
        if self.fastdesc is not None:
            for r in self.fastdesc():
                return r
            raise ValueError('arg is an empty sequence')
        return max(self)

    def first(self):
        """return the first element in the set (user iteration perspective)

        Return None if the set is empty"""
        raise NotImplementedError()

    def last(self):
        """return the last element in the set (user iteration perspective)

        Return None if the set is empty"""
        raise NotImplementedError()

    def __len__(self):
        """return the length of the smartset

        This can be expensive on a smartset that could be lazy otherwise."""
        raise NotImplementedError()

    def reverse(self):
        """reverse the expected iteration order"""
        raise NotImplementedError()

    def sort(self, reverse=True):
        """get the set to iterate in an ascending or descending order"""
        raise NotImplementedError()

    def __and__(self, other):
        """Returns a new object with the intersection of the two collections.

        This is part of the mandatory API for smartset."""
        if isinstance(other, fullreposet):
            return self
        return self.filter(other.__contains__, condrepr=other, cache=False)

    def __add__(self, other):
        """Returns a new object with the union of the two collections.

        This is part of the mandatory API for smartset."""
        return addset(self, other)

    def __sub__(self, other):
        """Returns a new object with the subtraction of the two collections.

        This is part of the mandatory API for smartset."""
        c = other.__contains__
        return self.filter(lambda r: not c(r), condrepr=('<not %r>', other),
                           cache=False)

    def filter(self, condition, condrepr=None, cache=True):
        """Returns this smartset filtered by condition as a new smartset.

        `condition` is a callable which takes a revision number and returns a
        boolean. Optional `condrepr` provides a printable representation of
        the given `condition`.

        This is part of the mandatory API for smartset."""
        # builtins cannot be cached, but they do not need to be
        if cache and util.safehasattr(condition, 'func_code'):
            condition = util.cachefunc(condition)
        return filteredset(self, condition, condrepr)

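# Illustrative sketch, not from the original code: the set operators defined
# on abstractsmartset are inherited by the concrete classes below, so for
# instance with baseset (defined next):
#
#     >>> xs = baseset([0, 1, 2, 3])
#     >>> ys = baseset([2, 3, 4])
#     >>> list(xs & ys), list(xs - ys)
#     ([2, 3], [0, 1])
#
# '&' and '-' build lazy filteredset wrappers, while '+' builds an addset of
# the two operands.
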
class baseset(abstractsmartset):
    """Basic data structure that represents a revset and contains the basic
    operation that it should be able to perform.

    Every method in this class should be implemented by any smartset class.
    """
    def __init__(self, data=(), datarepr=None, istopo=False):
        """
        datarepr: a tuple of (format, obj, ...), a function or an object that
                  provides a printable representation of the given data.
        """
        self._ascending = None
        self._istopo = istopo
        if not isinstance(data, list):
            if isinstance(data, set):
                self._set = data
                # a set has no order, we pick one for stability purposes
                self._ascending = True
            data = list(data)
        self._list = data
        self._datarepr = datarepr

    @util.propertycache
    def _set(self):
        return set(self._list)

    @util.propertycache
    def _asclist(self):
        asclist = self._list[:]
        asclist.sort()
        return asclist

    def __iter__(self):
        if self._ascending is None:
            return iter(self._list)
        elif self._ascending:
            return iter(self._asclist)
        else:
            return reversed(self._asclist)

    def fastasc(self):
        return iter(self._asclist)

    def fastdesc(self):
        return reversed(self._asclist)

    @util.propertycache
    def __contains__(self):
        return self._set.__contains__

    def __nonzero__(self):
        return bool(self._list)

    def sort(self, reverse=False):
        self._ascending = not bool(reverse)
        self._istopo = False

    def reverse(self):
        if self._ascending is None:
            self._list.reverse()
        else:
            self._ascending = not self._ascending
        self._istopo = False

    def __len__(self):
        return len(self._list)

    def isascending(self):
        """Returns True if the collection is in ascending order, False if
        not.

        This is part of the mandatory API for smartset."""
        if len(self) <= 1:
            return True
        return self._ascending is not None and self._ascending

    def isdescending(self):
        """Returns True if the collection is in descending order, False if
        not.

        This is part of the mandatory API for smartset."""
        if len(self) <= 1:
            return True
        return self._ascending is not None and not self._ascending

    def istopo(self):
        """Returns True if the collection is in topological order, False if
        not.

        This is part of the mandatory API for smartset."""
        if len(self) <= 1:
            return True
        return self._istopo

    def first(self):
        if self:
            if self._ascending is None:
                return self._list[0]
            elif self._ascending:
                return self._asclist[0]
            else:
                return self._asclist[-1]
        return None

    def last(self):
        if self:
            if self._ascending is None:
                return self._list[-1]
            elif self._ascending:
                return self._asclist[-1]
            else:
                return self._asclist[0]
        return None

    def __repr__(self):
        d = {None: '', False: '-', True: '+'}[self._ascending]
        s = _formatsetrepr(self._datarepr)
        if not s:
            l = self._list
            # if _list has been built from a set, it might have a different
            # order from one python implementation to another.
            # We fall back to the sorted version for a stable output.
            if self._ascending is not None:
                l = self._asclist
            s = repr(l)
        return '<%s%s %s>' % (type(self).__name__, d, s)

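# Illustrative sketch, not from the original code: baseset preserves insertion
# order until an explicit ordering is requested, and its repr reflects the
# current direction ('+' ascending, '-' descending):
#
#     >>> s = baseset([3, 1, 2])
#     >>> list(s), s.first(), s.last()
#     ([3, 1, 2], 3, 2)
#     >>> s.sort(reverse=True)
#     >>> list(s), repr(s)
#     ([3, 2, 1], '<baseset- [1, 2, 3]>')
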
2948 class filteredset(abstractsmartset):
2946 class filteredset(abstractsmartset):
2949 """Duck type for baseset class which iterates lazily over the revisions in
2947 """Duck type for baseset class which iterates lazily over the revisions in
2950 the subset and contains a function which tests for membership in the
2948 the subset and contains a function which tests for membership in the
2951 revset
2949 revset
2952 """
2950 """
2953 def __init__(self, subset, condition=lambda x: True, condrepr=None):
2951 def __init__(self, subset, condition=lambda x: True, condrepr=None):
2954 """
2952 """
2955 condition: a function that decide whether a revision in the subset
2953 condition: a function that decide whether a revision in the subset
2956 belongs to the revset or not.
2954 belongs to the revset or not.
2957 condrepr: a tuple of (format, obj, ...), a function or an object that
2955 condrepr: a tuple of (format, obj, ...), a function or an object that
2958 provides a printable representation of the given condition.
2956 provides a printable representation of the given condition.
2959 """
2957 """
2960 self._subset = subset
2958 self._subset = subset
2961 self._condition = condition
2959 self._condition = condition
2962 self._condrepr = condrepr
2960 self._condrepr = condrepr
2963
2961
2964 def __contains__(self, x):
2962 def __contains__(self, x):
2965 return x in self._subset and self._condition(x)
2963 return x in self._subset and self._condition(x)
2966
2964
2967 def __iter__(self):
2965 def __iter__(self):
2968 return self._iterfilter(self._subset)
2966 return self._iterfilter(self._subset)
2969
2967
2970 def _iterfilter(self, it):
2968 def _iterfilter(self, it):
2971 cond = self._condition
2969 cond = self._condition
2972 for x in it:
2970 for x in it:
2973 if cond(x):
2971 if cond(x):
2974 yield x
2972 yield x
2975
2973
2976 @property
2974 @property
2977 def fastasc(self):
2975 def fastasc(self):
2978 it = self._subset.fastasc
2976 it = self._subset.fastasc
2979 if it is None:
2977 if it is None:
2980 return None
2978 return None
2981 return lambda: self._iterfilter(it())
2979 return lambda: self._iterfilter(it())
2982
2980
2983 @property
2981 @property
2984 def fastdesc(self):
2982 def fastdesc(self):
2985 it = self._subset.fastdesc
2983 it = self._subset.fastdesc
2986 if it is None:
2984 if it is None:
2987 return None
2985 return None
2988 return lambda: self._iterfilter(it())
2986 return lambda: self._iterfilter(it())
2989
2987
2990 def __nonzero__(self):
2988 def __nonzero__(self):
2991 fast = None
2989 fast = None
2992 candidates = [self.fastasc if self.isascending() else None,
2990 candidates = [self.fastasc if self.isascending() else None,
2993 self.fastdesc if self.isdescending() else None,
2991 self.fastdesc if self.isdescending() else None,
2994 self.fastasc,
2992 self.fastasc,
2995 self.fastdesc]
2993 self.fastdesc]
2996 for candidate in candidates:
2994 for candidate in candidates:
2997 if candidate is not None:
2995 if candidate is not None:
2998 fast = candidate
2996 fast = candidate
2999 break
2997 break
3000
2998
3001 if fast is not None:
2999 if fast is not None:
3002 it = fast()
3000 it = fast()
3003 else:
3001 else:
3004 it = self
3002 it = self
3005
3003
3006 for r in it:
3004 for r in it:
3007 return True
3005 return True
3008 return False
3006 return False
3009
3007
3010 def __len__(self):
3008 def __len__(self):
3011 # Basic implementation to be changed in future patches.
3009 # Basic implementation to be changed in future patches.
3012 # Until this gets improved, we use a generator expression
3010 # Until this gets improved, we use a generator expression
3013 # here, since a list comprehension is free to call __len__ again,
3011 # here, since a list comprehension is free to call __len__ again,
3014 # causing infinite recursion.
3012 # causing infinite recursion.
3015 l = baseset(r for r in self)
3013 l = baseset(r for r in self)
3016 return len(l)
3014 return len(l)
3017
3015
3018 def sort(self, reverse=False):
3016 def sort(self, reverse=False):
3019 self._subset.sort(reverse=reverse)
3017 self._subset.sort(reverse=reverse)
3020
3018
3021 def reverse(self):
3019 def reverse(self):
3022 self._subset.reverse()
3020 self._subset.reverse()
3023
3021
3024 def isascending(self):
3022 def isascending(self):
3025 return self._subset.isascending()
3023 return self._subset.isascending()
3026
3024
3027 def isdescending(self):
3025 def isdescending(self):
3028 return self._subset.isdescending()
3026 return self._subset.isdescending()
3029
3027
3030 def istopo(self):
3028 def istopo(self):
3031 return self._subset.istopo()
3029 return self._subset.istopo()
3032
3030
3033 def first(self):
3031 def first(self):
3034 for x in self:
3032 for x in self:
3035 return x
3033 return x
3036 return None
3034 return None
3037
3035
3038 def last(self):
3036 def last(self):
3039 it = None
3037 it = None
3040 if self.isascending():
3038 if self.isascending():
3041 it = self.fastdesc
3039 it = self.fastdesc
3042 elif self.isdescending():
3040 elif self.isdescending():
3043 it = self.fastasc
3041 it = self.fastasc
3044 if it is not None:
3042 if it is not None:
3045 for x in it():
3043 for x in it():
3046 return x
3044 return x
3047 return None #empty case
3045 return None #empty case
3048 else:
3046 else:
3049 x = None
3047 x = None
3050 for x in self:
3048 for x in self:
3051 pass
3049 pass
3052 return x
3050 return x
3053
3051
3054 def __repr__(self):
3052 def __repr__(self):
3055 xs = [repr(self._subset)]
3053 xs = [repr(self._subset)]
3056 s = _formatsetrepr(self._condrepr)
3054 s = _formatsetrepr(self._condrepr)
3057 if s:
3055 if s:
3058 xs.append(s)
3056 xs.append(s)
3059 return '<%s %s>' % (type(self).__name__, ', '.join(xs))
3057 return '<%s %s>' % (type(self).__name__, ', '.join(xs))
3060
3058
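
As a quick illustration of the laziness described above, here is a minimal sketch, assuming the mercurial.revset module layout of this revision (Python 2): membership checks the subset first and then the condition, while iteration filters one value at a time.

from mercurial import revset

xs = revset.baseset([0, 1, 2, 3, 4])
# keep only even revisions; the condition runs once per candidate revision
evens = revset.filteredset(xs, lambda r: r % 2 == 0)

2 in evens     # True: in the subset and accepted by the condition
3 in evens     # False: in the subset, but rejected by the condition
list(evens)    # [0, 2, 4], produced lazily by _iterfilter
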
3061 def _iterordered(ascending, iter1, iter2):
3059 def _iterordered(ascending, iter1, iter2):
3062 """produce an ordered iteration from two iterators with the same order
3060 """produce an ordered iteration from two iterators with the same order
3063
3061
3064 The `ascending` parameter indicates the iteration direction.
3062 The `ascending` parameter indicates the iteration direction.
3065 """
3063 """
3066 choice = max
3064 choice = max
3067 if ascending:
3065 if ascending:
3068 choice = min
3066 choice = min
3069
3067
3070 val1 = None
3068 val1 = None
3071 val2 = None
3069 val2 = None
3072 try:
3070 try:
3073 # Consume both iterators in an ordered way until one is empty
3071 # Consume both iterators in an ordered way until one is empty
3074 while True:
3072 while True:
3075 if val1 is None:
3073 if val1 is None:
3076 val1 = next(iter1)
3074 val1 = next(iter1)
3077 if val2 is None:
3075 if val2 is None:
3078 val2 = next(iter2)
3076 val2 = next(iter2)
3079 n = choice(val1, val2)
3077 n = choice(val1, val2)
3080 yield n
3078 yield n
3081 if val1 == n:
3079 if val1 == n:
3082 val1 = None
3080 val1 = None
3083 if val2 == n:
3081 if val2 == n:
3084 val2 = None
3082 val2 = None
3085 except StopIteration:
3083 except StopIteration:
3086 # Flush any remaining values and consume the other one
3084 # Flush any remaining values and consume the other one
3087 it = iter2
3085 it = iter2
3088 if val1 is not None:
3086 if val1 is not None:
3089 yield val1
3087 yield val1
3090 it = iter1
3088 it = iter1
3091 elif val2 is not None:
3089 elif val2 is not None:
3092 # might have been equality and both are empty
3090 # might have been equality and both are empty
3093 yield val2
3091 yield val2
3094 for val in it:
3092 for val in it:
3095 yield val
3093 yield val
3096
3094
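
To make the merge behaviour concrete, a small sketch of the helper above (it is internal, so this is illustration only, again assuming mercurial.revset): with ascending=True both inputs must already be ascending, and a value present in both streams is yielded only once.

from mercurial import revset

a = iter([1, 3, 5])
b = iter([2, 3, 6])
# min() picks the smaller head at each step; the shared value 3 appears once
list(revset._iterordered(True, a, b))    # [1, 2, 3, 5, 6]
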
3097 class addset(abstractsmartset):
3095 class addset(abstractsmartset):
3098 """Represent the addition of two sets
3096 """Represent the addition of two sets
3099
3097
3100 Wrapper structure for lazily adding two structures without losing much
3098 Wrapper structure for lazily adding two structures without losing much
3101 performance on the __contains__ method
3099 performance on the __contains__ method
3102
3100
3103 If the ascending attribute is set, that means the two structures are
3101 If the ascending attribute is set, that means the two structures are
3104 ordered in either an ascending or descending way. Therefore, we can add
3102 ordered in either an ascending or descending way. Therefore, we can add
3105 them maintaining the order by iterating over both at the same time
3103 them maintaining the order by iterating over both at the same time
3106
3104
3107 >>> xs = baseset([0, 3, 2])
3105 >>> xs = baseset([0, 3, 2])
3108 >>> ys = baseset([5, 2, 4])
3106 >>> ys = baseset([5, 2, 4])
3109
3107
3110 >>> rs = addset(xs, ys)
3108 >>> rs = addset(xs, ys)
3111 >>> bool(rs), 0 in rs, 1 in rs, 5 in rs, rs.first(), rs.last()
3109 >>> bool(rs), 0 in rs, 1 in rs, 5 in rs, rs.first(), rs.last()
3112 (True, True, False, True, 0, 4)
3110 (True, True, False, True, 0, 4)
3113 >>> rs = addset(xs, baseset([]))
3111 >>> rs = addset(xs, baseset([]))
3114 >>> bool(rs), 0 in rs, 1 in rs, rs.first(), rs.last()
3112 >>> bool(rs), 0 in rs, 1 in rs, rs.first(), rs.last()
3115 (True, True, False, 0, 2)
3113 (True, True, False, 0, 2)
3116 >>> rs = addset(baseset([]), baseset([]))
3114 >>> rs = addset(baseset([]), baseset([]))
3117 >>> bool(rs), 0 in rs, rs.first(), rs.last()
3115 >>> bool(rs), 0 in rs, rs.first(), rs.last()
3118 (False, False, None, None)
3116 (False, False, None, None)
3119
3117
3120 iterate unsorted:
3118 iterate unsorted:
3121 >>> rs = addset(xs, ys)
3119 >>> rs = addset(xs, ys)
3122 >>> # (use generator because pypy could call len())
3120 >>> # (use generator because pypy could call len())
3123 >>> list(x for x in rs) # without _genlist
3121 >>> list(x for x in rs) # without _genlist
3124 [0, 3, 2, 5, 4]
3122 [0, 3, 2, 5, 4]
3125 >>> assert not rs._genlist
3123 >>> assert not rs._genlist
3126 >>> len(rs)
3124 >>> len(rs)
3127 5
3125 5
3128 >>> [x for x in rs] # with _genlist
3126 >>> [x for x in rs] # with _genlist
3129 [0, 3, 2, 5, 4]
3127 [0, 3, 2, 5, 4]
3130 >>> assert rs._genlist
3128 >>> assert rs._genlist
3131
3129
3132 iterate ascending:
3130 iterate ascending:
3133 >>> rs = addset(xs, ys, ascending=True)
3131 >>> rs = addset(xs, ys, ascending=True)
3134 >>> # (use generator because pypy could call len())
3132 >>> # (use generator because pypy could call len())
3135 >>> list(x for x in rs), list(x for x in rs.fastasc()) # without _asclist
3133 >>> list(x for x in rs), list(x for x in rs.fastasc()) # without _asclist
3136 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
3134 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
3137 >>> assert not rs._asclist
3135 >>> assert not rs._asclist
3138 >>> len(rs)
3136 >>> len(rs)
3139 5
3137 5
3140 >>> [x for x in rs], [x for x in rs.fastasc()]
3138 >>> [x for x in rs], [x for x in rs.fastasc()]
3141 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
3139 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
3142 >>> assert rs._asclist
3140 >>> assert rs._asclist
3143
3141
3144 iterate descending:
3142 iterate descending:
3145 >>> rs = addset(xs, ys, ascending=False)
3143 >>> rs = addset(xs, ys, ascending=False)
3146 >>> # (use generator because pypy could call len())
3144 >>> # (use generator because pypy could call len())
3147 >>> list(x for x in rs), list(x for x in rs.fastdesc()) # without _asclist
3145 >>> list(x for x in rs), list(x for x in rs.fastdesc()) # without _asclist
3148 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
3146 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
3149 >>> assert not rs._asclist
3147 >>> assert not rs._asclist
3150 >>> len(rs)
3148 >>> len(rs)
3151 5
3149 5
3152 >>> [x for x in rs], [x for x in rs.fastdesc()]
3150 >>> [x for x in rs], [x for x in rs.fastdesc()]
3153 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
3151 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
3154 >>> assert rs._asclist
3152 >>> assert rs._asclist
3155
3153
3156 iterate ascending without fastasc:
3154 iterate ascending without fastasc:
3157 >>> rs = addset(xs, generatorset(ys), ascending=True)
3155 >>> rs = addset(xs, generatorset(ys), ascending=True)
3158 >>> assert rs.fastasc is None
3156 >>> assert rs.fastasc is None
3159 >>> [x for x in rs]
3157 >>> [x for x in rs]
3160 [0, 2, 3, 4, 5]
3158 [0, 2, 3, 4, 5]
3161
3159
3162 iterate descending without fastdesc:
3160 iterate descending without fastdesc:
3163 >>> rs = addset(generatorset(xs), ys, ascending=False)
3161 >>> rs = addset(generatorset(xs), ys, ascending=False)
3164 >>> assert rs.fastdesc is None
3162 >>> assert rs.fastdesc is None
3165 >>> [x for x in rs]
3163 >>> [x for x in rs]
3166 [5, 4, 3, 2, 0]
3164 [5, 4, 3, 2, 0]
3167 """
3165 """
3168 def __init__(self, revs1, revs2, ascending=None):
3166 def __init__(self, revs1, revs2, ascending=None):
3169 self._r1 = revs1
3167 self._r1 = revs1
3170 self._r2 = revs2
3168 self._r2 = revs2
3171 self._iter = None
3169 self._iter = None
3172 self._ascending = ascending
3170 self._ascending = ascending
3173 self._genlist = None
3171 self._genlist = None
3174 self._asclist = None
3172 self._asclist = None
3175
3173
3176 def __len__(self):
3174 def __len__(self):
3177 return len(self._list)
3175 return len(self._list)
3178
3176
3179 def __nonzero__(self):
3177 def __nonzero__(self):
3180 return bool(self._r1) or bool(self._r2)
3178 return bool(self._r1) or bool(self._r2)
3181
3179
3182 @util.propertycache
3180 @util.propertycache
3183 def _list(self):
3181 def _list(self):
3184 if not self._genlist:
3182 if not self._genlist:
3185 self._genlist = baseset(iter(self))
3183 self._genlist = baseset(iter(self))
3186 return self._genlist
3184 return self._genlist
3187
3185
3188 def __iter__(self):
3186 def __iter__(self):
3189 """Iterate over both collections without repeating elements
3187 """Iterate over both collections without repeating elements
3190
3188
3191 If the ascending attribute is not set, iterate over the first one and
3189 If the ascending attribute is not set, iterate over the first one and
3192 then over the second one checking for membership on the first one so we
3190 then over the second one checking for membership on the first one so we
3193 don't yield any duplicates.
3191 don't yield any duplicates.
3194
3192
3195 If the ascending attribute is set, iterate over both collections at the
3193 If the ascending attribute is set, iterate over both collections at the
3196 same time, yielding only one value at a time in the given order.
3194 same time, yielding only one value at a time in the given order.
3197 """
3195 """
3198 if self._ascending is None:
3196 if self._ascending is None:
3199 if self._genlist:
3197 if self._genlist:
3200 return iter(self._genlist)
3198 return iter(self._genlist)
3201 def arbitraryordergen():
3199 def arbitraryordergen():
3202 for r in self._r1:
3200 for r in self._r1:
3203 yield r
3201 yield r
3204 inr1 = self._r1.__contains__
3202 inr1 = self._r1.__contains__
3205 for r in self._r2:
3203 for r in self._r2:
3206 if not inr1(r):
3204 if not inr1(r):
3207 yield r
3205 yield r
3208 return arbitraryordergen()
3206 return arbitraryordergen()
3209 # try to use our own fast iterator if it exists
3207 # try to use our own fast iterator if it exists
3210 self._trysetasclist()
3208 self._trysetasclist()
3211 if self._ascending:
3209 if self._ascending:
3212 attr = 'fastasc'
3210 attr = 'fastasc'
3213 else:
3211 else:
3214 attr = 'fastdesc'
3212 attr = 'fastdesc'
3215 it = getattr(self, attr)
3213 it = getattr(self, attr)
3216 if it is not None:
3214 if it is not None:
3217 return it()
3215 return it()
3218 # maybe only one of the two components supports fast iteration
3216 # maybe only one of the two components supports fast iteration
3219 # get iterator for _r1
3217 # get iterator for _r1
3220 iter1 = getattr(self._r1, attr)
3218 iter1 = getattr(self._r1, attr)
3221 if iter1 is None:
3219 if iter1 is None:
3222 # let's avoid side effect (not sure it matters)
3220 # let's avoid side effect (not sure it matters)
3223 iter1 = iter(sorted(self._r1, reverse=not self._ascending))
3221 iter1 = iter(sorted(self._r1, reverse=not self._ascending))
3224 else:
3222 else:
3225 iter1 = iter1()
3223 iter1 = iter1()
3226 # get iterator for _r2
3224 # get iterator for _r2
3227 iter2 = getattr(self._r2, attr)
3225 iter2 = getattr(self._r2, attr)
3228 if iter2 is None:
3226 if iter2 is None:
3229 # let's avoid side effect (not sure it matters)
3227 # let's avoid side effect (not sure it matters)
3230 iter2 = iter(sorted(self._r2, reverse=not self._ascending))
3228 iter2 = iter(sorted(self._r2, reverse=not self._ascending))
3231 else:
3229 else:
3232 iter2 = iter2()
3230 iter2 = iter2()
3233 return _iterordered(self._ascending, iter1, iter2)
3231 return _iterordered(self._ascending, iter1, iter2)
3234
3232
3235 def _trysetasclist(self):
3233 def _trysetasclist(self):
3236 """populate the _asclist attribute if possible and necessary"""
3234 """populate the _asclist attribute if possible and necessary"""
3237 if self._genlist is not None and self._asclist is None:
3235 if self._genlist is not None and self._asclist is None:
3238 self._asclist = sorted(self._genlist)
3236 self._asclist = sorted(self._genlist)
3239
3237
3240 @property
3238 @property
3241 def fastasc(self):
3239 def fastasc(self):
3242 self._trysetasclist()
3240 self._trysetasclist()
3243 if self._asclist is not None:
3241 if self._asclist is not None:
3244 return self._asclist.__iter__
3242 return self._asclist.__iter__
3245 iter1 = self._r1.fastasc
3243 iter1 = self._r1.fastasc
3246 iter2 = self._r2.fastasc
3244 iter2 = self._r2.fastasc
3247 if None in (iter1, iter2):
3245 if None in (iter1, iter2):
3248 return None
3246 return None
3249 return lambda: _iterordered(True, iter1(), iter2())
3247 return lambda: _iterordered(True, iter1(), iter2())
3250
3248
3251 @property
3249 @property
3252 def fastdesc(self):
3250 def fastdesc(self):
3253 self._trysetasclist()
3251 self._trysetasclist()
3254 if self._asclist is not None:
3252 if self._asclist is not None:
3255 return self._asclist.__reversed__
3253 return self._asclist.__reversed__
3256 iter1 = self._r1.fastdesc
3254 iter1 = self._r1.fastdesc
3257 iter2 = self._r2.fastdesc
3255 iter2 = self._r2.fastdesc
3258 if None in (iter1, iter2):
3256 if None in (iter1, iter2):
3259 return None
3257 return None
3260 return lambda: _iterordered(False, iter1(), iter2())
3258 return lambda: _iterordered(False, iter1(), iter2())
3261
3259
3262 def __contains__(self, x):
3260 def __contains__(self, x):
3263 return x in self._r1 or x in self._r2
3261 return x in self._r1 or x in self._r2
3264
3262
3265 def sort(self, reverse=False):
3263 def sort(self, reverse=False):
3266 """Sort the added set
3264 """Sort the added set
3267
3265
3268 For this we use the cached list with all the generated values and if we
3266 For this we use the cached list with all the generated values and if we
3269 know they are ascending or descending we can sort them in a smart way.
3267 know they are ascending or descending we can sort them in a smart way.
3270 """
3268 """
3271 self._ascending = not reverse
3269 self._ascending = not reverse
3272
3270
3273 def isascending(self):
3271 def isascending(self):
3274 return self._ascending is not None and self._ascending
3272 return self._ascending is not None and self._ascending
3275
3273
3276 def isdescending(self):
3274 def isdescending(self):
3277 return self._ascending is not None and not self._ascending
3275 return self._ascending is not None and not self._ascending
3278
3276
3279 def istopo(self):
3277 def istopo(self):
3280 # not worth the trouble asserting if the two sets combined are still
3278 # not worth the trouble asserting if the two sets combined are still
3281 # in topological order. Use the sort() method to explicitly sort
3279 # in topological order. Use the sort() method to explicitly sort
3282 # again instead.
3280 # again instead.
3283 return False
3281 return False
3284
3282
3285 def reverse(self):
3283 def reverse(self):
3286 if self._ascending is None:
3284 if self._ascending is None:
3287 self._list.reverse()
3285 self._list.reverse()
3288 else:
3286 else:
3289 self._ascending = not self._ascending
3287 self._ascending = not self._ascending
3290
3288
3291 def first(self):
3289 def first(self):
3292 for x in self:
3290 for x in self:
3293 return x
3291 return x
3294 return None
3292 return None
3295
3293
3296 def last(self):
3294 def last(self):
3297 self.reverse()
3295 self.reverse()
3298 val = self.first()
3296 val = self.first()
3299 self.reverse()
3297 self.reverse()
3300 return val
3298 return val
3301
3299
3302 def __repr__(self):
3300 def __repr__(self):
3303 d = {None: '', False: '-', True: '+'}[self._ascending]
3301 d = {None: '', False: '-', True: '+'}[self._ascending]
3304 return '<%s%s %r, %r>' % (type(self).__name__, d, self._r1, self._r2)
3302 return '<%s%s %r, %r>' % (type(self).__name__, d, self._r1, self._r2)
3305
3303
3306 class generatorset(abstractsmartset):
3304 class generatorset(abstractsmartset):
3307 """Wrap a generator for lazy iteration
3305 """Wrap a generator for lazy iteration
3308
3306
3309 Wrapper structure for generators that provides lazy membership and can
3307 Wrapper structure for generators that provides lazy membership and can
3310 be iterated more than once.
3308 be iterated more than once.
3311 When asked for membership it generates values until either it finds the
3309 When asked for membership it generates values until either it finds the
3312 requested one or has gone through all the elements in the generator
3310 requested one or has gone through all the elements in the generator
3313 """
3311 """
3314 def __init__(self, gen, iterasc=None):
3312 def __init__(self, gen, iterasc=None):
3315 """
3313 """
3316 gen: a generator producing the values for the generatorset.
3314 gen: a generator producing the values for the generatorset.
3317 """
3315 """
3318 self._gen = gen
3316 self._gen = gen
3319 self._asclist = None
3317 self._asclist = None
3320 self._cache = {}
3318 self._cache = {}
3321 self._genlist = []
3319 self._genlist = []
3322 self._finished = False
3320 self._finished = False
3323 self._ascending = True
3321 self._ascending = True
3324 if iterasc is not None:
3322 if iterasc is not None:
3325 if iterasc:
3323 if iterasc:
3326 self.fastasc = self._iterator
3324 self.fastasc = self._iterator
3327 self.__contains__ = self._asccontains
3325 self.__contains__ = self._asccontains
3328 else:
3326 else:
3329 self.fastdesc = self._iterator
3327 self.fastdesc = self._iterator
3330 self.__contains__ = self._desccontains
3328 self.__contains__ = self._desccontains
3331
3329
3332 def __nonzero__(self):
3330 def __nonzero__(self):
3333 # Do not use 'for r in self' because it will enforce the iteration
3331 # Do not use 'for r in self' because it will enforce the iteration
3334 # order (default ascending), possibly unrolling a whole descending
3332 # order (default ascending), possibly unrolling a whole descending
3335 # iterator.
3333 # iterator.
3336 if self._genlist:
3334 if self._genlist:
3337 return True
3335 return True
3338 for r in self._consumegen():
3336 for r in self._consumegen():
3339 return True
3337 return True
3340 return False
3338 return False
3341
3339
3342 def __contains__(self, x):
3340 def __contains__(self, x):
3343 if x in self._cache:
3341 if x in self._cache:
3344 return self._cache[x]
3342 return self._cache[x]
3345
3343
3346 # Use new values only, as existing values would be cached.
3344 # Use new values only, as existing values would be cached.
3347 for l in self._consumegen():
3345 for l in self._consumegen():
3348 if l == x:
3346 if l == x:
3349 return True
3347 return True
3350
3348
3351 self._cache[x] = False
3349 self._cache[x] = False
3352 return False
3350 return False
3353
3351
3354 def _asccontains(self, x):
3352 def _asccontains(self, x):
3355 """version of contains optimised for ascending generator"""
3353 """version of contains optimised for ascending generator"""
3356 if x in self._cache:
3354 if x in self._cache:
3357 return self._cache[x]
3355 return self._cache[x]
3358
3356
3359 # Use new values only, as existing values would be cached.
3357 # Use new values only, as existing values would be cached.
3360 for l in self._consumegen():
3358 for l in self._consumegen():
3361 if l == x:
3359 if l == x:
3362 return True
3360 return True
3363 if l > x:
3361 if l > x:
3364 break
3362 break
3365
3363
3366 self._cache[x] = False
3364 self._cache[x] = False
3367 return False
3365 return False
3368
3366
3369 def _desccontains(self, x):
3367 def _desccontains(self, x):
3370 """version of contains optimised for descending generator"""
3368 """version of contains optimised for descending generator"""
3371 if x in self._cache:
3369 if x in self._cache:
3372 return self._cache[x]
3370 return self._cache[x]
3373
3371
3374 # Use new values only, as existing values would be cached.
3372 # Use new values only, as existing values would be cached.
3375 for l in self._consumegen():
3373 for l in self._consumegen():
3376 if l == x:
3374 if l == x:
3377 return True
3375 return True
3378 if l < x:
3376 if l < x:
3379 break
3377 break
3380
3378
3381 self._cache[x] = False
3379 self._cache[x] = False
3382 return False
3380 return False
3383
3381
3384 def __iter__(self):
3382 def __iter__(self):
3385 if self._ascending:
3383 if self._ascending:
3386 it = self.fastasc
3384 it = self.fastasc
3387 else:
3385 else:
3388 it = self.fastdesc
3386 it = self.fastdesc
3389 if it is not None:
3387 if it is not None:
3390 return it()
3388 return it()
3391 # we need to consume the iterator
3389 # we need to consume the iterator
3392 for x in self._consumegen():
3390 for x in self._consumegen():
3393 pass
3391 pass
3394 # call the same code again, now that fastasc/fastdesc are set
3392 # call the same code again, now that fastasc/fastdesc are set
3395 return iter(self)
3393 return iter(self)
3396
3394
3397 def _iterator(self):
3395 def _iterator(self):
3398 if self._finished:
3396 if self._finished:
3399 return iter(self._genlist)
3397 return iter(self._genlist)
3400
3398
3401 # We have to use this complex iteration strategy to allow multiple
3399 # We have to use this complex iteration strategy to allow multiple
3402 # iterations at the same time. We need to be able to pick up revisions
3400 # iterations at the same time. We need to be able to pick up revisions
3403 # that another iterator has pulled from _consumegen and added to genlist.
3401 # that another iterator has pulled from _consumegen and added to genlist.
3404 #
3402 #
3405 # Getting rid of it would provide about a 15% speedup on this
3403 # Getting rid of it would provide about a 15% speedup on this
3406 # iteration.
3404 # iteration.
3407 genlist = self._genlist
3405 genlist = self._genlist
3408 nextrev = self._consumegen().next
3406 nextrev = self._consumegen().next
3409 _len = len # cache global lookup
3407 _len = len # cache global lookup
3410 def gen():
3408 def gen():
3411 i = 0
3409 i = 0
3412 while True:
3410 while True:
3413 if i < _len(genlist):
3411 if i < _len(genlist):
3414 yield genlist[i]
3412 yield genlist[i]
3415 else:
3413 else:
3416 yield nextrev()
3414 yield nextrev()
3417 i += 1
3415 i += 1
3418 return gen()
3416 return gen()
3419
3417
3420 def _consumegen(self):
3418 def _consumegen(self):
3421 cache = self._cache
3419 cache = self._cache
3422 genlist = self._genlist.append
3420 genlist = self._genlist.append
3423 for item in self._gen:
3421 for item in self._gen:
3424 cache[item] = True
3422 cache[item] = True
3425 genlist(item)
3423 genlist(item)
3426 yield item
3424 yield item
3427 if not self._finished:
3425 if not self._finished:
3428 self._finished = True
3426 self._finished = True
3429 asc = self._genlist[:]
3427 asc = self._genlist[:]
3430 asc.sort()
3428 asc.sort()
3431 self._asclist = asc
3429 self._asclist = asc
3432 self.fastasc = asc.__iter__
3430 self.fastasc = asc.__iter__
3433 self.fastdesc = asc.__reversed__
3431 self.fastdesc = asc.__reversed__
3434
3432
3435 def __len__(self):
3433 def __len__(self):
3436 for x in self._consumegen():
3434 for x in self._consumegen():
3437 pass
3435 pass
3438 return len(self._genlist)
3436 return len(self._genlist)
3439
3437
3440 def sort(self, reverse=False):
3438 def sort(self, reverse=False):
3441 self._ascending = not reverse
3439 self._ascending = not reverse
3442
3440
3443 def reverse(self):
3441 def reverse(self):
3444 self._ascending = not self._ascending
3442 self._ascending = not self._ascending
3445
3443
3446 def isascending(self):
3444 def isascending(self):
3447 return self._ascending
3445 return self._ascending
3448
3446
3449 def isdescending(self):
3447 def isdescending(self):
3450 return not self._ascending
3448 return not self._ascending
3451
3449
3452 def istopo(self):
3450 def istopo(self):
3453 # not worth the trouble asserting if the two sets combined are still
3451 # not worth the trouble asserting if the two sets combined are still
3454 # in topological order. Use the sort() method to explicitly sort
3452 # in topological order. Use the sort() method to explicitly sort
3455 # again instead.
3453 # again instead.
3456 return False
3454 return False
3457
3455
3458 def first(self):
3456 def first(self):
3459 if self._ascending:
3457 if self._ascending:
3460 it = self.fastasc
3458 it = self.fastasc
3461 else:
3459 else:
3462 it = self.fastdesc
3460 it = self.fastdesc
3463 if it is None:
3461 if it is None:
3464 # we need to consume all and try again
3462 # we need to consume all and try again
3465 for x in self._consumegen():
3463 for x in self._consumegen():
3466 pass
3464 pass
3467 return self.first()
3465 return self.first()
3468 return next(it(), None)
3466 return next(it(), None)
3469
3467
3470 def last(self):
3468 def last(self):
3471 if self._ascending:
3469 if self._ascending:
3472 it = self.fastdesc
3470 it = self.fastdesc
3473 else:
3471 else:
3474 it = self.fastasc
3472 it = self.fastasc
3475 if it is None:
3473 if it is None:
3476 # we need to consume all and try again
3474 # we need to consume all and try again
3477 for x in self._consumegen():
3475 for x in self._consumegen():
3478 pass
3476 pass
3479 return self.last()
3477 return self.last()
3480 return next(it(), None)
3478 return next(it(), None)
3481
3479
3482 def __repr__(self):
3480 def __repr__(self):
3483 d = {False: '-', True: '+'}[self._ascending]
3481 d = {False: '-', True: '+'}[self._ascending]
3484 return '<%s%s>' % (type(self).__name__, d)
3482 return '<%s%s>' % (type(self).__name__, d)
3485
3483
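
A short sketch of the behaviour documented above, assuming mercurial.revset (Python 2): membership consumes the wrapped generator only as far as needed and caches what it has seen, and the set can be iterated repeatedly even though a bare generator cannot.

from mercurial import revset

def revgen():
    for r in [0, 2, 4, 6]:    # already ascending, so iterasc=True is valid
        yield r

gs = revset.generatorset(revgen(), iterasc=True)
4 in gs      # True: values are generated on demand and cached
1 in gs      # False: answered from the generator and cache
list(gs)     # [0, 2, 4, 6]
list(gs)     # [0, 2, 4, 6] again; _genlist makes the set re-iterable
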
3486 class spanset(abstractsmartset):
3484 class spanset(abstractsmartset):
3487 """Duck type for baseset class which represents a range of revisions and
3485 """Duck type for baseset class which represents a range of revisions and
3488 can work lazily and without having all the range in memory
3486 can work lazily and without having all the range in memory
3489
3487
3490 Note that spanset(x, y) behaves almost like xrange(x, y) except for two
3488 Note that spanset(x, y) behaves almost like xrange(x, y) except for two
3491 notable points:
3489 notable points:
3492 - when x > y it will be automatically descending,
3490 - when x > y it will be automatically descending,
3493 - revisions filtered out by this repoview will be skipped.
3491 - revisions filtered out by this repoview will be skipped.
3494
3492
3495 """
3493 """
3496 def __init__(self, repo, start=0, end=None):
3494 def __init__(self, repo, start=0, end=None):
3497 """
3495 """
3498 start: first revision included in the set
3496 start: first revision included in the set
3499 (defaults to 0)
3497 (defaults to 0)
3500 end: first revision excluded (last+1)
3498 end: first revision excluded (last+1)
3501 (defaults to len(repo))
3499 (defaults to len(repo))
3502
3500
3503 Spanset will be descending if `end` < `start`.
3501 Spanset will be descending if `end` < `start`.
3504 """
3502 """
3505 if end is None:
3503 if end is None:
3506 end = len(repo)
3504 end = len(repo)
3507 self._ascending = start <= end
3505 self._ascending = start <= end
3508 if not self._ascending:
3506 if not self._ascending:
3509 start, end = end + 1, start + 1
3507 start, end = end + 1, start + 1
3510 self._start = start
3508 self._start = start
3511 self._end = end
3509 self._end = end
3512 self._hiddenrevs = repo.changelog.filteredrevs
3510 self._hiddenrevs = repo.changelog.filteredrevs
3513
3511
3514 def sort(self, reverse=False):
3512 def sort(self, reverse=False):
3515 self._ascending = not reverse
3513 self._ascending = not reverse
3516
3514
3517 def reverse(self):
3515 def reverse(self):
3518 self._ascending = not self._ascending
3516 self._ascending = not self._ascending
3519
3517
3520 def istopo(self):
3518 def istopo(self):
3521 # not worth the trouble asserting if the two sets combined are still
3519 # not worth the trouble asserting if the two sets combined are still
3522 # in topological order. Use the sort() method to explicitly sort
3520 # in topological order. Use the sort() method to explicitly sort
3523 # again instead.
3521 # again instead.
3524 return False
3522 return False
3525
3523
3526 def _iterfilter(self, iterrange):
3524 def _iterfilter(self, iterrange):
3527 s = self._hiddenrevs
3525 s = self._hiddenrevs
3528 for r in iterrange:
3526 for r in iterrange:
3529 if r not in s:
3527 if r not in s:
3530 yield r
3528 yield r
3531
3529
3532 def __iter__(self):
3530 def __iter__(self):
3533 if self._ascending:
3531 if self._ascending:
3534 return self.fastasc()
3532 return self.fastasc()
3535 else:
3533 else:
3536 return self.fastdesc()
3534 return self.fastdesc()
3537
3535
3538 def fastasc(self):
3536 def fastasc(self):
3539 iterrange = xrange(self._start, self._end)
3537 iterrange = xrange(self._start, self._end)
3540 if self._hiddenrevs:
3538 if self._hiddenrevs:
3541 return self._iterfilter(iterrange)
3539 return self._iterfilter(iterrange)
3542 return iter(iterrange)
3540 return iter(iterrange)
3543
3541
3544 def fastdesc(self):
3542 def fastdesc(self):
3545 iterrange = xrange(self._end - 1, self._start - 1, -1)
3543 iterrange = xrange(self._end - 1, self._start - 1, -1)
3546 if self._hiddenrevs:
3544 if self._hiddenrevs:
3547 return self._iterfilter(iterrange)
3545 return self._iterfilter(iterrange)
3548 return iter(iterrange)
3546 return iter(iterrange)
3549
3547
3550 def __contains__(self, rev):
3548 def __contains__(self, rev):
3551 hidden = self._hiddenrevs
3549 hidden = self._hiddenrevs
3552 return ((self._start <= rev < self._end)
3550 return ((self._start <= rev < self._end)
3553 and not (hidden and rev in hidden))
3551 and not (hidden and rev in hidden))
3554
3552
3555 def __nonzero__(self):
3553 def __nonzero__(self):
3556 for r in self:
3554 for r in self:
3557 return True
3555 return True
3558 return False
3556 return False
3559
3557
3560 def __len__(self):
3558 def __len__(self):
3561 if not self._hiddenrevs:
3559 if not self._hiddenrevs:
3562 return abs(self._end - self._start)
3560 return abs(self._end - self._start)
3563 else:
3561 else:
3564 count = 0
3562 count = 0
3565 start = self._start
3563 start = self._start
3566 end = self._end
3564 end = self._end
3567 for rev in self._hiddenrevs:
3565 for rev in self._hiddenrevs:
3568 if (end < rev <= start) or (start <= rev < end):
3566 if (end < rev <= start) or (start <= rev < end):
3569 count += 1
3567 count += 1
3570 return abs(self._end - self._start) - count
3568 return abs(self._end - self._start) - count
3571
3569
3572 def isascending(self):
3570 def isascending(self):
3573 return self._ascending
3571 return self._ascending
3574
3572
3575 def isdescending(self):
3573 def isdescending(self):
3576 return not self._ascending
3574 return not self._ascending
3577
3575
3578 def first(self):
3576 def first(self):
3579 if self._ascending:
3577 if self._ascending:
3580 it = self.fastasc
3578 it = self.fastasc
3581 else:
3579 else:
3582 it = self.fastdesc
3580 it = self.fastdesc
3583 for x in it():
3581 for x in it():
3584 return x
3582 return x
3585 return None
3583 return None
3586
3584
3587 def last(self):
3585 def last(self):
3588 if self._ascending:
3586 if self._ascending:
3589 it = self.fastdesc
3587 it = self.fastdesc
3590 else:
3588 else:
3591 it = self.fastasc
3589 it = self.fastasc
3592 for x in it():
3590 for x in it():
3593 return x
3591 return x
3594 return None
3592 return None
3595
3593
3596 def __repr__(self):
3594 def __repr__(self):
3597 d = {False: '-', True: '+'}[self._ascending]
3595 d = {False: '-', True: '+'}[self._ascending]
3598 return '<%s%s %d:%d>' % (type(self).__name__, d,
3596 return '<%s%s %d:%d>' % (type(self).__name__, d,
3599 self._start, self._end - 1)
3597 self._start, self._end - 1)
3600
3598
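
A sketch of spanset with a stub repository (the stub is illustrative only; a real repo supplies __len__ and changelog.filteredrevs): hidden revisions are skipped, and a span given with start > end iterates descending.

from mercurial import revset

class fakechangelog(object):
    filteredrevs = frozenset([2])    # pretend revision 2 is hidden

class fakerepo(object):
    changelog = fakechangelog()
    def __len__(self):
        return 5                     # revisions 0..4 exist

repo = fakerepo()
list(revset.spanset(repo, 0, 5))     # [0, 1, 3, 4]: rev 2 is filtered out
list(revset.spanset(repo, 4, 0))     # [4, 3, 1]: start > end, so descending
len(revset.spanset(repo, 0, 5))      # 4: the hidden revision is not counted
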
3601 class fullreposet(spanset):
3599 class fullreposet(spanset):
3602 """a set containing all revisions in the repo
3600 """a set containing all revisions in the repo
3603
3601
3604 This class exists to host special optimization and magic to handle virtual
3602 This class exists to host special optimization and magic to handle virtual
3605 revisions such as "null".
3603 revisions such as "null".
3606 """
3604 """
3607
3605
3608 def __init__(self, repo):
3606 def __init__(self, repo):
3609 super(fullreposet, self).__init__(repo)
3607 super(fullreposet, self).__init__(repo)
3610
3608
3611 def __and__(self, other):
3609 def __and__(self, other):
3612 """As self contains the whole repo, every element of the other set should
3610 """As self contains the whole repo, every element of the other set should
3613 also be in self. Therefore `self & other = other`.
3611 also be in self. Therefore `self & other = other`.
3614
3612
3615 This boldly assumes the other contains valid revs only.
3613 This boldly assumes the other contains valid revs only.
3616 """
3614 """
3617 # other is not a smartset, make it so
3615 # other is not a smartset, make it so
3618 if not util.safehasattr(other, 'isascending'):
3616 if not util.safehasattr(other, 'isascending'):
3619 # filter out hidden revisions
3617 # filter out hidden revisions
3620 # (this boldly assumes all smartsets are pure)
3618 # (this boldly assumes all smartsets are pure)
3621 #
3619 #
3622 # `other` was used with "&", let's assume this is a set like
3620 # `other` was used with "&", let's assume this is a set like
3623 # object.
3621 # object.
3624 other = baseset(other - self._hiddenrevs)
3622 other = baseset(other - self._hiddenrevs)
3625
3623
3626 # XXX As fullreposet is also used as bootstrap, this is wrong.
3624 # XXX As fullreposet is also used as bootstrap, this is wrong.
3627 #
3625 #
3628 # With a giveme312() revset returning [3,1,2], this makes
3626 # With a giveme312() revset returning [3,1,2], this makes
3629 # 'hg log -r "giveme312()"' -> 1, 2, 3 (wrong)
3627 # 'hg log -r "giveme312()"' -> 1, 2, 3 (wrong)
3630 # We cannot just drop it because other usages still need to sort it:
3628 # We cannot just drop it because other usages still need to sort it:
3631 # 'hg log -r "all() and giveme312()"' -> 1, 2, 3 (right)
3629 # 'hg log -r "all() and giveme312()"' -> 1, 2, 3 (right)
3632 #
3630 #
3633 # There are also some faulty revset implementations that rely on it
3631 # There are also some faulty revset implementations that rely on it
3634 # (eg: children as of its state in e8075329c5fb)
3632 # (eg: children as of its state in e8075329c5fb)
3635 #
3633 #
3636 # When we fix the two points above we can move this into the if clause
3634 # When we fix the two points above we can move this into the if clause
3637 other.sort(reverse=self.isdescending())
3635 other.sort(reverse=self.isdescending())
3638 return other
3636 return other
3639
3637
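
To illustrate the ordering note in __and__ above, a hedged sketch using the same kind of stub repository (illustrative only): intersecting with a plain set keeps the right-hand operand but re-sorts it in this set's direction.

from mercurial import revset

class fakechangelog(object):
    filteredrevs = frozenset()       # nothing hidden in this toy repo

class fakerepo(object):
    changelog = fakechangelog()
    def __len__(self):
        return 5

rs = revset.fullreposet(fakerepo()) & set([3, 1, 2])
list(rs)    # [1, 2, 3]: the other operand survives, but sorted ascending
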
3640 def prettyformatset(revs):
3638 def prettyformatset(revs):
3641 lines = []
3639 lines = []
3642 rs = repr(revs)
3640 rs = repr(revs)
3643 p = 0
3641 p = 0
3644 while p < len(rs):
3642 while p < len(rs):
3645 q = rs.find('<', p + 1)
3643 q = rs.find('<', p + 1)
3646 if q < 0:
3644 if q < 0:
3647 q = len(rs)
3645 q = len(rs)
3648 l = rs.count('<', 0, p) - rs.count('>', 0, p)
3646 l = rs.count('<', 0, p) - rs.count('>', 0, p)
3649 assert l >= 0
3647 assert l >= 0
3650 lines.append((l, rs[p:q].rstrip()))
3648 lines.append((l, rs[p:q].rstrip()))
3651 p = q
3649 p = q
3652 return '\n'.join(' ' * l + s for l, s in lines)
3650 return '\n'.join(' ' * l + s for l, s in lines)
3653
3651
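
prettyformatset indents a nested smartset repr by one space per nesting level, splitting on '<' and counting how many brackets are still open. A quick sketch, reusing the values from the addset doctests above (the exact strings come from the __repr__ methods in this module):

from mercurial import revset

xs = revset.baseset([0, 3, 2])
ys = revset.baseset([5, 2, 4])
rs = revset.addset(xs, ys)
revset.prettyformatset(rs)
# returns a multi-line string which prints as:
# <addset
#  <baseset [0, 3, 2]>,
#  <baseset [5, 2, 4]>>
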
3654 def loadpredicate(ui, extname, registrarobj):
3652 def loadpredicate(ui, extname, registrarobj):
3655 """Load revset predicates from specified registrarobj
3653 """Load revset predicates from specified registrarobj
3656 """
3654 """
3657 for name, func in registrarobj._table.iteritems():
3655 for name, func in registrarobj._table.iteritems():
3658 symbols[name] = func
3656 symbols[name] = func
3659 if func._safe:
3657 if func._safe:
3660 safesymbols.add(name)
3658 safesymbols.add(name)
3661
3659
3662 # load built-in predicates explicitly to setup safesymbols
3660 # load built-in predicates explicitly to setup safesymbols
3663 loadpredicate(None, None, predicate)
3661 loadpredicate(None, None, predicate)
3664
3662
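
For context, a hedged sketch of where such a registrarobj usually comes from: an extension builds a table with registrar.revsetpredicate() and decorates its predicate functions, and the extension loader is then expected to pass that table to loadpredicate(), which fills symbols and safesymbols. The extension and the evenrevs predicate below are hypothetical.

# hypothetical extension module (not part of this file)
from mercurial import registrar

revsetpredicate = registrar.revsetpredicate()

@revsetpredicate('evenrevs()', safe=True)
def evenrevs(repo, subset, x):
    """Changesets whose revision number is even."""
    # argument checking omitted for brevity; real predicates call getargs()
    return subset.filter(lambda r: r % 2 == 0)

# the loader ends up doing roughly: loadpredicate(ui, 'myext', revsetpredicate)
# after which 'evenrevs' works in revset expressions and, because safe=True,
# it is also added to safesymbols.
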
3665 # tell hggettext to extract docstrings from these functions:
3663 # tell hggettext to extract docstrings from these functions:
3666 i18nfunctions = symbols.values()
3664 i18nfunctions = symbols.values()