revset: factor out getinteger() helper...

Author: Yuya Nishihara
Changeset: r30801:67ee7874 (default branch)
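The change itself is small: several revset predicates need an integer out of a parsed-tree argument node, and this commit moves that boilerplate into a shared getinteger() helper. A minimal standalone sketch of the pattern follows; the helper names match the diff below, while the local ParseError class is illustrative scaffolding standing in for mercurial.error.ParseError.

class ParseError(Exception):
    """Stand-in for mercurial.error.ParseError (illustration only)."""

def getstring(x, err):
    # a parsed-tree node looks like ('symbol', '3') or ('string', '3')
    if x and x[0] in ('string', 'symbol'):
        return x[1]
    raise ParseError(err)

def getinteger(x, err):
    # the new helper: fetch the string form, then convert, turning any
    # conversion failure into the caller-supplied parse error
    try:
        return int(getstring(x, err))
    except ValueError:
        raise ParseError(err)

# before: callers such as ancestorspec() open-coded
#     n = int(n[1])        # and caught (TypeError, ValueError)
# after:
#     n = getinteger(n, "~ expects a number")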
@@ -1,3895 +1,3888 b''
1 # revset.py - revision set queries for mercurial
1 # revset.py - revision set queries for mercurial
2 #
2 #
3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import heapq
10 import heapq
11 import re
11 import re
12 import string
12 import string
13
13
14 from .i18n import _
14 from .i18n import _
15 from . import (
15 from . import (
16 destutil,
16 destutil,
17 encoding,
17 encoding,
18 error,
18 error,
19 hbisect,
19 hbisect,
20 match as matchmod,
20 match as matchmod,
21 node,
21 node,
22 obsolete as obsmod,
22 obsolete as obsmod,
23 parser,
23 parser,
24 pathutil,
24 pathutil,
25 phases,
25 phases,
26 pycompat,
26 pycompat,
27 registrar,
27 registrar,
28 repoview,
28 repoview,
29 util,
29 util,
30 )
30 )
31
31
32 def _revancestors(repo, revs, followfirst):
32 def _revancestors(repo, revs, followfirst):
33 """Like revlog.ancestors(), but supports followfirst."""
33 """Like revlog.ancestors(), but supports followfirst."""
34 if followfirst:
34 if followfirst:
35 cut = 1
35 cut = 1
36 else:
36 else:
37 cut = None
37 cut = None
38 cl = repo.changelog
38 cl = repo.changelog
39
39
40 def iterate():
40 def iterate():
41 revs.sort(reverse=True)
41 revs.sort(reverse=True)
42 irevs = iter(revs)
42 irevs = iter(revs)
43 h = []
43 h = []
44
44
45 inputrev = next(irevs, None)
45 inputrev = next(irevs, None)
46 if inputrev is not None:
46 if inputrev is not None:
47 heapq.heappush(h, -inputrev)
47 heapq.heappush(h, -inputrev)
48
48
49 seen = set()
49 seen = set()
50 while h:
50 while h:
51 current = -heapq.heappop(h)
51 current = -heapq.heappop(h)
52 if current == inputrev:
52 if current == inputrev:
53 inputrev = next(irevs, None)
53 inputrev = next(irevs, None)
54 if inputrev is not None:
54 if inputrev is not None:
55 heapq.heappush(h, -inputrev)
55 heapq.heappush(h, -inputrev)
56 if current not in seen:
56 if current not in seen:
57 seen.add(current)
57 seen.add(current)
58 yield current
58 yield current
59 for parent in cl.parentrevs(current)[:cut]:
59 for parent in cl.parentrevs(current)[:cut]:
60 if parent != node.nullrev:
60 if parent != node.nullrev:
61 heapq.heappush(h, -parent)
61 heapq.heappush(h, -parent)
62
62
63 return generatorset(iterate(), iterasc=False)
63 return generatorset(iterate(), iterasc=False)
64
64
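# Annotation (not part of the diff): heapq is a min-heap, so _revancestors()
# pushes -rev and pops -heapq.heappop(h) to walk revisions from highest to
# lowest; that is why the resulting generatorset is created with
# iterasc=False (descending order).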
65 def _revdescendants(repo, revs, followfirst):
65 def _revdescendants(repo, revs, followfirst):
66 """Like revlog.descendants() but supports followfirst."""
66 """Like revlog.descendants() but supports followfirst."""
67 if followfirst:
67 if followfirst:
68 cut = 1
68 cut = 1
69 else:
69 else:
70 cut = None
70 cut = None
71
71
72 def iterate():
72 def iterate():
73 cl = repo.changelog
73 cl = repo.changelog
74 # XXX this should be 'parentset.min()' assuming 'parentset' is a
74 # XXX this should be 'parentset.min()' assuming 'parentset' is a
75 # smartset (and if it is not, it should.)
75 # smartset (and if it is not, it should.)
76 first = min(revs)
76 first = min(revs)
77 nullrev = node.nullrev
77 nullrev = node.nullrev
78 if first == nullrev:
78 if first == nullrev:
79 # Are there nodes with a null first parent and a non-null
79 # Are there nodes with a null first parent and a non-null
80 # second one? Maybe. Do we care? Probably not.
80 # second one? Maybe. Do we care? Probably not.
81 for i in cl:
81 for i in cl:
82 yield i
82 yield i
83 else:
83 else:
84 seen = set(revs)
84 seen = set(revs)
85 for i in cl.revs(first + 1):
85 for i in cl.revs(first + 1):
86 for x in cl.parentrevs(i)[:cut]:
86 for x in cl.parentrevs(i)[:cut]:
87 if x != nullrev and x in seen:
87 if x != nullrev and x in seen:
88 seen.add(i)
88 seen.add(i)
89 yield i
89 yield i
90 break
90 break
91
91
92 return generatorset(iterate(), iterasc=True)
92 return generatorset(iterate(), iterasc=True)
93
93
94 def _reachablerootspure(repo, minroot, roots, heads, includepath):
94 def _reachablerootspure(repo, minroot, roots, heads, includepath):
95 """return (heads(::<roots> and ::<heads>))
95 """return (heads(::<roots> and ::<heads>))
96
96
97 If includepath is True, return (<roots>::<heads>)."""
97 If includepath is True, return (<roots>::<heads>)."""
98 if not roots:
98 if not roots:
99 return []
99 return []
100 parentrevs = repo.changelog.parentrevs
100 parentrevs = repo.changelog.parentrevs
101 roots = set(roots)
101 roots = set(roots)
102 visit = list(heads)
102 visit = list(heads)
103 reachable = set()
103 reachable = set()
104 seen = {}
104 seen = {}
105 # prefetch all the things! (because python is slow)
105 # prefetch all the things! (because python is slow)
106 reached = reachable.add
106 reached = reachable.add
107 dovisit = visit.append
107 dovisit = visit.append
108 nextvisit = visit.pop
108 nextvisit = visit.pop
109 # open-code the post-order traversal due to the tiny size of
109 # open-code the post-order traversal due to the tiny size of
110 # sys.getrecursionlimit()
110 # sys.getrecursionlimit()
111 while visit:
111 while visit:
112 rev = nextvisit()
112 rev = nextvisit()
113 if rev in roots:
113 if rev in roots:
114 reached(rev)
114 reached(rev)
115 if not includepath:
115 if not includepath:
116 continue
116 continue
117 parents = parentrevs(rev)
117 parents = parentrevs(rev)
118 seen[rev] = parents
118 seen[rev] = parents
119 for parent in parents:
119 for parent in parents:
120 if parent >= minroot and parent not in seen:
120 if parent >= minroot and parent not in seen:
121 dovisit(parent)
121 dovisit(parent)
122 if not reachable:
122 if not reachable:
123 return baseset()
123 return baseset()
124 if not includepath:
124 if not includepath:
125 return reachable
125 return reachable
126 for rev in sorted(seen):
126 for rev in sorted(seen):
127 for parent in seen[rev]:
127 for parent in seen[rev]:
128 if parent in reachable:
128 if parent in reachable:
129 reached(rev)
129 reached(rev)
130 return reachable
130 return reachable
131
131
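# Annotation (not part of the diff): on a linear DAG 0 -> 1 -> 2 -> 3 with
# roots={1} and heads={3}, includepath=False yields {1} (the heads of
# ::1 and ::3), while includepath=True yields {1, 2, 3} (every revision on
# a path 1::3).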
132 def reachableroots(repo, roots, heads, includepath=False):
132 def reachableroots(repo, roots, heads, includepath=False):
133 """return (heads(::<roots> and ::<heads>))
133 """return (heads(::<roots> and ::<heads>))
134
134
135 If includepath is True, return (<roots>::<heads>)."""
135 If includepath is True, return (<roots>::<heads>)."""
136 if not roots:
136 if not roots:
137 return baseset()
137 return baseset()
138 minroot = roots.min()
138 minroot = roots.min()
139 roots = list(roots)
139 roots = list(roots)
140 heads = list(heads)
140 heads = list(heads)
141 try:
141 try:
142 revs = repo.changelog.reachableroots(minroot, heads, roots, includepath)
142 revs = repo.changelog.reachableroots(minroot, heads, roots, includepath)
143 except AttributeError:
143 except AttributeError:
144 revs = _reachablerootspure(repo, minroot, roots, heads, includepath)
144 revs = _reachablerootspure(repo, minroot, roots, heads, includepath)
145 revs = baseset(revs)
145 revs = baseset(revs)
146 revs.sort()
146 revs.sort()
147 return revs
147 return revs
148
148
149 elements = {
149 elements = {
150 # token-type: binding-strength, primary, prefix, infix, suffix
150 # token-type: binding-strength, primary, prefix, infix, suffix
151 "(": (21, None, ("group", 1, ")"), ("func", 1, ")"), None),
151 "(": (21, None, ("group", 1, ")"), ("func", 1, ")"), None),
152 "##": (20, None, None, ("_concat", 20), None),
152 "##": (20, None, None, ("_concat", 20), None),
153 "~": (18, None, None, ("ancestor", 18), None),
153 "~": (18, None, None, ("ancestor", 18), None),
154 "^": (18, None, None, ("parent", 18), "parentpost"),
154 "^": (18, None, None, ("parent", 18), "parentpost"),
155 "-": (5, None, ("negate", 19), ("minus", 5), None),
155 "-": (5, None, ("negate", 19), ("minus", 5), None),
156 "::": (17, None, ("dagrangepre", 17), ("dagrange", 17), "dagrangepost"),
156 "::": (17, None, ("dagrangepre", 17), ("dagrange", 17), "dagrangepost"),
157 "..": (17, None, ("dagrangepre", 17), ("dagrange", 17), "dagrangepost"),
157 "..": (17, None, ("dagrangepre", 17), ("dagrange", 17), "dagrangepost"),
158 ":": (15, "rangeall", ("rangepre", 15), ("range", 15), "rangepost"),
158 ":": (15, "rangeall", ("rangepre", 15), ("range", 15), "rangepost"),
159 "not": (10, None, ("not", 10), None, None),
159 "not": (10, None, ("not", 10), None, None),
160 "!": (10, None, ("not", 10), None, None),
160 "!": (10, None, ("not", 10), None, None),
161 "and": (5, None, None, ("and", 5), None),
161 "and": (5, None, None, ("and", 5), None),
162 "&": (5, None, None, ("and", 5), None),
162 "&": (5, None, None, ("and", 5), None),
163 "%": (5, None, None, ("only", 5), "onlypost"),
163 "%": (5, None, None, ("only", 5), "onlypost"),
164 "or": (4, None, None, ("or", 4), None),
164 "or": (4, None, None, ("or", 4), None),
165 "|": (4, None, None, ("or", 4), None),
165 "|": (4, None, None, ("or", 4), None),
166 "+": (4, None, None, ("or", 4), None),
166 "+": (4, None, None, ("or", 4), None),
167 "=": (3, None, None, ("keyvalue", 3), None),
167 "=": (3, None, None, ("keyvalue", 3), None),
168 ",": (2, None, None, ("list", 2), None),
168 ",": (2, None, None, ("list", 2), None),
169 ")": (0, None, None, None, None),
169 ")": (0, None, None, None, None),
170 "symbol": (0, "symbol", None, None, None),
170 "symbol": (0, "symbol", None, None, None),
171 "string": (0, "string", None, None, None),
171 "string": (0, "string", None, None, None),
172 "end": (0, None, None, None, None),
172 "end": (0, None, None, None, None),
173 }
173 }
174
174
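# Annotation (not part of the diff): each tuple is (binding-strength,
# primary, prefix, infix, suffix), and higher strength binds tighter.  For
# example 'not 1:5' parses roughly as
# ('not', ('range', ('symbol', '1'), ('symbol', '5'))) because ':' (15)
# binds tighter than 'not' (10), and 'a::b or c::d' groups each '::' (17)
# before applying 'or' (4).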
175 keywords = set(['and', 'or', 'not'])
175 keywords = set(['and', 'or', 'not'])
176
176
177 # default set of valid characters for the initial letter of symbols
177 # default set of valid characters for the initial letter of symbols
178 _syminitletters = set(
178 _syminitletters = set(
179 string.ascii_letters +
179 string.ascii_letters +
180 string.digits + pycompat.sysstr('._@')) | set(map(chr, xrange(128, 256)))
180 string.digits + pycompat.sysstr('._@')) | set(map(chr, xrange(128, 256)))
181
181
182 # default set of valid characters for non-initial letters of symbols
182 # default set of valid characters for non-initial letters of symbols
183 _symletters = _syminitletters | set(pycompat.sysstr('-/'))
183 _symletters = _syminitletters | set(pycompat.sysstr('-/'))
184
184
185 def tokenize(program, lookup=None, syminitletters=None, symletters=None):
185 def tokenize(program, lookup=None, syminitletters=None, symletters=None):
186 '''
186 '''
187 Parse a revset statement into a stream of tokens
187 Parse a revset statement into a stream of tokens
188
188
189 ``syminitletters`` is the set of valid characters for the initial
189 ``syminitletters`` is the set of valid characters for the initial
190 letter of symbols.
190 letter of symbols.
191
191
192 By default, character ``c`` is recognized as valid for initial
192 By default, character ``c`` is recognized as valid for initial
193 letter of symbols, if ``c.isalnum() or c in '._@' or ord(c) > 127``.
193 letter of symbols, if ``c.isalnum() or c in '._@' or ord(c) > 127``.
194
194
195 ``symletters`` is the set of valid characters for non-initial
195 ``symletters`` is the set of valid characters for non-initial
196 letters of symbols.
196 letters of symbols.
197
197
198 By default, character ``c`` is recognized as valid for non-initial
198 By default, character ``c`` is recognized as valid for non-initial
199 letters of symbols, if ``c.isalnum() or c in '-._/@' or ord(c) > 127``.
199 letters of symbols, if ``c.isalnum() or c in '-._/@' or ord(c) > 127``.
200
200
201 Check that @ is a valid unquoted token character (issue3686):
201 Check that @ is a valid unquoted token character (issue3686):
202 >>> list(tokenize("@::"))
202 >>> list(tokenize("@::"))
203 [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]
203 [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]
204
204
205 '''
205 '''
206 if syminitletters is None:
206 if syminitletters is None:
207 syminitletters = _syminitletters
207 syminitletters = _syminitletters
208 if symletters is None:
208 if symletters is None:
209 symletters = _symletters
209 symletters = _symletters
210
210
211 if program and lookup:
211 if program and lookup:
212 # attempt to parse old-style ranges first to deal with
212 # attempt to parse old-style ranges first to deal with
213 # things like old-tag which contain query metacharacters
213 # things like old-tag which contain query metacharacters
214 parts = program.split(':', 1)
214 parts = program.split(':', 1)
215 if all(lookup(sym) for sym in parts if sym):
215 if all(lookup(sym) for sym in parts if sym):
216 if parts[0]:
216 if parts[0]:
217 yield ('symbol', parts[0], 0)
217 yield ('symbol', parts[0], 0)
218 if len(parts) > 1:
218 if len(parts) > 1:
219 s = len(parts[0])
219 s = len(parts[0])
220 yield (':', None, s)
220 yield (':', None, s)
221 if parts[1]:
221 if parts[1]:
222 yield ('symbol', parts[1], s + 1)
222 yield ('symbol', parts[1], s + 1)
223 yield ('end', None, len(program))
223 yield ('end', None, len(program))
224 return
224 return
225
225
226 pos, l = 0, len(program)
226 pos, l = 0, len(program)
227 while pos < l:
227 while pos < l:
228 c = program[pos]
228 c = program[pos]
229 if c.isspace(): # skip inter-token whitespace
229 if c.isspace(): # skip inter-token whitespace
230 pass
230 pass
231 elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
231 elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
232 yield ('::', None, pos)
232 yield ('::', None, pos)
233 pos += 1 # skip ahead
233 pos += 1 # skip ahead
234 elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
234 elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
235 yield ('..', None, pos)
235 yield ('..', None, pos)
236 pos += 1 # skip ahead
236 pos += 1 # skip ahead
237 elif c == '#' and program[pos:pos + 2] == '##': # look ahead carefully
237 elif c == '#' and program[pos:pos + 2] == '##': # look ahead carefully
238 yield ('##', None, pos)
238 yield ('##', None, pos)
239 pos += 1 # skip ahead
239 pos += 1 # skip ahead
240 elif c in "():=,-|&+!~^%": # handle simple operators
240 elif c in "():=,-|&+!~^%": # handle simple operators
241 yield (c, None, pos)
241 yield (c, None, pos)
242 elif (c in '"\'' or c == 'r' and
242 elif (c in '"\'' or c == 'r' and
243 program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
243 program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
244 if c == 'r':
244 if c == 'r':
245 pos += 1
245 pos += 1
246 c = program[pos]
246 c = program[pos]
247 decode = lambda x: x
247 decode = lambda x: x
248 else:
248 else:
249 decode = parser.unescapestr
249 decode = parser.unescapestr
250 pos += 1
250 pos += 1
251 s = pos
251 s = pos
252 while pos < l: # find closing quote
252 while pos < l: # find closing quote
253 d = program[pos]
253 d = program[pos]
254 if d == '\\': # skip over escaped characters
254 if d == '\\': # skip over escaped characters
255 pos += 2
255 pos += 2
256 continue
256 continue
257 if d == c:
257 if d == c:
258 yield ('string', decode(program[s:pos]), s)
258 yield ('string', decode(program[s:pos]), s)
259 break
259 break
260 pos += 1
260 pos += 1
261 else:
261 else:
262 raise error.ParseError(_("unterminated string"), s)
262 raise error.ParseError(_("unterminated string"), s)
263 # gather up a symbol/keyword
263 # gather up a symbol/keyword
264 elif c in syminitletters:
264 elif c in syminitletters:
265 s = pos
265 s = pos
266 pos += 1
266 pos += 1
267 while pos < l: # find end of symbol
267 while pos < l: # find end of symbol
268 d = program[pos]
268 d = program[pos]
269 if d not in symletters:
269 if d not in symletters:
270 break
270 break
271 if d == '.' and program[pos - 1] == '.': # special case for ..
271 if d == '.' and program[pos - 1] == '.': # special case for ..
272 pos -= 1
272 pos -= 1
273 break
273 break
274 pos += 1
274 pos += 1
275 sym = program[s:pos]
275 sym = program[s:pos]
276 if sym in keywords: # operator keywords
276 if sym in keywords: # operator keywords
277 yield (sym, None, s)
277 yield (sym, None, s)
278 elif '-' in sym:
278 elif '-' in sym:
279 # some jerk gave us foo-bar-baz, try to check if it's a symbol
279 # some jerk gave us foo-bar-baz, try to check if it's a symbol
280 if lookup and lookup(sym):
280 if lookup and lookup(sym):
281 # looks like a real symbol
281 # looks like a real symbol
282 yield ('symbol', sym, s)
282 yield ('symbol', sym, s)
283 else:
283 else:
284 # looks like an expression
284 # looks like an expression
285 parts = sym.split('-')
285 parts = sym.split('-')
286 for p in parts[:-1]:
286 for p in parts[:-1]:
287 if p: # possible consecutive -
287 if p: # possible consecutive -
288 yield ('symbol', p, s)
288 yield ('symbol', p, s)
289 s += len(p)
289 s += len(p)
290 yield ('-', None, pos)
290 yield ('-', None, pos)
291 s += 1
291 s += 1
292 if parts[-1]: # possible trailing -
292 if parts[-1]: # possible trailing -
293 yield ('symbol', parts[-1], s)
293 yield ('symbol', parts[-1], s)
294 else:
294 else:
295 yield ('symbol', sym, s)
295 yield ('symbol', sym, s)
296 pos -= 1
296 pos -= 1
297 else:
297 else:
298 raise error.ParseError(_("syntax error in revset '%s'") %
298 raise error.ParseError(_("syntax error in revset '%s'") %
299 program, pos)
299 program, pos)
300 pos += 1
300 pos += 1
301 yield ('end', None, pos)
301 yield ('end', None, pos)
302
302
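# Annotation (not part of the diff): two token streams worked out by hand
# from the rules above, with lookup left at its default of None:
#   list(tokenize("2:tip"))
#     -> [('symbol', '2', 0), (':', None, 1), ('symbol', 'tip', 2),
#         ('end', None, 5)]
#   list(tokenize("user('joe')"))
#     -> [('symbol', 'user', 0), ('(', None, 4), ('string', 'joe', 6),
#         (')', None, 10), ('end', None, 11)]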
303 # helpers
303 # helpers
304
304
305 def getsymbol(x):
305 def getsymbol(x):
306 if x and x[0] == 'symbol':
306 if x and x[0] == 'symbol':
307 return x[1]
307 return x[1]
308 raise error.ParseError(_('not a symbol'))
308 raise error.ParseError(_('not a symbol'))
309
309
310 def getstring(x, err):
310 def getstring(x, err):
311 if x and (x[0] == 'string' or x[0] == 'symbol'):
311 if x and (x[0] == 'string' or x[0] == 'symbol'):
312 return x[1]
312 return x[1]
313 raise error.ParseError(err)
313 raise error.ParseError(err)
314
314
315 def getinteger(x, err):
316 try:
317 return int(getstring(x, err))
318 except ValueError:
319 raise error.ParseError(err)
320
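# Annotation (not part of the diff): illustrative behavior of the new helper,
# assuming the error message is passed through unchanged:
#   getinteger(('symbol', '30'), 'bad value')  -> 30
#   getinteger(('string', '-5'), 'bad value')  -> -5
#   getinteger(('symbol', 'x'), 'bad value')   -> ParseError('bad value')
#   getinteger(None, 'bad value')              -> ParseError('bad value')
# i.e. "not a string/symbol node" and "not a number" both collapse into the
# same caller-supplied ParseError.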
315 def getlist(x):
321 def getlist(x):
316 if not x:
322 if not x:
317 return []
323 return []
318 if x[0] == 'list':
324 if x[0] == 'list':
319 return list(x[1:])
325 return list(x[1:])
320 return [x]
326 return [x]
321
327
322 def getargs(x, min, max, err):
328 def getargs(x, min, max, err):
323 l = getlist(x)
329 l = getlist(x)
324 if len(l) < min or (max >= 0 and len(l) > max):
330 if len(l) < min or (max >= 0 and len(l) > max):
325 raise error.ParseError(err)
331 raise error.ParseError(err)
326 return l
332 return l
327
333
328 def getargsdict(x, funcname, keys):
334 def getargsdict(x, funcname, keys):
329 return parser.buildargsdict(getlist(x), funcname, parser.splitargspec(keys),
335 return parser.buildargsdict(getlist(x), funcname, parser.splitargspec(keys),
330 keyvaluenode='keyvalue', keynode='symbol')
336 keyvaluenode='keyvalue', keynode='symbol')
331
337
332 def getset(repo, subset, x):
338 def getset(repo, subset, x):
333 if not x:
339 if not x:
334 raise error.ParseError(_("missing argument"))
340 raise error.ParseError(_("missing argument"))
335 s = methods[x[0]](repo, subset, *x[1:])
341 s = methods[x[0]](repo, subset, *x[1:])
336 if util.safehasattr(s, 'isascending'):
342 if util.safehasattr(s, 'isascending'):
337 return s
343 return s
338 # else case should not happen, because all non-func are internal,
344 # else case should not happen, because all non-func are internal,
339 # ignoring for now.
345 # ignoring for now.
340 if x[0] == 'func' and x[1][0] == 'symbol' and x[1][1] in symbols:
346 if x[0] == 'func' and x[1][0] == 'symbol' and x[1][1] in symbols:
341 repo.ui.deprecwarn('revset "%s" uses list instead of smartset'
347 repo.ui.deprecwarn('revset "%s" uses list instead of smartset'
342 % x[1][1],
348 % x[1][1],
343 '3.9')
349 '3.9')
344 return baseset(s)
350 return baseset(s)
345
351
346 def _getrevsource(repo, r):
352 def _getrevsource(repo, r):
347 extra = repo[r].extra()
353 extra = repo[r].extra()
348 for label in ('source', 'transplant_source', 'rebase_source'):
354 for label in ('source', 'transplant_source', 'rebase_source'):
349 if label in extra:
355 if label in extra:
350 try:
356 try:
351 return repo[extra[label]].rev()
357 return repo[extra[label]].rev()
352 except error.RepoLookupError:
358 except error.RepoLookupError:
353 pass
359 pass
354 return None
360 return None
355
361
356 # operator methods
362 # operator methods
357
363
358 def stringset(repo, subset, x):
364 def stringset(repo, subset, x):
359 x = repo[x].rev()
365 x = repo[x].rev()
360 if (x in subset
366 if (x in subset
361 or x == node.nullrev and isinstance(subset, fullreposet)):
367 or x == node.nullrev and isinstance(subset, fullreposet)):
362 return baseset([x])
368 return baseset([x])
363 return baseset()
369 return baseset()
364
370
365 def rangeset(repo, subset, x, y, order):
371 def rangeset(repo, subset, x, y, order):
366 m = getset(repo, fullreposet(repo), x)
372 m = getset(repo, fullreposet(repo), x)
367 n = getset(repo, fullreposet(repo), y)
373 n = getset(repo, fullreposet(repo), y)
368
374
369 if not m or not n:
375 if not m or not n:
370 return baseset()
376 return baseset()
371 return _makerangeset(repo, subset, m.first(), n.last(), order)
377 return _makerangeset(repo, subset, m.first(), n.last(), order)
372
378
373 def rangepre(repo, subset, y, order):
379 def rangepre(repo, subset, y, order):
374 # ':y' can't be rewritten to '0:y' since '0' may be hidden
380 # ':y' can't be rewritten to '0:y' since '0' may be hidden
375 n = getset(repo, fullreposet(repo), y)
381 n = getset(repo, fullreposet(repo), y)
376 if not n:
382 if not n:
377 return baseset()
383 return baseset()
378 return _makerangeset(repo, subset, 0, n.last(), order)
384 return _makerangeset(repo, subset, 0, n.last(), order)
379
385
380 def _makerangeset(repo, subset, m, n, order):
386 def _makerangeset(repo, subset, m, n, order):
381 if m == n:
387 if m == n:
382 r = baseset([m])
388 r = baseset([m])
383 elif n == node.wdirrev:
389 elif n == node.wdirrev:
384 r = spanset(repo, m, len(repo)) + baseset([n])
390 r = spanset(repo, m, len(repo)) + baseset([n])
385 elif m == node.wdirrev:
391 elif m == node.wdirrev:
386 r = baseset([m]) + spanset(repo, len(repo) - 1, n - 1)
392 r = baseset([m]) + spanset(repo, len(repo) - 1, n - 1)
387 elif m < n:
393 elif m < n:
388 r = spanset(repo, m, n + 1)
394 r = spanset(repo, m, n + 1)
389 else:
395 else:
390 r = spanset(repo, m, n - 1)
396 r = spanset(repo, m, n - 1)
391
397
392 if order == defineorder:
398 if order == defineorder:
393 return r & subset
399 return r & subset
394 else:
400 else:
395 # carrying the sorting over when possible would be more efficient
401 # carrying the sorting over when possible would be more efficient
396 return subset & r
402 return subset & r
397
403
398 def dagrange(repo, subset, x, y, order):
404 def dagrange(repo, subset, x, y, order):
399 r = fullreposet(repo)
405 r = fullreposet(repo)
400 xs = reachableroots(repo, getset(repo, r, x), getset(repo, r, y),
406 xs = reachableroots(repo, getset(repo, r, x), getset(repo, r, y),
401 includepath=True)
407 includepath=True)
402 return subset & xs
408 return subset & xs
403
409
404 def andset(repo, subset, x, y, order):
410 def andset(repo, subset, x, y, order):
405 return getset(repo, getset(repo, subset, x), y)
411 return getset(repo, getset(repo, subset, x), y)
406
412
407 def differenceset(repo, subset, x, y, order):
413 def differenceset(repo, subset, x, y, order):
408 return getset(repo, subset, x) - getset(repo, subset, y)
414 return getset(repo, subset, x) - getset(repo, subset, y)
409
415
410 def _orsetlist(repo, subset, xs):
416 def _orsetlist(repo, subset, xs):
411 assert xs
417 assert xs
412 if len(xs) == 1:
418 if len(xs) == 1:
413 return getset(repo, subset, xs[0])
419 return getset(repo, subset, xs[0])
414 p = len(xs) // 2
420 p = len(xs) // 2
415 a = _orsetlist(repo, subset, xs[:p])
421 a = _orsetlist(repo, subset, xs[:p])
416 b = _orsetlist(repo, subset, xs[p:])
422 b = _orsetlist(repo, subset, xs[p:])
417 return a + b
423 return a + b
418
424
419 def orset(repo, subset, x, order):
425 def orset(repo, subset, x, order):
420 xs = getlist(x)
426 xs = getlist(x)
421 if order == followorder:
427 if order == followorder:
422 # slow path to take the subset order
428 # slow path to take the subset order
423 return subset & _orsetlist(repo, fullreposet(repo), xs)
429 return subset & _orsetlist(repo, fullreposet(repo), xs)
424 else:
430 else:
425 return _orsetlist(repo, subset, xs)
431 return _orsetlist(repo, subset, xs)
426
432
427 def notset(repo, subset, x, order):
433 def notset(repo, subset, x, order):
428 return subset - getset(repo, subset, x)
434 return subset - getset(repo, subset, x)
429
435
430 def listset(repo, subset, *xs):
436 def listset(repo, subset, *xs):
431 raise error.ParseError(_("can't use a list in this context"),
437 raise error.ParseError(_("can't use a list in this context"),
432 hint=_('see hg help "revsets.x or y"'))
438 hint=_('see hg help "revsets.x or y"'))
433
439
434 def keyvaluepair(repo, subset, k, v):
440 def keyvaluepair(repo, subset, k, v):
435 raise error.ParseError(_("can't use a key-value pair in this context"))
441 raise error.ParseError(_("can't use a key-value pair in this context"))
436
442
437 def func(repo, subset, a, b, order):
443 def func(repo, subset, a, b, order):
438 f = getsymbol(a)
444 f = getsymbol(a)
439 if f in symbols:
445 if f in symbols:
440 func = symbols[f]
446 func = symbols[f]
441 if getattr(func, '_takeorder', False):
447 if getattr(func, '_takeorder', False):
442 return func(repo, subset, b, order)
448 return func(repo, subset, b, order)
443 return func(repo, subset, b)
449 return func(repo, subset, b)
444
450
445 keep = lambda fn: getattr(fn, '__doc__', None) is not None
451 keep = lambda fn: getattr(fn, '__doc__', None) is not None
446
452
447 syms = [s for (s, fn) in symbols.items() if keep(fn)]
453 syms = [s for (s, fn) in symbols.items() if keep(fn)]
448 raise error.UnknownIdentifier(f, syms)
454 raise error.UnknownIdentifier(f, syms)
449
455
450 # functions
456 # functions
451
457
452 # symbols are callables like:
458 # symbols are callables like:
453 # fn(repo, subset, x)
459 # fn(repo, subset, x)
454 # with:
460 # with:
455 # repo - current repository instance
461 # repo - current repository instance
456 # subset - of revisions to be examined
462 # subset - of revisions to be examined
457 # x - argument in tree form
463 # x - argument in tree form
458 symbols = {}
464 symbols = {}
459
465
460 # symbols which can't be used for a DoS attack for any given input
466 # symbols which can't be used for a DoS attack for any given input
461 # (e.g. those which accept regexes as plain strings shouldn't be included)
467 # (e.g. those which accept regexes as plain strings shouldn't be included)
462 # functions that just return a lot of changesets (like all) don't count here
468 # functions that just return a lot of changesets (like all) don't count here
463 safesymbols = set()
469 safesymbols = set()
464
470
465 predicate = registrar.revsetpredicate()
471 predicate = registrar.revsetpredicate()
466
472
467 @predicate('_destupdate')
473 @predicate('_destupdate')
468 def _destupdate(repo, subset, x):
474 def _destupdate(repo, subset, x):
469 # experimental revset for update destination
475 # experimental revset for update destination
470 args = getargsdict(x, 'limit', 'clean check')
476 args = getargsdict(x, 'limit', 'clean check')
471 return subset & baseset([destutil.destupdate(repo, **args)[0]])
477 return subset & baseset([destutil.destupdate(repo, **args)[0]])
472
478
473 @predicate('_destmerge')
479 @predicate('_destmerge')
474 def _destmerge(repo, subset, x):
480 def _destmerge(repo, subset, x):
475 # experimental revset for merge destination
481 # experimental revset for merge destination
476 sourceset = None
482 sourceset = None
477 if x is not None:
483 if x is not None:
478 sourceset = getset(repo, fullreposet(repo), x)
484 sourceset = getset(repo, fullreposet(repo), x)
479 return subset & baseset([destutil.destmerge(repo, sourceset=sourceset)])
485 return subset & baseset([destutil.destmerge(repo, sourceset=sourceset)])
480
486
481 @predicate('adds(pattern)', safe=True)
487 @predicate('adds(pattern)', safe=True)
482 def adds(repo, subset, x):
488 def adds(repo, subset, x):
483 """Changesets that add a file matching pattern.
489 """Changesets that add a file matching pattern.
484
490
485 The pattern without explicit kind like ``glob:`` is expected to be
491 The pattern without explicit kind like ``glob:`` is expected to be
486 relative to the current directory and match against a file or a
492 relative to the current directory and match against a file or a
487 directory.
493 directory.
488 """
494 """
489 # i18n: "adds" is a keyword
495 # i18n: "adds" is a keyword
490 pat = getstring(x, _("adds requires a pattern"))
496 pat = getstring(x, _("adds requires a pattern"))
491 return checkstatus(repo, subset, pat, 1)
497 return checkstatus(repo, subset, pat, 1)
492
498
493 @predicate('ancestor(*changeset)', safe=True)
499 @predicate('ancestor(*changeset)', safe=True)
494 def ancestor(repo, subset, x):
500 def ancestor(repo, subset, x):
495 """A greatest common ancestor of the changesets.
501 """A greatest common ancestor of the changesets.
496
502
497 Accepts 0 or more changesets.
503 Accepts 0 or more changesets.
498 Will return empty list when passed no args.
504 Will return empty list when passed no args.
499 Greatest common ancestor of a single changeset is that changeset.
505 Greatest common ancestor of a single changeset is that changeset.
500 """
506 """
501 # i18n: "ancestor" is a keyword
507 # i18n: "ancestor" is a keyword
502 l = getlist(x)
508 l = getlist(x)
503 rl = fullreposet(repo)
509 rl = fullreposet(repo)
504 anc = None
510 anc = None
505
511
506 # (getset(repo, rl, i) for i in l) generates a list of lists
512 # (getset(repo, rl, i) for i in l) generates a list of lists
507 for revs in (getset(repo, rl, i) for i in l):
513 for revs in (getset(repo, rl, i) for i in l):
508 for r in revs:
514 for r in revs:
509 if anc is None:
515 if anc is None:
510 anc = repo[r]
516 anc = repo[r]
511 else:
517 else:
512 anc = anc.ancestor(repo[r])
518 anc = anc.ancestor(repo[r])
513
519
514 if anc is not None and anc.rev() in subset:
520 if anc is not None and anc.rev() in subset:
515 return baseset([anc.rev()])
521 return baseset([anc.rev()])
516 return baseset()
522 return baseset()
517
523
518 def _ancestors(repo, subset, x, followfirst=False):
524 def _ancestors(repo, subset, x, followfirst=False):
519 heads = getset(repo, fullreposet(repo), x)
525 heads = getset(repo, fullreposet(repo), x)
520 if not heads:
526 if not heads:
521 return baseset()
527 return baseset()
522 s = _revancestors(repo, heads, followfirst)
528 s = _revancestors(repo, heads, followfirst)
523 return subset & s
529 return subset & s
524
530
525 @predicate('ancestors(set)', safe=True)
531 @predicate('ancestors(set)', safe=True)
526 def ancestors(repo, subset, x):
532 def ancestors(repo, subset, x):
527 """Changesets that are ancestors of a changeset in set.
533 """Changesets that are ancestors of a changeset in set.
528 """
534 """
529 return _ancestors(repo, subset, x)
535 return _ancestors(repo, subset, x)
530
536
531 @predicate('_firstancestors', safe=True)
537 @predicate('_firstancestors', safe=True)
532 def _firstancestors(repo, subset, x):
538 def _firstancestors(repo, subset, x):
533 # ``_firstancestors(set)``
539 # ``_firstancestors(set)``
534 # Like ``ancestors(set)`` but follows only the first parents.
540 # Like ``ancestors(set)`` but follows only the first parents.
535 return _ancestors(repo, subset, x, followfirst=True)
541 return _ancestors(repo, subset, x, followfirst=True)
536
542
537 def ancestorspec(repo, subset, x, n, order):
543 def ancestorspec(repo, subset, x, n, order):
538 """``set~n``
544 """``set~n``
539 Changesets that are the Nth ancestor (first parents only) of a changeset
545 Changesets that are the Nth ancestor (first parents only) of a changeset
540 in set.
546 in set.
541 """
547 """
542 try:
548 n = getinteger(n, _("~ expects a number"))
543 n = int(n[1])
544 except (TypeError, ValueError):
545 raise error.ParseError(_("~ expects a number"))
546 ps = set()
549 ps = set()
547 cl = repo.changelog
550 cl = repo.changelog
548 for r in getset(repo, fullreposet(repo), x):
551 for r in getset(repo, fullreposet(repo), x):
549 for i in range(n):
552 for i in range(n):
550 r = cl.parentrevs(r)[0]
553 r = cl.parentrevs(r)[0]
551 ps.add(r)
554 ps.add(r)
552 return subset & ps
555 return subset & ps
553
556
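# Annotation (not part of the diff): for the '~' operator, an expression such
# as 'tip~2' reaches ancestorspec() with n being a parsed node like
# ('symbol', '2'); getinteger(n, "~ expects a number") replaces the old
# open-coded int(n[1]) plus (TypeError, ValueError) handling, so a malformed
# count surfaces as the same ParseError either way.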
554 @predicate('author(string)', safe=True)
557 @predicate('author(string)', safe=True)
555 def author(repo, subset, x):
558 def author(repo, subset, x):
556 """Alias for ``user(string)``.
559 """Alias for ``user(string)``.
557 """
560 """
558 # i18n: "author" is a keyword
561 # i18n: "author" is a keyword
559 n = getstring(x, _("author requires a string"))
562 n = getstring(x, _("author requires a string"))
560 kind, pattern, matcher = _substringmatcher(n, casesensitive=False)
563 kind, pattern, matcher = _substringmatcher(n, casesensitive=False)
561 return subset.filter(lambda x: matcher(repo[x].user()),
564 return subset.filter(lambda x: matcher(repo[x].user()),
562 condrepr=('<user %r>', n))
565 condrepr=('<user %r>', n))
563
566
564 @predicate('bisect(string)', safe=True)
567 @predicate('bisect(string)', safe=True)
565 def bisect(repo, subset, x):
568 def bisect(repo, subset, x):
566 """Changesets marked in the specified bisect status:
569 """Changesets marked in the specified bisect status:
567
570
568 - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
571 - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
569 - ``goods``, ``bads`` : csets topologically good/bad
572 - ``goods``, ``bads`` : csets topologically good/bad
570 - ``range`` : csets taking part in the bisection
573 - ``range`` : csets taking part in the bisection
571 - ``pruned`` : csets that are goods, bads or skipped
574 - ``pruned`` : csets that are goods, bads or skipped
572 - ``untested`` : csets whose fate is yet unknown
575 - ``untested`` : csets whose fate is yet unknown
573 - ``ignored`` : csets ignored due to DAG topology
576 - ``ignored`` : csets ignored due to DAG topology
574 - ``current`` : the cset currently being bisected
577 - ``current`` : the cset currently being bisected
575 """
578 """
576 # i18n: "bisect" is a keyword
579 # i18n: "bisect" is a keyword
577 status = getstring(x, _("bisect requires a string")).lower()
580 status = getstring(x, _("bisect requires a string")).lower()
578 state = set(hbisect.get(repo, status))
581 state = set(hbisect.get(repo, status))
579 return subset & state
582 return subset & state
580
583
581 # Backward-compatibility
584 # Backward-compatibility
582 # - no help entry so that we do not advertise it any more
585 # - no help entry so that we do not advertise it any more
583 @predicate('bisected', safe=True)
586 @predicate('bisected', safe=True)
584 def bisected(repo, subset, x):
587 def bisected(repo, subset, x):
585 return bisect(repo, subset, x)
588 return bisect(repo, subset, x)
586
589
587 @predicate('bookmark([name])', safe=True)
590 @predicate('bookmark([name])', safe=True)
588 def bookmark(repo, subset, x):
591 def bookmark(repo, subset, x):
589 """The named bookmark or all bookmarks.
592 """The named bookmark or all bookmarks.
590
593
591 Pattern matching is supported for `name`. See :hg:`help revisions.patterns`.
594 Pattern matching is supported for `name`. See :hg:`help revisions.patterns`.
592 """
595 """
593 # i18n: "bookmark" is a keyword
596 # i18n: "bookmark" is a keyword
594 args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
597 args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
595 if args:
598 if args:
596 bm = getstring(args[0],
599 bm = getstring(args[0],
597 # i18n: "bookmark" is a keyword
600 # i18n: "bookmark" is a keyword
598 _('the argument to bookmark must be a string'))
601 _('the argument to bookmark must be a string'))
599 kind, pattern, matcher = util.stringmatcher(bm)
602 kind, pattern, matcher = util.stringmatcher(bm)
600 bms = set()
603 bms = set()
601 if kind == 'literal':
604 if kind == 'literal':
602 bmrev = repo._bookmarks.get(pattern, None)
605 bmrev = repo._bookmarks.get(pattern, None)
603 if not bmrev:
606 if not bmrev:
604 raise error.RepoLookupError(_("bookmark '%s' does not exist")
607 raise error.RepoLookupError(_("bookmark '%s' does not exist")
605 % pattern)
608 % pattern)
606 bms.add(repo[bmrev].rev())
609 bms.add(repo[bmrev].rev())
607 else:
610 else:
608 matchrevs = set()
611 matchrevs = set()
609 for name, bmrev in repo._bookmarks.iteritems():
612 for name, bmrev in repo._bookmarks.iteritems():
610 if matcher(name):
613 if matcher(name):
611 matchrevs.add(bmrev)
614 matchrevs.add(bmrev)
612 if not matchrevs:
615 if not matchrevs:
613 raise error.RepoLookupError(_("no bookmarks exist"
616 raise error.RepoLookupError(_("no bookmarks exist"
614 " that match '%s'") % pattern)
617 " that match '%s'") % pattern)
615 for bmrev in matchrevs:
618 for bmrev in matchrevs:
616 bms.add(repo[bmrev].rev())
619 bms.add(repo[bmrev].rev())
617 else:
620 else:
618 bms = set([repo[r].rev()
621 bms = set([repo[r].rev()
619 for r in repo._bookmarks.values()])
622 for r in repo._bookmarks.values()])
620 bms -= set([node.nullrev])
623 bms -= set([node.nullrev])
621 return subset & bms
624 return subset & bms
622
625
623 @predicate('branch(string or set)', safe=True)
626 @predicate('branch(string or set)', safe=True)
624 def branch(repo, subset, x):
627 def branch(repo, subset, x):
625 """
628 """
626 All changesets belonging to the given branch or the branches of the given
629 All changesets belonging to the given branch or the branches of the given
627 changesets.
630 changesets.
628
631
629 Pattern matching is supported for `string`. See
632 Pattern matching is supported for `string`. See
630 :hg:`help revisions.patterns`.
633 :hg:`help revisions.patterns`.
631 """
634 """
632 getbi = repo.revbranchcache().branchinfo
635 getbi = repo.revbranchcache().branchinfo
633
636
634 try:
637 try:
635 b = getstring(x, '')
638 b = getstring(x, '')
636 except error.ParseError:
639 except error.ParseError:
637 # not a string, but another revspec, e.g. tip()
640 # not a string, but another revspec, e.g. tip()
638 pass
641 pass
639 else:
642 else:
640 kind, pattern, matcher = util.stringmatcher(b)
643 kind, pattern, matcher = util.stringmatcher(b)
641 if kind == 'literal':
644 if kind == 'literal':
642 # note: falls through to the revspec case if no branch with
645 # note: falls through to the revspec case if no branch with
643 # this name exists and pattern kind is not specified explicitly
646 # this name exists and pattern kind is not specified explicitly
644 if pattern in repo.branchmap():
647 if pattern in repo.branchmap():
645 return subset.filter(lambda r: matcher(getbi(r)[0]),
648 return subset.filter(lambda r: matcher(getbi(r)[0]),
646 condrepr=('<branch %r>', b))
649 condrepr=('<branch %r>', b))
647 if b.startswith('literal:'):
650 if b.startswith('literal:'):
648 raise error.RepoLookupError(_("branch '%s' does not exist")
651 raise error.RepoLookupError(_("branch '%s' does not exist")
649 % pattern)
652 % pattern)
650 else:
653 else:
651 return subset.filter(lambda r: matcher(getbi(r)[0]),
654 return subset.filter(lambda r: matcher(getbi(r)[0]),
652 condrepr=('<branch %r>', b))
655 condrepr=('<branch %r>', b))
653
656
654 s = getset(repo, fullreposet(repo), x)
657 s = getset(repo, fullreposet(repo), x)
655 b = set()
658 b = set()
656 for r in s:
659 for r in s:
657 b.add(getbi(r)[0])
660 b.add(getbi(r)[0])
658 c = s.__contains__
661 c = s.__contains__
659 return subset.filter(lambda r: c(r) or getbi(r)[0] in b,
662 return subset.filter(lambda r: c(r) or getbi(r)[0] in b,
660 condrepr=lambda: '<branch %r>' % sorted(b))
663 condrepr=lambda: '<branch %r>' % sorted(b))
661
664
662 @predicate('bumped()', safe=True)
665 @predicate('bumped()', safe=True)
663 def bumped(repo, subset, x):
666 def bumped(repo, subset, x):
664 """Mutable changesets marked as successors of public changesets.
667 """Mutable changesets marked as successors of public changesets.
665
668
666 Only non-public and non-obsolete changesets can be `bumped`.
669 Only non-public and non-obsolete changesets can be `bumped`.
667 """
670 """
668 # i18n: "bumped" is a keyword
671 # i18n: "bumped" is a keyword
669 getargs(x, 0, 0, _("bumped takes no arguments"))
672 getargs(x, 0, 0, _("bumped takes no arguments"))
670 bumped = obsmod.getrevs(repo, 'bumped')
673 bumped = obsmod.getrevs(repo, 'bumped')
671 return subset & bumped
674 return subset & bumped
672
675
673 @predicate('bundle()', safe=True)
676 @predicate('bundle()', safe=True)
674 def bundle(repo, subset, x):
677 def bundle(repo, subset, x):
675 """Changesets in the bundle.
678 """Changesets in the bundle.
676
679
677 Bundle must be specified by the -R option."""
680 Bundle must be specified by the -R option."""
678
681
679 try:
682 try:
680 bundlerevs = repo.changelog.bundlerevs
683 bundlerevs = repo.changelog.bundlerevs
681 except AttributeError:
684 except AttributeError:
682 raise error.Abort(_("no bundle provided - specify with -R"))
685 raise error.Abort(_("no bundle provided - specify with -R"))
683 return subset & bundlerevs
686 return subset & bundlerevs
684
687
685 def checkstatus(repo, subset, pat, field):
688 def checkstatus(repo, subset, pat, field):
686 hasset = matchmod.patkind(pat) == 'set'
689 hasset = matchmod.patkind(pat) == 'set'
687
690
688 mcache = [None]
691 mcache = [None]
689 def matches(x):
692 def matches(x):
690 c = repo[x]
693 c = repo[x]
691 if not mcache[0] or hasset:
694 if not mcache[0] or hasset:
692 mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
695 mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
693 m = mcache[0]
696 m = mcache[0]
694 fname = None
697 fname = None
695 if not m.anypats() and len(m.files()) == 1:
698 if not m.anypats() and len(m.files()) == 1:
696 fname = m.files()[0]
699 fname = m.files()[0]
697 if fname is not None:
700 if fname is not None:
698 if fname not in c.files():
701 if fname not in c.files():
699 return False
702 return False
700 else:
703 else:
701 for f in c.files():
704 for f in c.files():
702 if m(f):
705 if m(f):
703 break
706 break
704 else:
707 else:
705 return False
708 return False
706 files = repo.status(c.p1().node(), c.node())[field]
709 files = repo.status(c.p1().node(), c.node())[field]
707 if fname is not None:
710 if fname is not None:
708 if fname in files:
711 if fname in files:
709 return True
712 return True
710 else:
713 else:
711 for f in files:
714 for f in files:
712 if m(f):
715 if m(f):
713 return True
716 return True
714
717
715 return subset.filter(matches, condrepr=('<status[%r] %r>', field, pat))
718 return subset.filter(matches, condrepr=('<status[%r] %r>', field, pat))
716
719
717 def _children(repo, subset, parentset):
720 def _children(repo, subset, parentset):
718 if not parentset:
721 if not parentset:
719 return baseset()
722 return baseset()
720 cs = set()
723 cs = set()
721 pr = repo.changelog.parentrevs
724 pr = repo.changelog.parentrevs
722 minrev = parentset.min()
725 minrev = parentset.min()
723 nullrev = node.nullrev
726 nullrev = node.nullrev
724 for r in subset:
727 for r in subset:
725 if r <= minrev:
728 if r <= minrev:
726 continue
729 continue
727 p1, p2 = pr(r)
730 p1, p2 = pr(r)
728 if p1 in parentset:
731 if p1 in parentset:
729 cs.add(r)
732 cs.add(r)
730 if p2 != nullrev and p2 in parentset:
733 if p2 != nullrev and p2 in parentset:
731 cs.add(r)
734 cs.add(r)
732 return baseset(cs)
735 return baseset(cs)
733
736
734 @predicate('children(set)', safe=True)
737 @predicate('children(set)', safe=True)
735 def children(repo, subset, x):
738 def children(repo, subset, x):
736 """Child changesets of changesets in set.
739 """Child changesets of changesets in set.
737 """
740 """
738 s = getset(repo, fullreposet(repo), x)
741 s = getset(repo, fullreposet(repo), x)
739 cs = _children(repo, subset, s)
742 cs = _children(repo, subset, s)
740 return subset & cs
743 return subset & cs
741
744
742 @predicate('closed()', safe=True)
745 @predicate('closed()', safe=True)
743 def closed(repo, subset, x):
746 def closed(repo, subset, x):
744 """Changeset is closed.
747 """Changeset is closed.
745 """
748 """
746 # i18n: "closed" is a keyword
749 # i18n: "closed" is a keyword
747 getargs(x, 0, 0, _("closed takes no arguments"))
750 getargs(x, 0, 0, _("closed takes no arguments"))
748 return subset.filter(lambda r: repo[r].closesbranch(),
751 return subset.filter(lambda r: repo[r].closesbranch(),
749 condrepr='<branch closed>')
752 condrepr='<branch closed>')
750
753
751 @predicate('contains(pattern)')
754 @predicate('contains(pattern)')
752 def contains(repo, subset, x):
755 def contains(repo, subset, x):
753 """The revision's manifest contains a file matching pattern (but might not
756 """The revision's manifest contains a file matching pattern (but might not
754 modify it). See :hg:`help patterns` for information about file patterns.
757 modify it). See :hg:`help patterns` for information about file patterns.
755
758
756 The pattern without explicit kind like ``glob:`` is expected to be
759 The pattern without explicit kind like ``glob:`` is expected to be
757 relative to the current directory and match against a file exactly
760 relative to the current directory and match against a file exactly
758 for efficiency.
761 for efficiency.
759 """
762 """
760 # i18n: "contains" is a keyword
763 # i18n: "contains" is a keyword
761 pat = getstring(x, _("contains requires a pattern"))
764 pat = getstring(x, _("contains requires a pattern"))
762
765
763 def matches(x):
766 def matches(x):
764 if not matchmod.patkind(pat):
767 if not matchmod.patkind(pat):
765 pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
768 pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
766 if pats in repo[x]:
769 if pats in repo[x]:
767 return True
770 return True
768 else:
771 else:
769 c = repo[x]
772 c = repo[x]
770 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
773 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
771 for f in c.manifest():
774 for f in c.manifest():
772 if m(f):
775 if m(f):
773 return True
776 return True
774 return False
777 return False
775
778
776 return subset.filter(matches, condrepr=('<contains %r>', pat))
779 return subset.filter(matches, condrepr=('<contains %r>', pat))
777
780
778 @predicate('converted([id])', safe=True)
781 @predicate('converted([id])', safe=True)
779 def converted(repo, subset, x):
782 def converted(repo, subset, x):
780 """Changesets converted from the given identifier in the old repository if
783 """Changesets converted from the given identifier in the old repository if
781 present, or all converted changesets if no identifier is specified.
784 present, or all converted changesets if no identifier is specified.
782 """
785 """
783
786
784 # There is exactly no chance of resolving the revision, so do a simple
787 # There is exactly no chance of resolving the revision, so do a simple
785 # string compare and hope for the best
788 # string compare and hope for the best
786
789
787 rev = None
790 rev = None
788 # i18n: "converted" is a keyword
791 # i18n: "converted" is a keyword
789 l = getargs(x, 0, 1, _('converted takes one or no arguments'))
792 l = getargs(x, 0, 1, _('converted takes one or no arguments'))
790 if l:
793 if l:
791 # i18n: "converted" is a keyword
794 # i18n: "converted" is a keyword
792 rev = getstring(l[0], _('converted requires a revision'))
795 rev = getstring(l[0], _('converted requires a revision'))
793
796
794 def _matchvalue(r):
797 def _matchvalue(r):
795 source = repo[r].extra().get('convert_revision', None)
798 source = repo[r].extra().get('convert_revision', None)
796 return source is not None and (rev is None or source.startswith(rev))
799 return source is not None and (rev is None or source.startswith(rev))
797
800
798 return subset.filter(lambda r: _matchvalue(r),
801 return subset.filter(lambda r: _matchvalue(r),
799 condrepr=('<converted %r>', rev))
802 condrepr=('<converted %r>', rev))
800
803
801 @predicate('date(interval)', safe=True)
804 @predicate('date(interval)', safe=True)
802 def date(repo, subset, x):
805 def date(repo, subset, x):
803 """Changesets within the interval, see :hg:`help dates`.
806 """Changesets within the interval, see :hg:`help dates`.
804 """
807 """
805 # i18n: "date" is a keyword
808 # i18n: "date" is a keyword
806 ds = getstring(x, _("date requires a string"))
809 ds = getstring(x, _("date requires a string"))
807 dm = util.matchdate(ds)
810 dm = util.matchdate(ds)
808 return subset.filter(lambda x: dm(repo[x].date()[0]),
811 return subset.filter(lambda x: dm(repo[x].date()[0]),
809 condrepr=('<date %r>', ds))
812 condrepr=('<date %r>', ds))
810
813
811 @predicate('desc(string)', safe=True)
814 @predicate('desc(string)', safe=True)
812 def desc(repo, subset, x):
815 def desc(repo, subset, x):
813 """Search commit message for string. The match is case-insensitive.
816 """Search commit message for string. The match is case-insensitive.
814
817
815 Pattern matching is supported for `string`. See
818 Pattern matching is supported for `string`. See
816 :hg:`help revisions.patterns`.
819 :hg:`help revisions.patterns`.
817 """
820 """
818 # i18n: "desc" is a keyword
821 # i18n: "desc" is a keyword
819 ds = getstring(x, _("desc requires a string"))
822 ds = getstring(x, _("desc requires a string"))
820
823
821 kind, pattern, matcher = _substringmatcher(ds, casesensitive=False)
824 kind, pattern, matcher = _substringmatcher(ds, casesensitive=False)
822
825
823 return subset.filter(lambda r: matcher(repo[r].description()),
826 return subset.filter(lambda r: matcher(repo[r].description()),
824 condrepr=('<desc %r>', ds))
827 condrepr=('<desc %r>', ds))
825
828
826 def _descendants(repo, subset, x, followfirst=False):
829 def _descendants(repo, subset, x, followfirst=False):
827 roots = getset(repo, fullreposet(repo), x)
830 roots = getset(repo, fullreposet(repo), x)
828 if not roots:
831 if not roots:
829 return baseset()
832 return baseset()
830 s = _revdescendants(repo, roots, followfirst)
833 s = _revdescendants(repo, roots, followfirst)
831
834
832 # Both sets need to be ascending in order to lazily return the union
835 # Both sets need to be ascending in order to lazily return the union
833 # in the correct order.
836 # in the correct order.
834 base = subset & roots
837 base = subset & roots
835 desc = subset & s
838 desc = subset & s
836 result = base + desc
839 result = base + desc
837 if subset.isascending():
840 if subset.isascending():
838 result.sort()
841 result.sort()
839 elif subset.isdescending():
842 elif subset.isdescending():
840 result.sort(reverse=True)
843 result.sort(reverse=True)
841 else:
844 else:
842 result = subset & result
845 result = subset & result
843 return result
846 return result
844
847
845 @predicate('descendants(set)', safe=True)
848 @predicate('descendants(set)', safe=True)
846 def descendants(repo, subset, x):
849 def descendants(repo, subset, x):
847 """Changesets which are descendants of changesets in set.
850 """Changesets which are descendants of changesets in set.
848 """
851 """
849 return _descendants(repo, subset, x)
852 return _descendants(repo, subset, x)
850
853
851 @predicate('_firstdescendants', safe=True)
854 @predicate('_firstdescendants', safe=True)
852 def _firstdescendants(repo, subset, x):
855 def _firstdescendants(repo, subset, x):
853 # ``_firstdescendants(set)``
856 # ``_firstdescendants(set)``
854 # Like ``descendants(set)`` but follows only the first parents.
857 # Like ``descendants(set)`` but follows only the first parents.
855 return _descendants(repo, subset, x, followfirst=True)
858 return _descendants(repo, subset, x, followfirst=True)
856
859
857 @predicate('destination([set])', safe=True)
860 @predicate('destination([set])', safe=True)
858 def destination(repo, subset, x):
861 def destination(repo, subset, x):
859 """Changesets that were created by a graft, transplant or rebase operation,
862 """Changesets that were created by a graft, transplant or rebase operation,
860 with the given revisions specified as the source. Omitting the optional set
863 with the given revisions specified as the source. Omitting the optional set
861 is the same as passing all().
864 is the same as passing all().
862 """
865 """
863 if x is not None:
866 if x is not None:
864 sources = getset(repo, fullreposet(repo), x)
867 sources = getset(repo, fullreposet(repo), x)
865 else:
868 else:
866 sources = fullreposet(repo)
869 sources = fullreposet(repo)
867
870
868 dests = set()
871 dests = set()
869
872
870 # subset contains all of the possible destinations that can be returned, so
873 # subset contains all of the possible destinations that can be returned, so
871 # iterate over them and see if their source(s) were provided in the arg set.
874 # iterate over them and see if their source(s) were provided in the arg set.
872 # Even if the immediate src of r is not in the arg set, src's source (or
875 # Even if the immediate src of r is not in the arg set, src's source (or
873 # further back) may be. Scanning back further than the immediate src allows
876 # further back) may be. Scanning back further than the immediate src allows
874 # transitive transplants and rebases to yield the same results as transitive
877 # transitive transplants and rebases to yield the same results as transitive
875 # grafts.
878 # grafts.
876 for r in subset:
879 for r in subset:
877 src = _getrevsource(repo, r)
880 src = _getrevsource(repo, r)
878 lineage = None
881 lineage = None
879
882
880 while src is not None:
883 while src is not None:
881 if lineage is None:
884 if lineage is None:
882 lineage = list()
885 lineage = list()
883
886
884 lineage.append(r)
887 lineage.append(r)
885
888
886 # The visited lineage is a match if the current source is in the arg
889 # The visited lineage is a match if the current source is in the arg
887 # set. Since every candidate dest is visited by way of iterating
890 # set. Since every candidate dest is visited by way of iterating
888 # subset, any dests further back in the lineage will be tested by a
891 # subset, any dests further back in the lineage will be tested by a
889 # different iteration over subset. Likewise, if the src was already
892 # different iteration over subset. Likewise, if the src was already
890 # selected, the current lineage can be selected without going back
893 # selected, the current lineage can be selected without going back
891 # further.
894 # further.
892 if src in sources or src in dests:
895 if src in sources or src in dests:
893 dests.update(lineage)
896 dests.update(lineage)
894 break
897 break
895
898
896 r = src
899 r = src
897 src = _getrevsource(repo, r)
900 src = _getrevsource(repo, r)
898
901
899 return subset.filter(dests.__contains__,
902 return subset.filter(dests.__contains__,
900 condrepr=lambda: '<destination %r>' % sorted(dests))
903 condrepr=lambda: '<destination %r>' % sorted(dests))
901
904
902 @predicate('divergent()', safe=True)
905 @predicate('divergent()', safe=True)
903 def divergent(repo, subset, x):
906 def divergent(repo, subset, x):
904 """
907 """
905 Final successors of changesets with an alternative set of final successors.
908 Final successors of changesets with an alternative set of final successors.
906 """
909 """
907 # i18n: "divergent" is a keyword
910 # i18n: "divergent" is a keyword
908 getargs(x, 0, 0, _("divergent takes no arguments"))
911 getargs(x, 0, 0, _("divergent takes no arguments"))
909 divergent = obsmod.getrevs(repo, 'divergent')
912 divergent = obsmod.getrevs(repo, 'divergent')
910 return subset & divergent
913 return subset & divergent
911
914
912 @predicate('extinct()', safe=True)
915 @predicate('extinct()', safe=True)
913 def extinct(repo, subset, x):
916 def extinct(repo, subset, x):
914 """Obsolete changesets with obsolete descendants only.
917 """Obsolete changesets with obsolete descendants only.
915 """
918 """
916 # i18n: "extinct" is a keyword
919 # i18n: "extinct" is a keyword
917 getargs(x, 0, 0, _("extinct takes no arguments"))
920 getargs(x, 0, 0, _("extinct takes no arguments"))
918 extincts = obsmod.getrevs(repo, 'extinct')
921 extincts = obsmod.getrevs(repo, 'extinct')
919 return subset & extincts
922 return subset & extincts
920
923
921 @predicate('extra(label, [value])', safe=True)
924 @predicate('extra(label, [value])', safe=True)
922 def extra(repo, subset, x):
925 def extra(repo, subset, x):
923 """Changesets with the given label in the extra metadata, with the given
926 """Changesets with the given label in the extra metadata, with the given
924 optional value.
927 optional value.
925
928
926 Pattern matching is supported for `value`. See
929 Pattern matching is supported for `value`. See
927 :hg:`help revisions.patterns`.
930 :hg:`help revisions.patterns`.
928 """
931 """
929 args = getargsdict(x, 'extra', 'label value')
932 args = getargsdict(x, 'extra', 'label value')
930 if 'label' not in args:
933 if 'label' not in args:
931 # i18n: "extra" is a keyword
934 # i18n: "extra" is a keyword
932 raise error.ParseError(_('extra takes at least 1 argument'))
935 raise error.ParseError(_('extra takes at least 1 argument'))
933 # i18n: "extra" is a keyword
936 # i18n: "extra" is a keyword
934 label = getstring(args['label'], _('first argument to extra must be '
937 label = getstring(args['label'], _('first argument to extra must be '
935 'a string'))
938 'a string'))
936 value = None
939 value = None
937
940
938 if 'value' in args:
941 if 'value' in args:
939 # i18n: "extra" is a keyword
942 # i18n: "extra" is a keyword
940 value = getstring(args['value'], _('second argument to extra must be '
943 value = getstring(args['value'], _('second argument to extra must be '
941 'a string'))
944 'a string'))
942 kind, value, matcher = util.stringmatcher(value)
945 kind, value, matcher = util.stringmatcher(value)
943
946
944 def _matchvalue(r):
947 def _matchvalue(r):
945 extra = repo[r].extra()
948 extra = repo[r].extra()
946 return label in extra and (value is None or matcher(extra[label]))
949 return label in extra and (value is None or matcher(extra[label]))
947
950
948 return subset.filter(lambda r: _matchvalue(r),
951 return subset.filter(lambda r: _matchvalue(r),
949 condrepr=('<extra[%r] %r>', label, value))
952 condrepr=('<extra[%r] %r>', label, value))
950
953
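The per-revision test in extra() boils down to "label present in the extra dict, and value (if given) accepted by the matcher callable produced by util.stringmatcher". A standalone sketch of that test (illustrative only, not the module's API):

def matchextra(extradict, label, matcher=None):
    # matcher is any callable taking the stored value; None means
    # presence of the label alone is enough
    return label in extradict and (matcher is None or matcher(extradict[label]))

print(matchextra({'branch': 'default'}, 'branch'))                           # True
print(matchextra({'branch': 'default'}, 'branch', lambda v: v == 'stable'))  # False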
951 @predicate('filelog(pattern)', safe=True)
954 @predicate('filelog(pattern)', safe=True)
952 def filelog(repo, subset, x):
955 def filelog(repo, subset, x):
953 """Changesets connected to the specified filelog.
956 """Changesets connected to the specified filelog.
954
957
955 For performance reasons, visits only revisions mentioned in the file-level
958 For performance reasons, visits only revisions mentioned in the file-level
956 filelog, rather than filtering through all changesets (much faster, but
959 filelog, rather than filtering through all changesets (much faster, but
957 doesn't include deletes or duplicate changes). For a slower, more accurate
960 doesn't include deletes or duplicate changes). For a slower, more accurate
958 result, use ``file()``.
961 result, use ``file()``.
959
962
960 The pattern without explicit kind like ``glob:`` is expected to be
963 The pattern without explicit kind like ``glob:`` is expected to be
961 relative to the current directory and match against a file exactly
964 relative to the current directory and match against a file exactly
962 for efficiency.
965 for efficiency.
963
966
964 If some linkrev points to revisions filtered by the current repoview, we'll
967 If some linkrev points to revisions filtered by the current repoview, we'll
965 work around it to return a non-filtered value.
968 work around it to return a non-filtered value.
966 """
969 """
967
970
968 # i18n: "filelog" is a keyword
971 # i18n: "filelog" is a keyword
969 pat = getstring(x, _("filelog requires a pattern"))
972 pat = getstring(x, _("filelog requires a pattern"))
970 s = set()
973 s = set()
971 cl = repo.changelog
974 cl = repo.changelog
972
975
973 if not matchmod.patkind(pat):
976 if not matchmod.patkind(pat):
974 f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
977 f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
975 files = [f]
978 files = [f]
976 else:
979 else:
977 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
980 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
978 files = (f for f in repo[None] if m(f))
981 files = (f for f in repo[None] if m(f))
979
982
980 for f in files:
983 for f in files:
981 fl = repo.file(f)
984 fl = repo.file(f)
982 known = {}
985 known = {}
983 scanpos = 0
986 scanpos = 0
984 for fr in list(fl):
987 for fr in list(fl):
985 fn = fl.node(fr)
988 fn = fl.node(fr)
986 if fn in known:
989 if fn in known:
987 s.add(known[fn])
990 s.add(known[fn])
988 continue
991 continue
989
992
990 lr = fl.linkrev(fr)
993 lr = fl.linkrev(fr)
991 if lr in cl:
994 if lr in cl:
992 s.add(lr)
995 s.add(lr)
993 elif scanpos is not None:
996 elif scanpos is not None:
994 # lowest matching changeset is filtered, scan further
997 # lowest matching changeset is filtered, scan further
995 # ahead in changelog
998 # ahead in changelog
996 start = max(lr, scanpos) + 1
999 start = max(lr, scanpos) + 1
997 scanpos = None
1000 scanpos = None
998 for r in cl.revs(start):
1001 for r in cl.revs(start):
999 # minimize parsing of non-matching entries
1002 # minimize parsing of non-matching entries
1000 if f in cl.revision(r) and f in cl.readfiles(r):
1003 if f in cl.revision(r) and f in cl.readfiles(r):
1001 try:
1004 try:
1002 # try to use manifest delta fastpath
1005 # try to use manifest delta fastpath
1003 n = repo[r].filenode(f)
1006 n = repo[r].filenode(f)
1004 if n not in known:
1007 if n not in known:
1005 if n == fn:
1008 if n == fn:
1006 s.add(r)
1009 s.add(r)
1007 scanpos = r
1010 scanpos = r
1008 break
1011 break
1009 else:
1012 else:
1010 known[n] = r
1013 known[n] = r
1011 except error.ManifestLookupError:
1014 except error.ManifestLookupError:
1012 # deletion in changelog
1015 # deletion in changelog
1013 continue
1016 continue
1014
1017
1015 return subset & s
1018 return subset & s
1016
1019
1017 @predicate('first(set, [n])', safe=True)
1020 @predicate('first(set, [n])', safe=True)
1018 def first(repo, subset, x):
1021 def first(repo, subset, x):
1019 """An alias for limit().
1022 """An alias for limit().
1020 """
1023 """
1021 return limit(repo, subset, x)
1024 return limit(repo, subset, x)
1022
1025
1023 def _follow(repo, subset, x, name, followfirst=False):
1026 def _follow(repo, subset, x, name, followfirst=False):
1024 l = getargs(x, 0, 2, _("%s takes no arguments or a pattern "
1027 l = getargs(x, 0, 2, _("%s takes no arguments or a pattern "
1025 "and an optional revset") % name)
1028 "and an optional revset") % name)
1026 c = repo['.']
1029 c = repo['.']
1027 if l:
1030 if l:
1028 x = getstring(l[0], _("%s expected a pattern") % name)
1031 x = getstring(l[0], _("%s expected a pattern") % name)
1029 rev = None
1032 rev = None
1030 if len(l) >= 2:
1033 if len(l) >= 2:
1031 revs = getset(repo, fullreposet(repo), l[1])
1034 revs = getset(repo, fullreposet(repo), l[1])
1032 if len(revs) != 1:
1035 if len(revs) != 1:
1033 raise error.RepoLookupError(
1036 raise error.RepoLookupError(
1034 _("%s expected one starting revision") % name)
1037 _("%s expected one starting revision") % name)
1035 rev = revs.last()
1038 rev = revs.last()
1036 c = repo[rev]
1039 c = repo[rev]
1037 matcher = matchmod.match(repo.root, repo.getcwd(), [x],
1040 matcher = matchmod.match(repo.root, repo.getcwd(), [x],
1038 ctx=repo[rev], default='path')
1041 ctx=repo[rev], default='path')
1039
1042
1040 files = c.manifest().walk(matcher)
1043 files = c.manifest().walk(matcher)
1041
1044
1042 s = set()
1045 s = set()
1043 for fname in files:
1046 for fname in files:
1044 fctx = c[fname]
1047 fctx = c[fname]
1045 s = s.union(set(c.rev() for c in fctx.ancestors(followfirst)))
1048 s = s.union(set(c.rev() for c in fctx.ancestors(followfirst)))
1046 # include the revision responsible for the most recent version
1049 # include the revision responsible for the most recent version
1047 s.add(fctx.introrev())
1050 s.add(fctx.introrev())
1048 else:
1051 else:
1049 s = _revancestors(repo, baseset([c.rev()]), followfirst)
1052 s = _revancestors(repo, baseset([c.rev()]), followfirst)
1050
1053
1051 return subset & s
1054 return subset & s
1052
1055
1053 @predicate('follow([pattern[, startrev]])', safe=True)
1056 @predicate('follow([pattern[, startrev]])', safe=True)
1054 def follow(repo, subset, x):
1057 def follow(repo, subset, x):
1055 """
1058 """
1056 An alias for ``::.`` (ancestors of the working directory's first parent).
1059 An alias for ``::.`` (ancestors of the working directory's first parent).
1057 If pattern is specified, the histories of files matching the given
1060 If pattern is specified, the histories of files matching the given
1058 pattern in the revision given by startrev are followed, including copies.
1061 pattern in the revision given by startrev are followed, including copies.
1059 """
1062 """
1060 return _follow(repo, subset, x, 'follow')
1063 return _follow(repo, subset, x, 'follow')
1061
1064
1062 @predicate('_followfirst', safe=True)
1065 @predicate('_followfirst', safe=True)
1063 def _followfirst(repo, subset, x):
1066 def _followfirst(repo, subset, x):
1064 # ``followfirst([pattern[, startrev]])``
1067 # ``followfirst([pattern[, startrev]])``
1065 # Like ``follow([pattern[, startrev]])`` but follows only the first parent
1068 # Like ``follow([pattern[, startrev]])`` but follows only the first parent
1066 # of every revision or file revision.
1069 # of every revision or file revision.
1067 return _follow(repo, subset, x, '_followfirst', followfirst=True)
1070 return _follow(repo, subset, x, '_followfirst', followfirst=True)
1068
1071
1069 @predicate('followlines(file, fromline, toline[, startrev=.])', safe=True)
1072 @predicate('followlines(file, fromline, toline[, startrev=.])', safe=True)
1070 def followlines(repo, subset, x):
1073 def followlines(repo, subset, x):
1071 """Changesets modifying `file` in line range ('fromline', 'toline').
1074 """Changesets modifying `file` in line range ('fromline', 'toline').
1072
1075
1073 Line range corresponds to 'file' content at 'startrev' and should hence be
1076 Line range corresponds to 'file' content at 'startrev' and should hence be
1074 consistent with the file's size at that revision. If startrev is not
1077 consistent with the file's size at that revision. If startrev is not
1075 specified, the working directory's parent is used.
1078 specified, the working directory's parent is used.
1076 """
1079 """
1077 from . import context # avoid circular import issues
1080 from . import context # avoid circular import issues
1078
1081
1079 args = getargsdict(x, 'followlines', 'file *lines startrev')
1082 args = getargsdict(x, 'followlines', 'file *lines startrev')
1080 if len(args['lines']) != 2:
1083 if len(args['lines']) != 2:
1081 raise error.ParseError(_("followlines takes at least three arguments"))
1084 raise error.ParseError(_("followlines takes at least three arguments"))
1082
1085
1083 rev = '.'
1086 rev = '.'
1084 if 'startrev' in args:
1087 if 'startrev' in args:
1085 revs = getset(repo, fullreposet(repo), args['startrev'])
1088 revs = getset(repo, fullreposet(repo), args['startrev'])
1086 if len(revs) != 1:
1089 if len(revs) != 1:
1087 raise error.ParseError(
1090 raise error.ParseError(
1088 _("followlines expects exactly one revision"))
1091 _("followlines expects exactly one revision"))
1089 rev = revs.last()
1092 rev = revs.last()
1090
1093
1091 pat = getstring(args['file'], _("followlines requires a pattern"))
1094 pat = getstring(args['file'], _("followlines requires a pattern"))
1092 if not matchmod.patkind(pat):
1095 if not matchmod.patkind(pat):
1093 fname = pathutil.canonpath(repo.root, repo.getcwd(), pat)
1096 fname = pathutil.canonpath(repo.root, repo.getcwd(), pat)
1094 else:
1097 else:
1095 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[rev])
1098 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[rev])
1096 files = [f for f in repo[rev] if m(f)]
1099 files = [f for f in repo[rev] if m(f)]
1097 if len(files) != 1:
1100 if len(files) != 1:
1098 raise error.ParseError(_("followlines expects exactly one file"))
1101 raise error.ParseError(_("followlines expects exactly one file"))
1099 fname = files[0]
1102 fname = files[0]
1100
1103
1101 try:
1102 fromline, toline = [int(getsymbol(a)) for a in args['lines']]
1103 except ValueError:
1104 raise error.ParseError(_("line range bounds must be integers"))
1104 fromline, toline = [getinteger(a, _("line range bounds must be integers"))
1105 for a in args['lines']]
1105 if toline - fromline < 0:
1106 if toline - fromline < 0:
1106 raise error.ParseError(_("line range must be positive"))
1107 raise error.ParseError(_("line range must be positive"))
1107 if fromline < 1:
1108 if fromline < 1:
1108 raise error.ParseError(_("fromline must be strictly positive"))
1109 raise error.ParseError(_("fromline must be strictly positive"))
1109 fromline -= 1
1110 fromline -= 1
1110
1111
1111 fctx = repo[rev].filectx(fname)
1112 fctx = repo[rev].filectx(fname)
1112 revs = (c.rev() for c in context.blockancestors(fctx, fromline, toline))
1113 revs = (c.rev() for c in context.blockancestors(fctx, fromline, toline))
1113 return subset & generatorset(revs, iterasc=False)
1114 return subset & generatorset(revs, iterasc=False)
1114
1115
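followlines() is one of the call sites switched to the shared getinteger() helper (defined elsewhere in this module and not shown here); limit() and last() below follow the same pattern. A minimal standalone sketch of that kind of helper, with invented names and without the parse-tree handling the real one needs, so details may differ:

class ParseError(Exception):
    pass

def getinteger_sketch(value, err):
    # convert, and turn any failure into a parse error carrying the
    # caller-supplied message so every call site reports consistently
    try:
        return int(value)
    except (TypeError, ValueError):
        raise ParseError(err)

print(getinteger_sketch("12", "line range bounds must be integers"))  # 12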
1115 @predicate('all()', safe=True)
1116 @predicate('all()', safe=True)
1116 def getall(repo, subset, x):
1117 def getall(repo, subset, x):
1117 """All changesets, the same as ``0:tip``.
1118 """All changesets, the same as ``0:tip``.
1118 """
1119 """
1119 # i18n: "all" is a keyword
1120 # i18n: "all" is a keyword
1120 getargs(x, 0, 0, _("all takes no arguments"))
1121 getargs(x, 0, 0, _("all takes no arguments"))
1121 return subset & spanset(repo) # drop "null" if any
1122 return subset & spanset(repo) # drop "null" if any
1122
1123
1123 @predicate('grep(regex)')
1124 @predicate('grep(regex)')
1124 def grep(repo, subset, x):
1125 def grep(repo, subset, x):
1125 """Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
1126 """Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
1126 to ensure special escape characters are handled correctly. Unlike
1127 to ensure special escape characters are handled correctly. Unlike
1127 ``keyword(string)``, the match is case-sensitive.
1128 ``keyword(string)``, the match is case-sensitive.
1128 """
1129 """
1129 try:
1130 try:
1130 # i18n: "grep" is a keyword
1131 # i18n: "grep" is a keyword
1131 gr = re.compile(getstring(x, _("grep requires a string")))
1132 gr = re.compile(getstring(x, _("grep requires a string")))
1132 except re.error as e:
1133 except re.error as e:
1133 raise error.ParseError(_('invalid match pattern: %s') % e)
1134 raise error.ParseError(_('invalid match pattern: %s') % e)
1134
1135
1135 def matches(x):
1136 def matches(x):
1136 c = repo[x]
1137 c = repo[x]
1137 for e in c.files() + [c.user(), c.description()]:
1138 for e in c.files() + [c.user(), c.description()]:
1138 if gr.search(e):
1139 if gr.search(e):
1139 return True
1140 return True
1140 return False
1141 return False
1141
1142
1142 return subset.filter(matches, condrepr=('<grep %r>', gr.pattern))
1143 return subset.filter(matches, condrepr=('<grep %r>', gr.pattern))
1143
1144
1144 @predicate('_matchfiles', safe=True)
1145 @predicate('_matchfiles', safe=True)
1145 def _matchfiles(repo, subset, x):
1146 def _matchfiles(repo, subset, x):
1146 # _matchfiles takes a revset list of prefixed arguments:
1147 # _matchfiles takes a revset list of prefixed arguments:
1147 #
1148 #
1148 # [p:foo, i:bar, x:baz]
1149 # [p:foo, i:bar, x:baz]
1149 #
1150 #
1150 # builds a match object from them and filters subset. Allowed
1151 # builds a match object from them and filters subset. Allowed
1151 # prefixes are 'p:' for regular patterns, 'i:' for include
1152 # prefixes are 'p:' for regular patterns, 'i:' for include
1152 # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
1153 # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
1153 # a revision identifier, or the empty string to reference the
1154 # a revision identifier, or the empty string to reference the
1154 # working directory, from which the match object is
1155 # working directory, from which the match object is
1155 # initialized. Use 'd:' to set the default matching mode, default
1156 # initialized. Use 'd:' to set the default matching mode, default
1156 # to 'glob'. At most one 'r:' and 'd:' argument can be passed.
1157 # to 'glob'. At most one 'r:' and 'd:' argument can be passed.
1157
1158
1158 l = getargs(x, 1, -1, "_matchfiles requires at least one argument")
1159 l = getargs(x, 1, -1, "_matchfiles requires at least one argument")
1159 pats, inc, exc = [], [], []
1160 pats, inc, exc = [], [], []
1160 rev, default = None, None
1161 rev, default = None, None
1161 for arg in l:
1162 for arg in l:
1162 s = getstring(arg, "_matchfiles requires string arguments")
1163 s = getstring(arg, "_matchfiles requires string arguments")
1163 prefix, value = s[:2], s[2:]
1164 prefix, value = s[:2], s[2:]
1164 if prefix == 'p:':
1165 if prefix == 'p:':
1165 pats.append(value)
1166 pats.append(value)
1166 elif prefix == 'i:':
1167 elif prefix == 'i:':
1167 inc.append(value)
1168 inc.append(value)
1168 elif prefix == 'x:':
1169 elif prefix == 'x:':
1169 exc.append(value)
1170 exc.append(value)
1170 elif prefix == 'r:':
1171 elif prefix == 'r:':
1171 if rev is not None:
1172 if rev is not None:
1172 raise error.ParseError('_matchfiles expected at most one '
1173 raise error.ParseError('_matchfiles expected at most one '
1173 'revision')
1174 'revision')
1174 if value != '': # empty means working directory; leave rev as None
1175 if value != '': # empty means working directory; leave rev as None
1175 rev = value
1176 rev = value
1176 elif prefix == 'd:':
1177 elif prefix == 'd:':
1177 if default is not None:
1178 if default is not None:
1178 raise error.ParseError('_matchfiles expected at most one '
1179 raise error.ParseError('_matchfiles expected at most one '
1179 'default mode')
1180 'default mode')
1180 default = value
1181 default = value
1181 else:
1182 else:
1182 raise error.ParseError('invalid _matchfiles prefix: %s' % prefix)
1183 raise error.ParseError('invalid _matchfiles prefix: %s' % prefix)
1183 if not default:
1184 if not default:
1184 default = 'glob'
1185 default = 'glob'
1185
1186
1186 m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
1187 m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
1187 exclude=exc, ctx=repo[rev], default=default)
1188 exclude=exc, ctx=repo[rev], default=default)
1188
1189
1189 # This directly reads the changelog data, as creating changectx for all
1190 # This directly reads the changelog data, as creating changectx for all
1190 # revisions is quite expensive.
1191 # revisions is quite expensive.
1191 getfiles = repo.changelog.readfiles
1192 getfiles = repo.changelog.readfiles
1192 wdirrev = node.wdirrev
1193 wdirrev = node.wdirrev
1193 def matches(x):
1194 def matches(x):
1194 if x == wdirrev:
1195 if x == wdirrev:
1195 files = repo[x].files()
1196 files = repo[x].files()
1196 else:
1197 else:
1197 files = getfiles(x)
1198 files = getfiles(x)
1198 for f in files:
1199 for f in files:
1199 if m(f):
1200 if m(f):
1200 return True
1201 return True
1201 return False
1202 return False
1202
1203
1203 return subset.filter(matches,
1204 return subset.filter(matches,
1204 condrepr=('<matchfiles patterns=%r, include=%r '
1205 condrepr=('<matchfiles patterns=%r, include=%r '
1205 'exclude=%r, default=%r, rev=%r>',
1206 'exclude=%r, default=%r, rev=%r>',
1206 pats, inc, exc, default, rev))
1207 pats, inc, exc, default, rev))
1207
1208
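The prefix handling described in the comment above ('p:', 'i:', 'x:', 'r:', 'd:') can be sketched standalone as below (illustrative only; the real code feeds the resulting buckets into matchmod.match and also rejects duplicate 'r:'/'d:' arguments):

def splitprefixed(argstrings):
    pats, inc, exc = [], [], []
    rev = default = None
    for s in argstrings:
        prefix, value = s[:2], s[2:]
        if prefix == 'p:':
            pats.append(value)
        elif prefix == 'i:':
            inc.append(value)
        elif prefix == 'x:':
            exc.append(value)
        elif prefix == 'r:':
            rev = value or None   # empty string means working directory
        elif prefix == 'd:':
            default = value
        else:
            raise ValueError('invalid prefix: %s' % prefix)
    return pats, inc, exc, rev, default or 'glob'

print(splitprefixed(['p:foo', 'i:bar', 'x:baz', 'd:relpath']))
# (['foo'], ['bar'], ['baz'], None, 'relpath')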
1208 @predicate('file(pattern)', safe=True)
1209 @predicate('file(pattern)', safe=True)
1209 def hasfile(repo, subset, x):
1210 def hasfile(repo, subset, x):
1210 """Changesets affecting files matched by pattern.
1211 """Changesets affecting files matched by pattern.
1211
1212
1212 For a faster but less accurate result, consider using ``filelog()``
1213 For a faster but less accurate result, consider using ``filelog()``
1213 instead.
1214 instead.
1214
1215
1215 This predicate uses ``glob:`` as the default kind of pattern.
1216 This predicate uses ``glob:`` as the default kind of pattern.
1216 """
1217 """
1217 # i18n: "file" is a keyword
1218 # i18n: "file" is a keyword
1218 pat = getstring(x, _("file requires a pattern"))
1219 pat = getstring(x, _("file requires a pattern"))
1219 return _matchfiles(repo, subset, ('string', 'p:' + pat))
1220 return _matchfiles(repo, subset, ('string', 'p:' + pat))
1220
1221
1221 @predicate('head()', safe=True)
1222 @predicate('head()', safe=True)
1222 def head(repo, subset, x):
1223 def head(repo, subset, x):
1223 """Changeset is a named branch head.
1224 """Changeset is a named branch head.
1224 """
1225 """
1225 # i18n: "head" is a keyword
1226 # i18n: "head" is a keyword
1226 getargs(x, 0, 0, _("head takes no arguments"))
1227 getargs(x, 0, 0, _("head takes no arguments"))
1227 hs = set()
1228 hs = set()
1228 cl = repo.changelog
1229 cl = repo.changelog
1229 for ls in repo.branchmap().itervalues():
1230 for ls in repo.branchmap().itervalues():
1230 hs.update(cl.rev(h) for h in ls)
1231 hs.update(cl.rev(h) for h in ls)
1231 return subset & baseset(hs)
1232 return subset & baseset(hs)
1232
1233
1233 @predicate('heads(set)', safe=True)
1234 @predicate('heads(set)', safe=True)
1234 def heads(repo, subset, x):
1235 def heads(repo, subset, x):
1235 """Members of set with no children in set.
1236 """Members of set with no children in set.
1236 """
1237 """
1237 s = getset(repo, subset, x)
1238 s = getset(repo, subset, x)
1238 ps = parents(repo, subset, x)
1239 ps = parents(repo, subset, x)
1239 return s - ps
1240 return s - ps
1240
1241
1241 @predicate('hidden()', safe=True)
1242 @predicate('hidden()', safe=True)
1242 def hidden(repo, subset, x):
1243 def hidden(repo, subset, x):
1243 """Hidden changesets.
1244 """Hidden changesets.
1244 """
1245 """
1245 # i18n: "hidden" is a keyword
1246 # i18n: "hidden" is a keyword
1246 getargs(x, 0, 0, _("hidden takes no arguments"))
1247 getargs(x, 0, 0, _("hidden takes no arguments"))
1247 hiddenrevs = repoview.filterrevs(repo, 'visible')
1248 hiddenrevs = repoview.filterrevs(repo, 'visible')
1248 return subset & hiddenrevs
1249 return subset & hiddenrevs
1249
1250
1250 @predicate('keyword(string)', safe=True)
1251 @predicate('keyword(string)', safe=True)
1251 def keyword(repo, subset, x):
1252 def keyword(repo, subset, x):
1252 """Search commit message, user name, and names of changed files for
1253 """Search commit message, user name, and names of changed files for
1253 string. The match is case-insensitive.
1254 string. The match is case-insensitive.
1254
1255
1255 For a regular expression or case-sensitive search of these fields, use
1256 For a regular expression or case-sensitive search of these fields, use
1256 ``grep(regex)``.
1257 ``grep(regex)``.
1257 """
1258 """
1258 # i18n: "keyword" is a keyword
1259 # i18n: "keyword" is a keyword
1259 kw = encoding.lower(getstring(x, _("keyword requires a string")))
1260 kw = encoding.lower(getstring(x, _("keyword requires a string")))
1260
1261
1261 def matches(r):
1262 def matches(r):
1262 c = repo[r]
1263 c = repo[r]
1263 return any(kw in encoding.lower(t)
1264 return any(kw in encoding.lower(t)
1264 for t in c.files() + [c.user(), c.description()])
1265 for t in c.files() + [c.user(), c.description()])
1265
1266
1266 return subset.filter(matches, condrepr=('<keyword %r>', kw))
1267 return subset.filter(matches, condrepr=('<keyword %r>', kw))
1267
1268
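The practical difference between keyword() and grep() is a lowercased substring test versus a compiled, case-sensitive regex, both applied to the same fields (changed files, user, description). A standalone comparison with made-up field values:

import re

fields = ['file_a.py', 'Alice <alice@example.com>', 'Fix Overflow in parser']

def kwmatch(kw, fields):
    kw = kw.lower()
    return any(kw in f.lower() for f in fields)

print(kwmatch('overflow', fields))                    # True  (case-insensitive)
print(any(re.search('overflow', f) for f in fields))  # False (case-sensitive)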
1268 @predicate('limit(set[, n[, offset]])', safe=True)
1269 @predicate('limit(set[, n[, offset]])', safe=True)
1269 def limit(repo, subset, x):
1270 def limit(repo, subset, x):
1270 """First n members of set, defaulting to 1, starting from offset.
1271 """First n members of set, defaulting to 1, starting from offset.
1271 """
1272 """
1272 args = getargsdict(x, 'limit', 'set n offset')
1273 args = getargsdict(x, 'limit', 'set n offset')
1273 if 'set' not in args:
1274 if 'set' not in args:
1274 # i18n: "limit" is a keyword
1275 # i18n: "limit" is a keyword
1275 raise error.ParseError(_("limit requires one to three arguments"))
1276 raise error.ParseError(_("limit requires one to three arguments"))
1276 try:
1277 lim, ofs = 1, 0
1278 if 'n' in args:
1279 # i18n: "limit" is a keyword
1280 lim = int(getstring(args['n'], _("limit requires a number")))
1281 if 'offset' in args:
1282 # i18n: "limit" is a keyword
1283 ofs = int(getstring(args['offset'], _("limit requires a number")))
1284 if ofs < 0:
1285 raise error.ParseError(_("negative offset"))
1286 except (TypeError, ValueError):
1287 # i18n: "limit" is a keyword
1288 raise error.ParseError(_("limit expects a number"))
1277 lim, ofs = 1, 0
1278 if 'n' in args:
1279 # i18n: "limit" is a keyword
1280 lim = getinteger(args['n'], _("limit expects a number"))
1281 if 'offset' in args:
1282 # i18n: "limit" is a keyword
1283 ofs = getinteger(args['offset'], _("limit expects a number"))
1284 if ofs < 0:
1285 raise error.ParseError(_("negative offset"))
1289 os = getset(repo, fullreposet(repo), args['set'])
1286 os = getset(repo, fullreposet(repo), args['set'])
1290 result = []
1287 result = []
1291 it = iter(os)
1288 it = iter(os)
1292 for x in xrange(ofs):
1289 for x in xrange(ofs):
1293 y = next(it, None)
1290 y = next(it, None)
1294 if y is None:
1291 if y is None:
1295 break
1292 break
1296 for x in xrange(lim):
1293 for x in xrange(lim):
1297 y = next(it, None)
1294 y = next(it, None)
1298 if y is None:
1295 if y is None:
1299 break
1296 break
1300 elif y in subset:
1297 elif y in subset:
1301 result.append(y)
1298 result.append(y)
1302 return baseset(result, datarepr=('<limit n=%d, offset=%d, %r, %r>',
1299 return baseset(result, datarepr=('<limit n=%d, offset=%d, %r, %r>',
1303 lim, ofs, subset, os))
1300 lim, ofs, subset, os))
1304
1301
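Note that the walk above counts every item it pulls after the offset against the limit, whether or not it survives the membership test against subset. A standalone sketch of that behavior (names and data invented):

from itertools import islice

def takelimit(revs, subset, lim=1, ofs=0):
    # skip ofs items, pull at most lim more, keep those that are in subset
    return [r for r in islice(iter(revs), ofs, ofs + lim) if r in subset]

print(takelimit([0, 1, 2, 3, 4, 5], {2, 3, 5}, lim=3, ofs=1))  # [2, 3]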
1305 @predicate('last(set, [n])', safe=True)
1302 @predicate('last(set, [n])', safe=True)
1306 def last(repo, subset, x):
1303 def last(repo, subset, x):
1307 """Last n members of set, defaulting to 1.
1304 """Last n members of set, defaulting to 1.
1308 """
1305 """
1309 # i18n: "last" is a keyword
1306 # i18n: "last" is a keyword
1310 l = getargs(x, 1, 2, _("last requires one or two arguments"))
1307 l = getargs(x, 1, 2, _("last requires one or two arguments"))
1311 try:
1312 lim = 1
1313 if len(l) == 2:
1314 # i18n: "last" is a keyword
1315 lim = int(getstring(l[1], _("last requires a number")))
1316 except (TypeError, ValueError):
1317 # i18n: "last" is a keyword
1318 raise error.ParseError(_("last expects a number"))
1308 lim = 1
1309 if len(l) == 2:
1310 # i18n: "last" is a keyword
1311 lim = getinteger(l[1], _("last expects a number"))
1319 os = getset(repo, fullreposet(repo), l[0])
1312 os = getset(repo, fullreposet(repo), l[0])
1320 os.reverse()
1313 os.reverse()
1321 result = []
1314 result = []
1322 it = iter(os)
1315 it = iter(os)
1323 for x in xrange(lim):
1316 for x in xrange(lim):
1324 y = next(it, None)
1317 y = next(it, None)
1325 if y is None:
1318 if y is None:
1326 break
1319 break
1327 elif y in subset:
1320 elif y in subset:
1328 result.append(y)
1321 result.append(y)
1329 return baseset(result, datarepr=('<last n=%d, %r, %r>', lim, subset, os))
1322 return baseset(result, datarepr=('<last n=%d, %r, %r>', lim, subset, os))
1330
1323
1331 @predicate('max(set)', safe=True)
1324 @predicate('max(set)', safe=True)
1332 def maxrev(repo, subset, x):
1325 def maxrev(repo, subset, x):
1333 """Changeset with highest revision number in set.
1326 """Changeset with highest revision number in set.
1334 """
1327 """
1335 os = getset(repo, fullreposet(repo), x)
1328 os = getset(repo, fullreposet(repo), x)
1336 try:
1329 try:
1337 m = os.max()
1330 m = os.max()
1338 if m in subset:
1331 if m in subset:
1339 return baseset([m], datarepr=('<max %r, %r>', subset, os))
1332 return baseset([m], datarepr=('<max %r, %r>', subset, os))
1340 except ValueError:
1333 except ValueError:
1341 # os.max() throws a ValueError when the collection is empty.
1334 # os.max() throws a ValueError when the collection is empty.
1342 # Same as python's max().
1335 # Same as python's max().
1343 pass
1336 pass
1344 return baseset(datarepr=('<max %r, %r>', subset, os))
1337 return baseset(datarepr=('<max %r, %r>', subset, os))
1345
1338
1346 @predicate('merge()', safe=True)
1339 @predicate('merge()', safe=True)
1347 def merge(repo, subset, x):
1340 def merge(repo, subset, x):
1348 """Changeset is a merge changeset.
1341 """Changeset is a merge changeset.
1349 """
1342 """
1350 # i18n: "merge" is a keyword
1343 # i18n: "merge" is a keyword
1351 getargs(x, 0, 0, _("merge takes no arguments"))
1344 getargs(x, 0, 0, _("merge takes no arguments"))
1352 cl = repo.changelog
1345 cl = repo.changelog
1353 return subset.filter(lambda r: cl.parentrevs(r)[1] != -1,
1346 return subset.filter(lambda r: cl.parentrevs(r)[1] != -1,
1354 condrepr='<merge>')
1347 condrepr='<merge>')
1355
1348
1356 @predicate('branchpoint()', safe=True)
1349 @predicate('branchpoint()', safe=True)
1357 def branchpoint(repo, subset, x):
1350 def branchpoint(repo, subset, x):
1358 """Changesets with more than one child.
1351 """Changesets with more than one child.
1359 """
1352 """
1360 # i18n: "branchpoint" is a keyword
1353 # i18n: "branchpoint" is a keyword
1361 getargs(x, 0, 0, _("branchpoint takes no arguments"))
1354 getargs(x, 0, 0, _("branchpoint takes no arguments"))
1362 cl = repo.changelog
1355 cl = repo.changelog
1363 if not subset:
1356 if not subset:
1364 return baseset()
1357 return baseset()
1365 # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
1358 # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
1366 # (and if it is not, it should.)
1359 # (and if it is not, it should.)
1367 baserev = min(subset)
1360 baserev = min(subset)
1368 parentscount = [0]*(len(repo) - baserev)
1361 parentscount = [0]*(len(repo) - baserev)
1369 for r in cl.revs(start=baserev + 1):
1362 for r in cl.revs(start=baserev + 1):
1370 for p in cl.parentrevs(r):
1363 for p in cl.parentrevs(r):
1371 if p >= baserev:
1364 if p >= baserev:
1372 parentscount[p - baserev] += 1
1365 parentscount[p - baserev] += 1
1373 return subset.filter(lambda r: parentscount[r - baserev] > 1,
1366 return subset.filter(lambda r: parentscount[r - baserev] > 1,
1374 condrepr='<branchpoint>')
1367 condrepr='<branchpoint>')
1375
1368
1376 @predicate('min(set)', safe=True)
1369 @predicate('min(set)', safe=True)
1377 def minrev(repo, subset, x):
1370 def minrev(repo, subset, x):
1378 """Changeset with lowest revision number in set.
1371 """Changeset with lowest revision number in set.
1379 """
1372 """
1380 os = getset(repo, fullreposet(repo), x)
1373 os = getset(repo, fullreposet(repo), x)
1381 try:
1374 try:
1382 m = os.min()
1375 m = os.min()
1383 if m in subset:
1376 if m in subset:
1384 return baseset([m], datarepr=('<min %r, %r>', subset, os))
1377 return baseset([m], datarepr=('<min %r, %r>', subset, os))
1385 except ValueError:
1378 except ValueError:
1386 # os.min() throws a ValueError when the collection is empty.
1379 # os.min() throws a ValueError when the collection is empty.
1387 # Same as python's min().
1380 # Same as python's min().
1388 pass
1381 pass
1389 return baseset(datarepr=('<min %r, %r>', subset, os))
1382 return baseset(datarepr=('<min %r, %r>', subset, os))
1390
1383
1391 @predicate('modifies(pattern)', safe=True)
1384 @predicate('modifies(pattern)', safe=True)
1392 def modifies(repo, subset, x):
1385 def modifies(repo, subset, x):
1393 """Changesets modifying files matched by pattern.
1386 """Changesets modifying files matched by pattern.
1394
1387
1395 The pattern without explicit kind like ``glob:`` is expected to be
1388 The pattern without explicit kind like ``glob:`` is expected to be
1396 relative to the current directory and match against a file or a
1389 relative to the current directory and match against a file or a
1397 directory.
1390 directory.
1398 """
1391 """
1399 # i18n: "modifies" is a keyword
1392 # i18n: "modifies" is a keyword
1400 pat = getstring(x, _("modifies requires a pattern"))
1393 pat = getstring(x, _("modifies requires a pattern"))
1401 return checkstatus(repo, subset, pat, 0)
1394 return checkstatus(repo, subset, pat, 0)
1402
1395
1403 @predicate('named(namespace)')
1396 @predicate('named(namespace)')
1404 def named(repo, subset, x):
1397 def named(repo, subset, x):
1405 """The changesets in a given namespace.
1398 """The changesets in a given namespace.
1406
1399
1407 Pattern matching is supported for `namespace`. See
1400 Pattern matching is supported for `namespace`. See
1408 :hg:`help revisions.patterns`.
1401 :hg:`help revisions.patterns`.
1409 """
1402 """
1410 # i18n: "named" is a keyword
1403 # i18n: "named" is a keyword
1411 args = getargs(x, 1, 1, _('named requires a namespace argument'))
1404 args = getargs(x, 1, 1, _('named requires a namespace argument'))
1412
1405
1413 ns = getstring(args[0],
1406 ns = getstring(args[0],
1414 # i18n: "named" is a keyword
1407 # i18n: "named" is a keyword
1415 _('the argument to named must be a string'))
1408 _('the argument to named must be a string'))
1416 kind, pattern, matcher = util.stringmatcher(ns)
1409 kind, pattern, matcher = util.stringmatcher(ns)
1417 namespaces = set()
1410 namespaces = set()
1418 if kind == 'literal':
1411 if kind == 'literal':
1419 if pattern not in repo.names:
1412 if pattern not in repo.names:
1420 raise error.RepoLookupError(_("namespace '%s' does not exist")
1413 raise error.RepoLookupError(_("namespace '%s' does not exist")
1421 % ns)
1414 % ns)
1422 namespaces.add(repo.names[pattern])
1415 namespaces.add(repo.names[pattern])
1423 else:
1416 else:
1424 for name, ns in repo.names.iteritems():
1417 for name, ns in repo.names.iteritems():
1425 if matcher(name):
1418 if matcher(name):
1426 namespaces.add(ns)
1419 namespaces.add(ns)
1427 if not namespaces:
1420 if not namespaces:
1428 raise error.RepoLookupError(_("no namespace exists"
1421 raise error.RepoLookupError(_("no namespace exists"
1429 " that match '%s'") % pattern)
1422 " that match '%s'") % pattern)
1430
1423
1431 names = set()
1424 names = set()
1432 for ns in namespaces:
1425 for ns in namespaces:
1433 for name in ns.listnames(repo):
1426 for name in ns.listnames(repo):
1434 if name not in ns.deprecated:
1427 if name not in ns.deprecated:
1435 names.update(repo[n].rev() for n in ns.nodes(repo, name))
1428 names.update(repo[n].rev() for n in ns.nodes(repo, name))
1436
1429
1437 names -= set([node.nullrev])
1430 names -= set([node.nullrev])
1438 return subset & names
1431 return subset & names
1439
1432
1440 @predicate('id(string)', safe=True)
1433 @predicate('id(string)', safe=True)
1441 def node_(repo, subset, x):
1434 def node_(repo, subset, x):
1442 """Revision non-ambiguously specified by the given hex string prefix.
1435 """Revision non-ambiguously specified by the given hex string prefix.
1443 """
1436 """
1444 # i18n: "id" is a keyword
1437 # i18n: "id" is a keyword
1445 l = getargs(x, 1, 1, _("id requires one argument"))
1438 l = getargs(x, 1, 1, _("id requires one argument"))
1446 # i18n: "id" is a keyword
1439 # i18n: "id" is a keyword
1447 n = getstring(l[0], _("id requires a string"))
1440 n = getstring(l[0], _("id requires a string"))
1448 if len(n) == 40:
1441 if len(n) == 40:
1449 try:
1442 try:
1450 rn = repo.changelog.rev(node.bin(n))
1443 rn = repo.changelog.rev(node.bin(n))
1451 except (LookupError, TypeError):
1444 except (LookupError, TypeError):
1452 rn = None
1445 rn = None
1453 else:
1446 else:
1454 rn = None
1447 rn = None
1455 pm = repo.changelog._partialmatch(n)
1448 pm = repo.changelog._partialmatch(n)
1456 if pm is not None:
1449 if pm is not None:
1457 rn = repo.changelog.rev(pm)
1450 rn = repo.changelog.rev(pm)
1458
1451
1459 if rn is None:
1452 if rn is None:
1460 return baseset()
1453 return baseset()
1461 result = baseset([rn])
1454 result = baseset([rn])
1462 return result & subset
1455 return result & subset
1463
1456
1464 @predicate('obsolete()', safe=True)
1457 @predicate('obsolete()', safe=True)
1465 def obsolete(repo, subset, x):
1458 def obsolete(repo, subset, x):
1466 """Mutable changeset with a newer version."""
1459 """Mutable changeset with a newer version."""
1467 # i18n: "obsolete" is a keyword
1460 # i18n: "obsolete" is a keyword
1468 getargs(x, 0, 0, _("obsolete takes no arguments"))
1461 getargs(x, 0, 0, _("obsolete takes no arguments"))
1469 obsoletes = obsmod.getrevs(repo, 'obsolete')
1462 obsoletes = obsmod.getrevs(repo, 'obsolete')
1470 return subset & obsoletes
1463 return subset & obsoletes
1471
1464
1472 @predicate('only(set, [set])', safe=True)
1465 @predicate('only(set, [set])', safe=True)
1473 def only(repo, subset, x):
1466 def only(repo, subset, x):
1474 """Changesets that are ancestors of the first set that are not ancestors
1467 """Changesets that are ancestors of the first set that are not ancestors
1475 of any other head in the repo. If a second set is specified, the result
1468 of any other head in the repo. If a second set is specified, the result
1476 is ancestors of the first set that are not ancestors of the second set
1469 is ancestors of the first set that are not ancestors of the second set
1477 (i.e. ::<set1> - ::<set2>).
1470 (i.e. ::<set1> - ::<set2>).
1478 """
1471 """
1479 cl = repo.changelog
1472 cl = repo.changelog
1480 # i18n: "only" is a keyword
1473 # i18n: "only" is a keyword
1481 args = getargs(x, 1, 2, _('only takes one or two arguments'))
1474 args = getargs(x, 1, 2, _('only takes one or two arguments'))
1482 include = getset(repo, fullreposet(repo), args[0])
1475 include = getset(repo, fullreposet(repo), args[0])
1483 if len(args) == 1:
1476 if len(args) == 1:
1484 if not include:
1477 if not include:
1485 return baseset()
1478 return baseset()
1486
1479
1487 descendants = set(_revdescendants(repo, include, False))
1480 descendants = set(_revdescendants(repo, include, False))
1488 exclude = [rev for rev in cl.headrevs()
1481 exclude = [rev for rev in cl.headrevs()
1489 if not rev in descendants and not rev in include]
1482 if not rev in descendants and not rev in include]
1490 else:
1483 else:
1491 exclude = getset(repo, fullreposet(repo), args[1])
1484 exclude = getset(repo, fullreposet(repo), args[1])
1492
1485
1493 results = set(cl.findmissingrevs(common=exclude, heads=include))
1486 results = set(cl.findmissingrevs(common=exclude, heads=include))
1494 # XXX we should turn this into a baseset instead of a set, smartset may do
1487 # XXX we should turn this into a baseset instead of a set, smartset may do
1495 # some optimizations from the fact this is a baseset.
1488 # some optimizations from the fact this is a baseset.
1496 return subset & results
1489 return subset & results
1497
1490
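Conceptually, only(a, b) is the ancestor-set difference ::a - ::b, and the single-argument form builds the exclude set from the other heads of the repository. A standalone sketch with a toy parent map standing in for the changelog:

parentmap = {0: [], 1: [0], 2: [1], 3: [1], 4: [2, 3]}

def ancestors(revs):
    seen, stack = set(), list(revs)
    while stack:
        r = stack.pop()
        if r not in seen:
            seen.add(r)
            stack.extend(parentmap[r])
    return seen

include, exclude = {4}, {2}
print(sorted(ancestors(include) - ancestors(exclude)))  # [3, 4]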
1498 @predicate('origin([set])', safe=True)
1491 @predicate('origin([set])', safe=True)
1499 def origin(repo, subset, x):
1492 def origin(repo, subset, x):
1500 """
1493 """
1501 Changesets that were specified as a source for the grafts, transplants or
1494 Changesets that were specified as a source for the grafts, transplants or
1502 rebases that created the given revisions. Omitting the optional set is the
1495 rebases that created the given revisions. Omitting the optional set is the
1503 same as passing all(). If a changeset created by these operations is itself
1496 same as passing all(). If a changeset created by these operations is itself
1504 specified as a source for one of these operations, only the source changeset
1497 specified as a source for one of these operations, only the source changeset
1505 for the first operation is selected.
1498 for the first operation is selected.
1506 """
1499 """
1507 if x is not None:
1500 if x is not None:
1508 dests = getset(repo, fullreposet(repo), x)
1501 dests = getset(repo, fullreposet(repo), x)
1509 else:
1502 else:
1510 dests = fullreposet(repo)
1503 dests = fullreposet(repo)
1511
1504
1512 def _firstsrc(rev):
1505 def _firstsrc(rev):
1513 src = _getrevsource(repo, rev)
1506 src = _getrevsource(repo, rev)
1514 if src is None:
1507 if src is None:
1515 return None
1508 return None
1516
1509
1517 while True:
1510 while True:
1518 prev = _getrevsource(repo, src)
1511 prev = _getrevsource(repo, src)
1519
1512
1520 if prev is None:
1513 if prev is None:
1521 return src
1514 return src
1522 src = prev
1515 src = prev
1523
1516
1524 o = set([_firstsrc(r) for r in dests])
1517 o = set([_firstsrc(r) for r in dests])
1525 o -= set([None])
1518 o -= set([None])
1526 # XXX we should turn this into a baseset instead of a set, smartset may do
1519 # XXX we should turn this into a baseset instead of a set, smartset may do
1527 # some optimizations from the fact this is a baseset.
1520 # some optimizations from the fact this is a baseset.
1528 return subset & o
1521 return subset & o
1529
1522
1530 @predicate('outgoing([path])', safe=True)
1523 @predicate('outgoing([path])', safe=True)
1531 def outgoing(repo, subset, x):
1524 def outgoing(repo, subset, x):
1532 """Changesets not found in the specified destination repository, or the
1525 """Changesets not found in the specified destination repository, or the
1533 default push location.
1526 default push location.
1534 """
1527 """
1535 # Avoid cycles.
1528 # Avoid cycles.
1536 from . import (
1529 from . import (
1537 discovery,
1530 discovery,
1538 hg,
1531 hg,
1539 )
1532 )
1540 # i18n: "outgoing" is a keyword
1533 # i18n: "outgoing" is a keyword
1541 l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
1534 l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
1542 # i18n: "outgoing" is a keyword
1535 # i18n: "outgoing" is a keyword
1543 dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
1536 dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
1544 dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
1537 dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
1545 dest, branches = hg.parseurl(dest)
1538 dest, branches = hg.parseurl(dest)
1546 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1539 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1547 if revs:
1540 if revs:
1548 revs = [repo.lookup(rev) for rev in revs]
1541 revs = [repo.lookup(rev) for rev in revs]
1549 other = hg.peer(repo, {}, dest)
1542 other = hg.peer(repo, {}, dest)
1550 repo.ui.pushbuffer()
1543 repo.ui.pushbuffer()
1551 outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
1544 outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
1552 repo.ui.popbuffer()
1545 repo.ui.popbuffer()
1553 cl = repo.changelog
1546 cl = repo.changelog
1554 o = set([cl.rev(r) for r in outgoing.missing])
1547 o = set([cl.rev(r) for r in outgoing.missing])
1555 return subset & o
1548 return subset & o
1556
1549
1557 @predicate('p1([set])', safe=True)
1550 @predicate('p1([set])', safe=True)
1558 def p1(repo, subset, x):
1551 def p1(repo, subset, x):
1559 """First parent of changesets in set, or the working directory.
1552 """First parent of changesets in set, or the working directory.
1560 """
1553 """
1561 if x is None:
1554 if x is None:
1562 p = repo[x].p1().rev()
1555 p = repo[x].p1().rev()
1563 if p >= 0:
1556 if p >= 0:
1564 return subset & baseset([p])
1557 return subset & baseset([p])
1565 return baseset()
1558 return baseset()
1566
1559
1567 ps = set()
1560 ps = set()
1568 cl = repo.changelog
1561 cl = repo.changelog
1569 for r in getset(repo, fullreposet(repo), x):
1562 for r in getset(repo, fullreposet(repo), x):
1570 ps.add(cl.parentrevs(r)[0])
1563 ps.add(cl.parentrevs(r)[0])
1571 ps -= set([node.nullrev])
1564 ps -= set([node.nullrev])
1572 # XXX we should turn this into a baseset instead of a set, smartset may do
1565 # XXX we should turn this into a baseset instead of a set, smartset may do
1573 # some optimizations from the fact this is a baseset.
1566 # some optimizations from the fact this is a baseset.
1574 return subset & ps
1567 return subset & ps
1575
1568
1576 @predicate('p2([set])', safe=True)
1569 @predicate('p2([set])', safe=True)
1577 def p2(repo, subset, x):
1570 def p2(repo, subset, x):
1578 """Second parent of changesets in set, or the working directory.
1571 """Second parent of changesets in set, or the working directory.
1579 """
1572 """
1580 if x is None:
1573 if x is None:
1581 ps = repo[x].parents()
1574 ps = repo[x].parents()
1582 try:
1575 try:
1583 p = ps[1].rev()
1576 p = ps[1].rev()
1584 if p >= 0:
1577 if p >= 0:
1585 return subset & baseset([p])
1578 return subset & baseset([p])
1586 return baseset()
1579 return baseset()
1587 except IndexError:
1580 except IndexError:
1588 return baseset()
1581 return baseset()
1589
1582
1590 ps = set()
1583 ps = set()
1591 cl = repo.changelog
1584 cl = repo.changelog
1592 for r in getset(repo, fullreposet(repo), x):
1585 for r in getset(repo, fullreposet(repo), x):
1593 ps.add(cl.parentrevs(r)[1])
1586 ps.add(cl.parentrevs(r)[1])
1594 ps -= set([node.nullrev])
1587 ps -= set([node.nullrev])
1595 # XXX we should turn this into a baseset instead of a set, smartset may do
1588 # XXX we should turn this into a baseset instead of a set, smartset may do
1596 # some optimizations from the fact this is a baseset.
1589 # some optimizations from the fact this is a baseset.
1597 return subset & ps
1590 return subset & ps
1598
1591
1599 def parentpost(repo, subset, x, order):
1592 def parentpost(repo, subset, x, order):
1600 return p1(repo, subset, x)
1593 return p1(repo, subset, x)
1601
1594
1602 @predicate('parents([set])', safe=True)
1595 @predicate('parents([set])', safe=True)
1603 def parents(repo, subset, x):
1596 def parents(repo, subset, x):
1604 """
1597 """
1605 The set of all parents for all changesets in set, or the working directory.
1598 The set of all parents for all changesets in set, or the working directory.
1606 """
1599 """
1607 if x is None:
1600 if x is None:
1608 ps = set(p.rev() for p in repo[x].parents())
1601 ps = set(p.rev() for p in repo[x].parents())
1609 else:
1602 else:
1610 ps = set()
1603 ps = set()
1611 cl = repo.changelog
1604 cl = repo.changelog
1612 up = ps.update
1605 up = ps.update
1613 parentrevs = cl.parentrevs
1606 parentrevs = cl.parentrevs
1614 for r in getset(repo, fullreposet(repo), x):
1607 for r in getset(repo, fullreposet(repo), x):
1615 if r == node.wdirrev:
1608 if r == node.wdirrev:
1616 up(p.rev() for p in repo[r].parents())
1609 up(p.rev() for p in repo[r].parents())
1617 else:
1610 else:
1618 up(parentrevs(r))
1611 up(parentrevs(r))
1619 ps -= set([node.nullrev])
1612 ps -= set([node.nullrev])
1620 return subset & ps
1613 return subset & ps
1621
1614
1622 def _phase(repo, subset, target):
1615 def _phase(repo, subset, target):
1623 """helper to select all rev in phase <target>"""
1616 """helper to select all rev in phase <target>"""
1624 repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
1617 repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
1625 if repo._phasecache._phasesets:
1618 if repo._phasecache._phasesets:
1626 s = repo._phasecache._phasesets[target] - repo.changelog.filteredrevs
1619 s = repo._phasecache._phasesets[target] - repo.changelog.filteredrevs
1627 s = baseset(s)
1620 s = baseset(s)
1628 s.sort() # set are non ordered, so we enforce ascending
1621 s.sort() # set are non ordered, so we enforce ascending
1629 return subset & s
1622 return subset & s
1630 else:
1623 else:
1631 phase = repo._phasecache.phase
1624 phase = repo._phasecache.phase
1632 condition = lambda r: phase(repo, r) == target
1625 condition = lambda r: phase(repo, r) == target
1633 return subset.filter(condition, condrepr=('<phase %r>', target),
1626 return subset.filter(condition, condrepr=('<phase %r>', target),
1634 cache=False)
1627 cache=False)
1635
1628
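_phase() prefers the precomputed per-phase revision sets when the phase cache has them, and only falls back to testing each revision individually. Roughly, with invented stand-ins for the cache structures:

def inphase(subset, target, phasesets=None, phaseof=None):
    if phasesets is not None:
        # fast path: intersect with the precomputed set, keep ascending order
        return sorted(set(subset) & phasesets.get(target, set()))
    # slow path: ask for each revision's phase
    return [r for r in subset if phaseof(r) == target]

print(inphase([0, 1, 2, 3], 'draft', phasesets={'draft': {2, 3, 9}}))  # [2, 3]
print(inphase([0, 1, 2, 3], 'draft', phaseof={1: 'draft'}.get))        # [1]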
1636 @predicate('draft()', safe=True)
1629 @predicate('draft()', safe=True)
1637 def draft(repo, subset, x):
1630 def draft(repo, subset, x):
1638 """Changeset in draft phase."""
1631 """Changeset in draft phase."""
1639 # i18n: "draft" is a keyword
1632 # i18n: "draft" is a keyword
1640 getargs(x, 0, 0, _("draft takes no arguments"))
1633 getargs(x, 0, 0, _("draft takes no arguments"))
1641 target = phases.draft
1634 target = phases.draft
1642 return _phase(repo, subset, target)
1635 return _phase(repo, subset, target)
1643
1636
1644 @predicate('secret()', safe=True)
1637 @predicate('secret()', safe=True)
1645 def secret(repo, subset, x):
1638 def secret(repo, subset, x):
1646 """Changeset in secret phase."""
1639 """Changeset in secret phase."""
1647 # i18n: "secret" is a keyword
1640 # i18n: "secret" is a keyword
1648 getargs(x, 0, 0, _("secret takes no arguments"))
1641 getargs(x, 0, 0, _("secret takes no arguments"))
1649 target = phases.secret
1642 target = phases.secret
1650 return _phase(repo, subset, target)
1643 return _phase(repo, subset, target)
1651
1644
1652 def parentspec(repo, subset, x, n, order):
1645 def parentspec(repo, subset, x, n, order):
1653 """``set^0``
1646 """``set^0``
1654 The set.
1647 The set.
1655 ``set^1`` (or ``set^``), ``set^2``
1648 ``set^1`` (or ``set^``), ``set^2``
1656 First or second parent, respectively, of all changesets in set.
1649 First or second parent, respectively, of all changesets in set.
1657 """
1650 """
1658 try:
1651 try:
1659 n = int(n[1])
1652 n = int(n[1])
1660 if n not in (0, 1, 2):
1653 if n not in (0, 1, 2):
1661 raise ValueError
1654 raise ValueError
1662 except (TypeError, ValueError):
1655 except (TypeError, ValueError):
1663 raise error.ParseError(_("^ expects a number 0, 1, or 2"))
1656 raise error.ParseError(_("^ expects a number 0, 1, or 2"))
1664 ps = set()
1657 ps = set()
1665 cl = repo.changelog
1658 cl = repo.changelog
1666 for r in getset(repo, fullreposet(repo), x):
1659 for r in getset(repo, fullreposet(repo), x):
1667 if n == 0:
1660 if n == 0:
1668 ps.add(r)
1661 ps.add(r)
1669 elif n == 1:
1662 elif n == 1:
1670 ps.add(cl.parentrevs(r)[0])
1663 ps.add(cl.parentrevs(r)[0])
1671 elif n == 2:
1664 elif n == 2:
1672 parents = cl.parentrevs(r)
1665 parents = cl.parentrevs(r)
1673 if parents[1] != node.nullrev:
1666 if parents[1] != node.nullrev:
1674 ps.add(parents[1])
1667 ps.add(parents[1])
1675 return subset & ps
1668 return subset & ps
1676
1669
1677 @predicate('present(set)', safe=True)
1670 @predicate('present(set)', safe=True)
1678 def present(repo, subset, x):
1671 def present(repo, subset, x):
1679 """An empty set, if any revision in set isn't found; otherwise,
1672 """An empty set, if any revision in set isn't found; otherwise,
1680 all revisions in set.
1673 all revisions in set.
1681
1674
1682 If any of the specified revisions is not present in the local repository,
1675 If any of the specified revisions is not present in the local repository,
1683 the query is normally aborted. But this predicate allows the query
1676 the query is normally aborted. But this predicate allows the query
1684 to continue even in such cases.
1677 to continue even in such cases.
1685 """
1678 """
1686 try:
1679 try:
1687 return getset(repo, subset, x)
1680 return getset(repo, subset, x)
1688 except error.RepoLookupError:
1681 except error.RepoLookupError:
1689 return baseset()
1682 return baseset()
1690
1683
1691 # for internal use
1684 # for internal use
1692 @predicate('_notpublic', safe=True)
1685 @predicate('_notpublic', safe=True)
1693 def _notpublic(repo, subset, x):
1686 def _notpublic(repo, subset, x):
1694 getargs(x, 0, 0, "_notpublic takes no arguments")
1687 getargs(x, 0, 0, "_notpublic takes no arguments")
1695 repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
1688 repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
1696 if repo._phasecache._phasesets:
1689 if repo._phasecache._phasesets:
1697 s = set()
1690 s = set()
1698 for u in repo._phasecache._phasesets[1:]:
1691 for u in repo._phasecache._phasesets[1:]:
1699 s.update(u)
1692 s.update(u)
1700 s = baseset(s - repo.changelog.filteredrevs)
1693 s = baseset(s - repo.changelog.filteredrevs)
1701 s.sort()
1694 s.sort()
1702 return subset & s
1695 return subset & s
1703 else:
1696 else:
1704 phase = repo._phasecache.phase
1697 phase = repo._phasecache.phase
1705 target = phases.public
1698 target = phases.public
1706 condition = lambda r: phase(repo, r) != target
1699 condition = lambda r: phase(repo, r) != target
1707 return subset.filter(condition, condrepr=('<phase %r>', target),
1700 return subset.filter(condition, condrepr=('<phase %r>', target),
1708 cache=False)
1701 cache=False)
1709
1702
1710 @predicate('public()', safe=True)
1703 @predicate('public()', safe=True)
1711 def public(repo, subset, x):
1704 def public(repo, subset, x):
1712 """Changeset in public phase."""
1705 """Changeset in public phase."""
1713 # i18n: "public" is a keyword
1706 # i18n: "public" is a keyword
1714 getargs(x, 0, 0, _("public takes no arguments"))
1707 getargs(x, 0, 0, _("public takes no arguments"))
1715 phase = repo._phasecache.phase
1708 phase = repo._phasecache.phase
1716 target = phases.public
1709 target = phases.public
1717 condition = lambda r: phase(repo, r) == target
1710 condition = lambda r: phase(repo, r) == target
1718 return subset.filter(condition, condrepr=('<phase %r>', target),
1711 return subset.filter(condition, condrepr=('<phase %r>', target),
1719 cache=False)
1712 cache=False)
1720
1713
1721 @predicate('remote([id [,path]])', safe=True)
1714 @predicate('remote([id [,path]])', safe=True)
1722 def remote(repo, subset, x):
1715 def remote(repo, subset, x):
1723 """Local revision that corresponds to the given identifier in a
1716 """Local revision that corresponds to the given identifier in a
1724 remote repository, if present. Here, the '.' identifier is a
1717 remote repository, if present. Here, the '.' identifier is a
1725 synonym for the current local branch.
1718 synonym for the current local branch.
1726 """
1719 """
1727
1720
1728 from . import hg # avoid start-up nasties
1721 from . import hg # avoid start-up nasties
1729 # i18n: "remote" is a keyword
1722 # i18n: "remote" is a keyword
1730 l = getargs(x, 0, 2, _("remote takes zero, one, or two arguments"))
1723 l = getargs(x, 0, 2, _("remote takes zero, one, or two arguments"))
1731
1724
1732 q = '.'
1725 q = '.'
1733 if len(l) > 0:
1726 if len(l) > 0:
1734 # i18n: "remote" is a keyword
1727 # i18n: "remote" is a keyword
1735 q = getstring(l[0], _("remote requires a string id"))
1728 q = getstring(l[0], _("remote requires a string id"))
1736 if q == '.':
1729 if q == '.':
1737 q = repo['.'].branch()
1730 q = repo['.'].branch()
1738
1731
1739 dest = ''
1732 dest = ''
1740 if len(l) > 1:
1733 if len(l) > 1:
1741 # i18n: "remote" is a keyword
1734 # i18n: "remote" is a keyword
1742 dest = getstring(l[1], _("remote requires a repository path"))
1735 dest = getstring(l[1], _("remote requires a repository path"))
1743 dest = repo.ui.expandpath(dest or 'default')
1736 dest = repo.ui.expandpath(dest or 'default')
1744 dest, branches = hg.parseurl(dest)
1737 dest, branches = hg.parseurl(dest)
1745 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1738 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1746 if revs:
1739 if revs:
1747 revs = [repo.lookup(rev) for rev in revs]
1740 revs = [repo.lookup(rev) for rev in revs]
1748 other = hg.peer(repo, {}, dest)
1741 other = hg.peer(repo, {}, dest)
1749 n = other.lookup(q)
1742 n = other.lookup(q)
1750 if n in repo:
1743 if n in repo:
1751 r = repo[n].rev()
1744 r = repo[n].rev()
1752 if r in subset:
1745 if r in subset:
1753 return baseset([r])
1746 return baseset([r])
1754 return baseset()
1747 return baseset()
1755
1748
@predicate('removes(pattern)', safe=True)
def removes(repo, subset, x):
    """Changesets which remove files matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "removes" is a keyword
    pat = getstring(x, _("removes requires a pattern"))
    return checkstatus(repo, subset, pat, 2)

@predicate('rev(number)', safe=True)
def rev(repo, subset, x):
    """Revision with the given numeric identifier.
    """
    # i18n: "rev" is a keyword
    l = getargs(x, 1, 1, _("rev requires one argument"))
    try:
        # i18n: "rev" is a keyword
        l = int(getstring(l[0], _("rev requires a number")))
    except (TypeError, ValueError):
        # i18n: "rev" is a keyword
        raise error.ParseError(_("rev expects a number"))
    if l not in repo.changelog and l != node.nullrev:
        return baseset()
    return subset & baseset([l])

@predicate('matching(revision [, field])', safe=True)
def matching(repo, subset, x):
    """Changesets in which a given set of fields match the set of fields in the
    selected revision or set.

    To match more than one field pass the list of fields to match separated
    by spaces (e.g. ``author description``).

    Valid fields are most regular revision fields and some special fields.

    Regular revision fields are ``description``, ``author``, ``branch``,
    ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
    and ``diff``.
    Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
    contents of the revision. Two revisions matching their ``diff`` will
    also match their ``files``.

    Special fields are ``summary`` and ``metadata``:
    ``summary`` matches the first line of the description.
    ``metadata`` is equivalent to matching ``description user date``
    (i.e. it matches the main metadata fields).

    ``metadata`` is the default field which is used when no fields are
    specified. You can match more than one field at a time.
    """
    # i18n: "matching" is a keyword
    l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))

    revs = getset(repo, fullreposet(repo), l[0])

    fieldlist = ['metadata']
    if len(l) > 1:
        fieldlist = getstring(l[1],
            # i18n: "matching" is a keyword
            _("matching requires a string "
              "as its second argument")).split()

    # Make sure that there are no repeated fields,
    # expand the 'special' 'metadata' field type
    # and check the 'files' whenever we check the 'diff'
    fields = []
    for field in fieldlist:
        if field == 'metadata':
            fields += ['user', 'description', 'date']
        elif field == 'diff':
            # a revision matching the diff must also match the files
            # since matching the diff is very costly, make sure to
            # also match the files first
            fields += ['files', 'diff']
        else:
            if field == 'author':
                field = 'user'
            fields.append(field)
    fields = set(fields)
    if 'summary' in fields and 'description' in fields:
        # If a revision matches its description it also matches its summary
        fields.discard('summary')

    # We may want to match more than one field
    # Not all fields take the same amount of time to be matched
    # Sort the selected fields in order of increasing matching cost
    fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
        'files', 'description', 'substate', 'diff']
    def fieldkeyfunc(f):
        try:
            return fieldorder.index(f)
        except ValueError:
            # assume an unknown field is very costly
            return len(fieldorder)
    fields = list(fields)
    fields.sort(key=fieldkeyfunc)

    # Each field will be matched with its own "getfield" function
    # which will be added to the getfieldfuncs array of functions
    getfieldfuncs = []
    _funcs = {
        'user': lambda r: repo[r].user(),
        'branch': lambda r: repo[r].branch(),
        'date': lambda r: repo[r].date(),
        'description': lambda r: repo[r].description(),
        'files': lambda r: repo[r].files(),
        'parents': lambda r: repo[r].parents(),
        'phase': lambda r: repo[r].phase(),
        'substate': lambda r: repo[r].substate,
        'summary': lambda r: repo[r].description().splitlines()[0],
        'diff': lambda r: list(repo[r].diff(git=True))
    }
    for info in fields:
        getfield = _funcs.get(info, None)
        if getfield is None:
            raise error.ParseError(
                # i18n: "matching" is a keyword
                _("unexpected field name passed to matching: %s") % info)
        getfieldfuncs.append(getfield)
    # convert the getfield array of functions into a "getinfo" function
    # which returns an array of field values (or a single value if there
    # is only one field to match)
    getinfo = lambda r: [f(r) for f in getfieldfuncs]

    def matches(x):
        for rev in revs:
            target = getinfo(rev)
            match = True
            for n, f in enumerate(getfieldfuncs):
                if target[n] != f(x):
                    match = False
            if match:
                return True
        return False

    return subset.filter(matches, condrepr=('<matching%r %r>', fields, revs))

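# Example usage (illustrative sketch):
#   hg log -r "matching(tip, 'author date')"
# selects changesets whose author and date both equal those of tip; with no
# second argument the default 'metadata' fields (user, description, date)
# are compared instead.
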
@predicate('reverse(set)', safe=True, takeorder=True)
def reverse(repo, subset, x, order):
    """Reverse order of set.
    """
    l = getset(repo, subset, x)
    if order == defineorder:
        l.reverse()
    return l

@predicate('roots(set)', safe=True)
def roots(repo, subset, x):
    """Changesets in set with no parent changeset in set.
    """
    s = getset(repo, fullreposet(repo), x)
    parents = repo.changelog.parentrevs
    def filter(r):
        for p in parents(r):
            if 0 <= p and p in s:
                return False
        return True
    return subset & s.filter(filter, condrepr='<roots>')

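# Example usage (illustrative sketch):
#   hg log -r "roots(branch(default))"
# returns the changesets on the default branch that have no parent within
# branch(default) itself.
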
_sortkeyfuncs = {
    'rev': lambda c: c.rev(),
    'branch': lambda c: c.branch(),
    'desc': lambda c: c.description(),
    'user': lambda c: c.user(),
    'author': lambda c: c.user(),
    'date': lambda c: c.date()[0],
}

def _getsortargs(x):
    """Parse sort options into (set, [(key, reverse)], opts)"""
    args = getargsdict(x, 'sort', 'set keys topo.firstbranch')
    if 'set' not in args:
        # i18n: "sort" is a keyword
        raise error.ParseError(_('sort requires one or two arguments'))
    keys = "rev"
    if 'keys' in args:
        # i18n: "sort" is a keyword
        keys = getstring(args['keys'], _("sort spec must be a string"))

    keyflags = []
    for k in keys.split():
        fk = k
        reverse = (k[0] == '-')
        if reverse:
            k = k[1:]
        if k not in _sortkeyfuncs and k != 'topo':
            raise error.ParseError(_("unknown sort key %r") % fk)
        keyflags.append((k, reverse))

    if len(keyflags) > 1 and any(k == 'topo' for k, reverse in keyflags):
        # i18n: "topo" is a keyword
        raise error.ParseError(_('topo sort order cannot be combined '
                                 'with other sort keys'))

    opts = {}
    if 'topo.firstbranch' in args:
        if any(k == 'topo' for k, reverse in keyflags):
            opts['topo.firstbranch'] = args['topo.firstbranch']
        else:
            # i18n: "topo" and "topo.firstbranch" are keywords
            raise error.ParseError(_('topo.firstbranch can only be used '
                                     'when using the topo sort key'))

    return args['set'], keyflags, opts

@predicate('sort(set[, [-]key... [, ...]])', safe=True, takeorder=True)
def sort(repo, subset, x, order):
    """Sort set by keys. The default sort order is ascending, specify a key
    as ``-key`` to sort in descending order.

    The keys can be:

    - ``rev`` for the revision number,
    - ``branch`` for the branch name,
    - ``desc`` for the commit message (description),
    - ``user`` for user name (``author`` can be used as an alias),
    - ``date`` for the commit date,
    - ``topo`` for a reverse topological sort

    The ``topo`` sort order cannot be combined with other sort keys. This sort
    takes one optional argument, ``topo.firstbranch``, which takes a revset
    that specifies which topological branches to prioritize in the sort.
    """
    s, keyflags, opts = _getsortargs(x)
    revs = getset(repo, subset, s)

    if not keyflags or order != defineorder:
        return revs
    if len(keyflags) == 1 and keyflags[0][0] == "rev":
        revs.sort(reverse=keyflags[0][1])
        return revs
    elif keyflags[0][0] == "topo":
        firstbranch = ()
        if 'topo.firstbranch' in opts:
            firstbranch = getset(repo, subset, opts['topo.firstbranch'])
        revs = baseset(_toposort(revs, repo.changelog.parentrevs, firstbranch),
                       istopo=True)
        if keyflags[0][1]:
            revs.reverse()
        return revs

    # sort() is guaranteed to be stable
    ctxs = [repo[r] for r in revs]
    for k, reverse in reversed(keyflags):
        ctxs.sort(key=_sortkeyfuncs[k], reverse=reverse)
    return baseset([c.rev() for c in ctxs])

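# Example usage (illustrative sketch; the second form assumes the
# keyword-argument syntax accepted by _getsortargs() above):
#   hg log -r "sort(branch(default), -date)"
#   hg log -r "sort(all(), topo, topo.firstbranch=tip)"
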
def _toposort(revs, parentsfunc, firstbranch=()):
    """Yield revisions from heads to roots one (topo) branch at a time.

    This function aims to be used by a graph generator that wishes to minimize
    the number of parallel branches and their interleaving.

    Example iteration order (numbers show the "true" order in a changelog):

      o  4
      |
      o  1
      |
      | o  3
      | |
      | o  2
      |/
      o  0

    Note that the ancestors of merges are understood by the current
    algorithm to be on the same branch. This means no reordering will
    occur behind a merge.
    """

    ### Quick summary of the algorithm
    #
    # This function is based around a "retention" principle. We keep revisions
    # in memory until we are ready to emit a whole branch that immediately
    # "merges" into an existing one. This reduces the number of parallel
    # branches with interleaved revisions.
    #
    # During iteration revs are split into two groups:
    # A) revisions already emitted
    # B) revisions in "retention"; they are stored as separate subgroups.
    #
    # For each REV, we apply the following logic:
    #
    #   1) if REV is a parent of (A), we will emit it. If there is a
    #   retention group ((B) above) that is blocked on REV being
    #   available, we emit all the revisions out of that retention
    #   group first.
    #
    #   2) otherwise, we search for a subgroup in (B) waiting for REV to
    #   become available; if such a subgroup exists, we add REV to it and
    #   the subgroup now waits for REV.parents() to become available.
    #
    #   3) finally, if no such subgroup exists in (B), we create a new one.
    #
    #
    # To bootstrap the algorithm, we emit the tipmost revision (which
    # puts it in group (A) from above).

    revs.sort(reverse=True)

    # Set of parents of revisions that have been emitted. They can be
    # considered unblocked as the graph generator is already aware of them so
    # there is no need to delay the revisions that reference them.
    #
    # If someone wants to prioritize a branch over the others, pre-filling
    # this set will force all other branches to wait until this branch is
    # ready to be emitted.
    unblocked = set(firstbranch)

    # list of groups waiting to be displayed, each group is defined by:
    #
    #   (revs:    lists of revs waiting to be displayed,
    #    blocked: set of revs that cannot be displayed before those in 'revs')
    #
    # The second value ('blocked') corresponds to the parents of any revision
    # in the group ('revs') that is not itself contained in the group. The
    # main idea of this algorithm is to delay as much as possible the
    # emission of any revision. This means waiting for the moment we are
    # about to display these parents to display the revs in a group.
    #
    # This first implementation is smart until it encounters a merge: it will
    # emit revs as soon as any parent is about to be emitted and can grow an
    # arbitrary number of revs in 'blocked'. In practice this means we
    # properly retain new branches but give up on any special ordering for
    # ancestors of merges. The implementation can be improved to handle this
    # better.
    #
    # The first subgroup is special. It corresponds to all the revisions that
    # were already emitted. Its 'revs' list is expected to be empty and its
    # 'blocked' set contains the parent revisions of already emitted
    # revisions.
    #
    # You could pre-seed the <parents> set of groups[0] with specific
    # changesets to select what the first emitted branch should be.
    groups = [([], unblocked)]
    pendingheap = []
    pendingset = set()

    heapq.heapify(pendingheap)
    heappop = heapq.heappop
    heappush = heapq.heappush
    for currentrev in revs:
        # The heap pops the smallest element; we want the highest, so we
        # invert the sign.
        if currentrev not in pendingset:
            heappush(pendingheap, -currentrev)
            pendingset.add(currentrev)
        # iterate on pending revs until the current rev has been processed
        rev = None
        while rev != currentrev:
            rev = -heappop(pendingheap)
            pendingset.remove(rev)

            # Look for a subgroup that is blocked, waiting for the current
            # revision.
            matching = [i for i, g in enumerate(groups) if rev in g[1]]

            if matching:
                # The main idea is to gather together all sets that are
                # blocked on the same revision.
                #
                # Groups are merged when a common blocking ancestor is
                # observed. For example, given two groups:
                #
                # revs [5, 4] waiting for 1
                # revs [3, 2] waiting for 1
                #
                # These two groups will be merged when we process
                # 1. In theory, we could have merged the groups when
                # we added 2 to the group it is now in (we could have
                # noticed the groups were both blocked on 1 then), but
                # the way it works now makes the algorithm simpler.
                #
                # We also always keep the oldest subgroup first. We can
                # probably improve the behavior by having the longest set
                # first. That way, graph algorithms could minimise the length
                # of parallel lines in their drawing. This is currently not
                # done.
                targetidx = matching.pop(0)
                trevs, tparents = groups[targetidx]
                for i in matching:
                    gr = groups[i]
                    trevs.extend(gr[0])
                    tparents |= gr[1]
                # delete all merged subgroups (except the one we kept)
                # (starting from the last subgroup for performance and
                # sanity reasons)
                for i in reversed(matching):
                    del groups[i]
            else:
                # This is a new head. We create a new subgroup for it.
                targetidx = len(groups)
                groups.append(([], set([rev])))

            gr = groups[targetidx]

            # We now add the current revision to this subgroup. This is done
            # after the subgroup merging because all elements from a subgroup
            # that relied on this rev must precede it.
            #
            # We also update the <parents> set to include the parents of the
            # new node.
            if rev == currentrev: # only display stuff in rev
                gr[0].append(rev)
            gr[1].remove(rev)
            parents = [p for p in parentsfunc(rev) if p > node.nullrev]
            gr[1].update(parents)
            for p in parents:
                if p not in pendingset:
                    pendingset.add(p)
                    heappush(pendingheap, -p)

            # Look for a subgroup to display
            #
            # When unblocked is empty (if clause), we were not waiting for any
            # revisions during the first iteration (if no priority was given)
            # or we emitted a whole disconnected set of the graph (reached a
            # root). In that case we arbitrarily take the oldest known
            # subgroup. The heuristic could probably be better.
            #
            # Otherwise (elif clause), if the subgroup is blocked on
            # a revision we just emitted, we can safely emit it as
            # well.
            if not unblocked:
                if len(groups) > 1: # display other subset
                    targetidx = 1
                    gr = groups[1]
            elif not gr[1] & unblocked:
                gr = None

            if gr is not None:
                # update the set of awaited revisions with the one from the
                # subgroup
                unblocked |= gr[1]
                # output all revisions in the subgroup
                for r in gr[0]:
                    yield r
                # delete the subgroup that you just output,
                # unless it is groups[0], in which case you just empty it
                if targetidx:
                    del groups[targetidx]
                else:
                    gr[0][:] = []
    # Check if we have some subgroup waiting for revisions we are not going to
    # iterate over
    for g in groups:
        for r in g[0]:
            yield r

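# A small illustration (editorial sketch mirroring the docstring graph above;
# the 'parents' mapping is hypothetical and stands in for
# changelog.parentrevs):
#
#   parents = {0: (-1, -1), 1: (0, -1), 2: (0, -1), 3: (2, -1), 4: (1, -1)}
#   list(_toposort([0, 1, 2, 3, 4], lambda r: parents[r]))
#   # expected: [4, 1, 3, 2, 0], i.e. each branch is emitted as a
#   # contiguous run
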
@predicate('subrepo([pattern])')
def subrepo(repo, subset, x):
    """Changesets that add, modify or remove the given subrepo. If no subrepo
    pattern is named, any subrepo changes are returned.
    """
    # i18n: "subrepo" is a keyword
    args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
    pat = None
    if len(args) != 0:
        pat = getstring(args[0], _("subrepo requires a pattern"))

    m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])

    def submatches(names):
        k, p, m = util.stringmatcher(pat)
        for name in names:
            if m(name):
                yield name

    def matches(x):
        c = repo[x]
        s = repo.status(c.p1().node(), c.node(), match=m)

        if pat is None:
            return s.added or s.modified or s.removed

        if s.added:
            return any(submatches(c.substate.keys()))

        if s.modified:
            subs = set(c.p1().substate.keys())
            subs.update(c.substate.keys())

            for path in submatches(subs):
                if c.p1().substate.get(path) != c.substate.get(path):
                    return True

        if s.removed:
            return any(submatches(c.p1().substate.keys()))

        return False

    return subset.filter(matches, condrepr=('<subrepo %r>', pat))

def _substringmatcher(pattern, casesensitive=True):
    kind, pattern, matcher = util.stringmatcher(pattern,
                                                casesensitive=casesensitive)
    if kind == 'literal':
        if not casesensitive:
            pattern = encoding.lower(pattern)
            matcher = lambda s: pattern in encoding.lower(s)
        else:
            matcher = lambda s: pattern in s
    return kind, pattern, matcher

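# Behavior sketch (illustrative; assumes util.stringmatcher treats a plain
# string as a 'literal' pattern):
#   kind, pattern, m = _substringmatcher('Alice', casesensitive=False)
#   # kind == 'literal', pattern == 'alice', and m('alice@example.com') is
#   # True because the lowered pattern is matched as a substring.
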
@predicate('tag([name])', safe=True)
def tag(repo, subset, x):
    """The specified tag by name, or all tagged revisions if no name is given.

    Pattern matching is supported for `name`. See
    :hg:`help revisions.patterns`.
    """
    # i18n: "tag" is a keyword
    args = getargs(x, 0, 1, _("tag takes one or no arguments"))
    cl = repo.changelog
    if args:
        pattern = getstring(args[0],
                            # i18n: "tag" is a keyword
                            _('the argument to tag must be a string'))
        kind, pattern, matcher = util.stringmatcher(pattern)
        if kind == 'literal':
            # avoid resolving all tags
            tn = repo._tagscache.tags.get(pattern, None)
            if tn is None:
                raise error.RepoLookupError(_("tag '%s' does not exist")
                                            % pattern)
            s = set([repo[tn].rev()])
        else:
            s = set([cl.rev(n) for t, n in repo.tagslist() if matcher(t)])
    else:
        s = set([cl.rev(n) for t, n in repo.tagslist() if t != 'tip'])
    return subset & s

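# Example usage (illustrative sketch):
#   hg log -r "tag()"           # all tagged revisions except the 'tip' tag
#   hg log -r "tag('re:^v1')"   # tags matching a regular expression pattern
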
@predicate('tagged', safe=True)
def tagged(repo, subset, x):
    return tag(repo, subset, x)

@predicate('unstable()', safe=True)
def unstable(repo, subset, x):
    """Non-obsolete changesets with obsolete ancestors.
    """
    # i18n: "unstable" is a keyword
    getargs(x, 0, 0, _("unstable takes no arguments"))
    unstables = obsmod.getrevs(repo, 'unstable')
    return subset & unstables


@predicate('user(string)', safe=True)
def user(repo, subset, x):
    """User name contains string. The match is case-insensitive.

    Pattern matching is supported for `string`. See
    :hg:`help revisions.patterns`.
    """
    return author(repo, subset, x)

@predicate('wdir', safe=True)
def wdir(repo, subset, x):
    """Working directory. (EXPERIMENTAL)"""
    # i18n: "wdir" is a keyword
    getargs(x, 0, 0, _("wdir takes no arguments"))
    if node.wdirrev in subset or isinstance(subset, fullreposet):
        return baseset([node.wdirrev])
    return baseset()

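# Note (illustrative, not from the original comments): the '_list', '_intlist'
# and '_hexlist' predicates below are internal and not meant to be typed by
# users. Each takes a single '\0'-separated string (of symbols, integer
# revisions, or hex nodes), a form produced by machinery such as the
# optimizer's 'a + b + c()' -> '_list(a b) + c()' rewrite in _optimize()
# further down.
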
def _orderedlist(repo, subset, x):
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    # Remove duplicates here; it is difficult for the caller to deduplicate
    # sets because different symbols can point to the same rev.
    cl = repo.changelog
    ls = []
    seen = set()
    for t in s.split('\0'):
        try:
            # fast path for integer revision
            r = int(t)
            if str(r) != t or r not in cl:
                raise ValueError
            revs = [r]
        except ValueError:
            revs = stringset(repo, subset, t)

        for r in revs:
            if r in seen:
                continue
            if (r in subset
                or r == node.nullrev and isinstance(subset, fullreposet)):
                ls.append(r)
            seen.add(r)
    return baseset(ls)

# for internal use
@predicate('_list', safe=True, takeorder=True)
def _list(repo, subset, x, order):
    if order == followorder:
        # slow path to take the subset order
        return subset & _orderedlist(repo, fullreposet(repo), x)
    else:
        return _orderedlist(repo, subset, x)

def _orderedintlist(repo, subset, x):
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    ls = [int(r) for r in s.split('\0')]
    s = subset
    return baseset([r for r in ls if r in s])

# for internal use
@predicate('_intlist', safe=True, takeorder=True)
def _intlist(repo, subset, x, order):
    if order == followorder:
        # slow path to take the subset order
        return subset & _orderedintlist(repo, fullreposet(repo), x)
    else:
        return _orderedintlist(repo, subset, x)

def _orderedhexlist(repo, subset, x):
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    cl = repo.changelog
    ls = [cl.rev(node.bin(r)) for r in s.split('\0')]
    s = subset
    return baseset([r for r in ls if r in s])

# for internal use
@predicate('_hexlist', safe=True, takeorder=True)
def _hexlist(repo, subset, x, order):
    if order == followorder:
        # slow path to take the subset order
        return subset & _orderedhexlist(repo, fullreposet(repo), x)
    else:
        return _orderedhexlist(repo, subset, x)

methods = {
    "range": rangeset,
    "rangepre": rangepre,
    "dagrange": dagrange,
    "string": stringset,
    "symbol": stringset,
    "and": andset,
    "or": orset,
    "not": notset,
    "difference": differenceset,
    "list": listset,
    "keyvalue": keyvaluepair,
    "func": func,
    "ancestor": ancestorspec,
    "parent": parentspec,
    "parentpost": parentpost,
}

# Constants for ordering requirement, used in _analyze():
#
# If 'define', any nested functions and operations can change the ordering of
# the entries in the set. If 'follow', any nested functions and operations
# should take the ordering specified by the first operand to the '&' operator.
#
# For instance,
#
#   X & (Y | Z)
#   ^   ^^^^^^^
#   |   follow
#   define
#
# will be evaluated as 'or(y(x()), z(x()))', where 'x()' can change the order
# of the entries in the set, but 'y()', 'z()' and 'or()' shouldn't.
#
# 'any' means the order doesn't matter. For instance,
#
#   X & !Y
#       ^
#       any
#
# 'y()' can either enforce its ordering requirement or take the ordering
# specified by 'x()' because 'not()' doesn't care about the order.
#
# Transition of ordering requirement:
#
# 1. starts with 'define'
# 2. shifts to 'follow' by 'x & y'
# 3. changes back to 'define' on function call 'f(x)' or function-like
#    operation 'x (f) y' because 'f' may have its own ordering requirement
#    for 'x' and 'y' (e.g. 'first(x)')
#
anyorder = 'any'        # doesn't care about the order
defineorder = 'define'  # should define the order
followorder = 'follow'  # must follow the current order

# transition table for 'x & y', from the current expression 'x' to 'y'
_tofolloworder = {
    anyorder: anyorder,
    defineorder: followorder,
    followorder: followorder,
}

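# For instance (sketch): when _analyze() below processes 'X & Y' with
# order='define', the left operand X keeps 'define' while the right operand Y
# receives _tofolloworder['define'], i.e. 'follow'.
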
def _matchonly(revs, bases):
    """
    >>> f = lambda *args: _matchonly(*map(parse, args))
    >>> f('ancestors(A)', 'not ancestors(B)')
    ('list', ('symbol', 'A'), ('symbol', 'B'))
    """
    if (revs is not None
        and revs[0] == 'func'
        and getsymbol(revs[1]) == 'ancestors'
        and bases is not None
        and bases[0] == 'not'
        and bases[1][0] == 'func'
        and getsymbol(bases[1][1]) == 'ancestors'):
        return ('list', revs[2], bases[1][2])

def _fixops(x):
    """Rewrite raw parsed tree to resolve ambiguous syntax which cannot be
    handled well by our simple top-down parser"""
    if not isinstance(x, tuple):
        return x

    op = x[0]
    if op == 'parent':
        # x^:y means (x^) : y, not x ^ (:y)
        # x^: means (x^) :, not x ^ (:)
        post = ('parentpost', x[1])
        if x[2][0] == 'dagrangepre':
            return _fixops(('dagrange', post, x[2][1]))
        elif x[2][0] == 'rangepre':
            return _fixops(('range', post, x[2][1]))
        elif x[2][0] == 'rangeall':
            return _fixops(('rangepost', post))
    elif op == 'or':
        # make number of arguments deterministic:
        # x + y + z -> (or x y z) -> (or (list x y z))
        return (op, _fixops(('list',) + x[1:]))

    return (op,) + tuple(_fixops(y) for y in x[1:])

def _analyze(x, order):
    if x is None:
        return x

    op = x[0]
    if op == 'minus':
        return _analyze(('and', x[1], ('not', x[2])), order)
    elif op == 'only':
        t = ('func', ('symbol', 'only'), ('list', x[1], x[2]))
        return _analyze(t, order)
    elif op == 'onlypost':
        return _analyze(('func', ('symbol', 'only'), x[1]), order)
    elif op == 'dagrangepre':
        return _analyze(('func', ('symbol', 'ancestors'), x[1]), order)
    elif op == 'dagrangepost':
        return _analyze(('func', ('symbol', 'descendants'), x[1]), order)
    elif op == 'rangeall':
        return _analyze(('rangepre', ('string', 'tip')), order)
    elif op == 'rangepost':
        return _analyze(('range', x[1], ('string', 'tip')), order)
    elif op == 'negate':
        s = getstring(x[1], _("can't negate that"))
        return _analyze(('string', '-' + s), order)
    elif op in ('string', 'symbol'):
        return x
    elif op == 'and':
        ta = _analyze(x[1], order)
        tb = _analyze(x[2], _tofolloworder[order])
        return (op, ta, tb, order)
    elif op == 'or':
        return (op, _analyze(x[1], order), order)
    elif op == 'not':
        return (op, _analyze(x[1], anyorder), order)
    elif op in ('rangepre', 'parentpost'):
        return (op, _analyze(x[1], defineorder), order)
    elif op == 'group':
        return _analyze(x[1], order)
    elif op in ('dagrange', 'range', 'parent', 'ancestor'):
        ta = _analyze(x[1], defineorder)
        tb = _analyze(x[2], defineorder)
        return (op, ta, tb, order)
    elif op == 'list':
        return (op,) + tuple(_analyze(y, order) for y in x[1:])
    elif op == 'keyvalue':
        return (op, x[1], _analyze(x[2], order))
    elif op == 'func':
        f = getsymbol(x[1])
        d = defineorder
        if f == 'present':
            # 'present(set)' is known to return the argument set with no
            # modification, so forward the current order to its argument
            d = order
        return (op, x[1], _analyze(x[2], d), order)
    raise ValueError('invalid operator %r' % op)

def analyze(x, order=defineorder):
    """Transform raw parsed tree to evaluatable tree which can be fed to
    optimize() or getset()

    All pseudo operations should be mapped to real operations or functions
    defined in methods or symbols table respectively.

    'order' specifies how the current expression 'x' is ordered (see the
    constants defined above.)
    """
    return _analyze(x, order)

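# Worked example (sketch): the raw tree for 'X - Y' is a ('minus', ...) node;
# _analyze() rewrites it into ('and', X, ('not', Y), order) form, and
# _optimize() below then collapses that 'and'/'not' pair into a cheaper
# ('difference', X, Y, order) node.
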
2560 def _optimize(x, small):
2553 def _optimize(x, small):
2561 if x is None:
2554 if x is None:
2562 return 0, x
2555 return 0, x
2563
2556
2564 smallbonus = 1
2557 smallbonus = 1
2565 if small:
2558 if small:
2566 smallbonus = .5
2559 smallbonus = .5
2567
2560
2568 op = x[0]
2561 op = x[0]
2569 if op in ('string', 'symbol'):
2562 if op in ('string', 'symbol'):
2570 return smallbonus, x # single revisions are small
2563 return smallbonus, x # single revisions are small
2571 elif op == 'and':
2564 elif op == 'and':
2572 wa, ta = _optimize(x[1], True)
2565 wa, ta = _optimize(x[1], True)
2573 wb, tb = _optimize(x[2], True)
2566 wb, tb = _optimize(x[2], True)
2574 order = x[3]
2567 order = x[3]
2575 w = min(wa, wb)
2568 w = min(wa, wb)
2576
2569
2577 # (::x and not ::y)/(not ::y and ::x) have a fast path
2570 # (::x and not ::y)/(not ::y and ::x) have a fast path
2578 tm = _matchonly(ta, tb) or _matchonly(tb, ta)
2571 tm = _matchonly(ta, tb) or _matchonly(tb, ta)
2579 if tm:
2572 if tm:
2580 return w, ('func', ('symbol', 'only'), tm, order)
2573 return w, ('func', ('symbol', 'only'), tm, order)
2581
2574
2582 if tb is not None and tb[0] == 'not':
2575 if tb is not None and tb[0] == 'not':
2583 return wa, ('difference', ta, tb[1], order)
2576 return wa, ('difference', ta, tb[1], order)
2584
2577
2585 if wa > wb:
2578 if wa > wb:
2586 return w, (op, tb, ta, order)
2579 return w, (op, tb, ta, order)
2587 return w, (op, ta, tb, order)
2580 return w, (op, ta, tb, order)
2588 elif op == 'or':
2581 elif op == 'or':
2589 # fast path for machine-generated expressions, which are likely to have
2582 # fast path for machine-generated expressions, which are likely to have
2590 # lots of trivial revisions: 'a + b + c()' to '_list(a b) + c()'
2583 # lots of trivial revisions: 'a + b + c()' to '_list(a b) + c()'
2591 order = x[2]
2584 order = x[2]
2592 ws, ts, ss = [], [], []
2585 ws, ts, ss = [], [], []
2593 def flushss():
2586 def flushss():
2594 if not ss:
2587 if not ss:
2595 return
2588 return
2596 if len(ss) == 1:
2589 if len(ss) == 1:
2597 w, t = ss[0]
2590 w, t = ss[0]
2598 else:
2591 else:
2599 s = '\0'.join(t[1] for w, t in ss)
2592 s = '\0'.join(t[1] for w, t in ss)
2600 y = ('func', ('symbol', '_list'), ('string', s), order)
2593 y = ('func', ('symbol', '_list'), ('string', s), order)
2601 w, t = _optimize(y, False)
2594 w, t = _optimize(y, False)
2602 ws.append(w)
2595 ws.append(w)
2603 ts.append(t)
2596 ts.append(t)
2604 del ss[:]
2597 del ss[:]
2605 for y in getlist(x[1]):
2598 for y in getlist(x[1]):
2606 w, t = _optimize(y, False)
2599 w, t = _optimize(y, False)
2607 if t is not None and (t[0] == 'string' or t[0] == 'symbol'):
2600 if t is not None and (t[0] == 'string' or t[0] == 'symbol'):
2608 ss.append((w, t))
2601 ss.append((w, t))
2609 continue
2602 continue
2610 flushss()
2603 flushss()
2611 ws.append(w)
2604 ws.append(w)
2612 ts.append(t)
2605 ts.append(t)
2613 flushss()
2606 flushss()
2614 if len(ts) == 1:
2607 if len(ts) == 1:
2615 return ws[0], ts[0] # 'or' operation is fully optimized out
2608 return ws[0], ts[0] # 'or' operation is fully optimized out
2616 # we can't reorder trees by weight because it would change the order.
2609 # we can't reorder trees by weight because it would change the order.
2617 # ("sort(a + b)" == "sort(b + a)", but "a + b" != "b + a")
2610 # ("sort(a + b)" == "sort(b + a)", but "a + b" != "b + a")
2618 # ts = tuple(t for w, t in sorted(zip(ws, ts), key=lambda wt: wt[0]))
2611 # ts = tuple(t for w, t in sorted(zip(ws, ts), key=lambda wt: wt[0]))
2619 return max(ws), (op, ('list',) + tuple(ts), order)
2612 return max(ws), (op, ('list',) + tuple(ts), order)
2620 elif op == 'not':
2613 elif op == 'not':
2621 # Optimize not public() to _notpublic() because we have a fast version
2614 # Optimize not public() to _notpublic() because we have a fast version
2622 if x[1][:3] == ('func', ('symbol', 'public'), None):
2615 if x[1][:3] == ('func', ('symbol', 'public'), None):
2623 order = x[1][3]
2616 order = x[1][3]
2624 newsym = ('func', ('symbol', '_notpublic'), None, order)
2617 newsym = ('func', ('symbol', '_notpublic'), None, order)
2625 o = _optimize(newsym, not small)
2618 o = _optimize(newsym, not small)
2626 return o[0], o[1]
2619 return o[0], o[1]
2627 else:
2620 else:
2628 o = _optimize(x[1], not small)
2621 o = _optimize(x[1], not small)
2629 order = x[2]
2622 order = x[2]
2630 return o[0], (op, o[1], order)
2623 return o[0], (op, o[1], order)
2631 elif op in ('rangepre', 'parentpost'):
2624 elif op in ('rangepre', 'parentpost'):
2632 o = _optimize(x[1], small)
2625 o = _optimize(x[1], small)
2633 order = x[2]
2626 order = x[2]
2634 return o[0], (op, o[1], order)
2627 return o[0], (op, o[1], order)
2635 elif op in ('dagrange', 'range', 'parent', 'ancestor'):
2628 elif op in ('dagrange', 'range', 'parent', 'ancestor'):
2636 wa, ta = _optimize(x[1], small)
2629 wa, ta = _optimize(x[1], small)
2637 wb, tb = _optimize(x[2], small)
2630 wb, tb = _optimize(x[2], small)
2638 order = x[3]
2631 order = x[3]
2639 return wa + wb, (op, ta, tb, order)
2632 return wa + wb, (op, ta, tb, order)
2640 elif op == 'list':
2633 elif op == 'list':
2641 ws, ts = zip(*(_optimize(y, small) for y in x[1:]))
2634 ws, ts = zip(*(_optimize(y, small) for y in x[1:]))
2642 return sum(ws), (op,) + ts
2635 return sum(ws), (op,) + ts
2643 elif op == 'keyvalue':
2636 elif op == 'keyvalue':
2644 w, t = _optimize(x[2], small)
2637 w, t = _optimize(x[2], small)
2645 return w, (op, x[1], t)
2638 return w, (op, x[1], t)
2646 elif op == 'func':
2639 elif op == 'func':
2647 f = getsymbol(x[1])
2640 f = getsymbol(x[1])
2648 wa, ta = _optimize(x[2], small)
2641 wa, ta = _optimize(x[2], small)
2649 if f in ('author', 'branch', 'closed', 'date', 'desc', 'file', 'grep',
2642 if f in ('author', 'branch', 'closed', 'date', 'desc', 'file', 'grep',
2650 'keyword', 'outgoing', 'user', 'destination'):
2643 'keyword', 'outgoing', 'user', 'destination'):
2651 w = 10 # slow
2644 w = 10 # slow
2652 elif f in ('modifies', 'adds', 'removes'):
2645 elif f in ('modifies', 'adds', 'removes'):
2653 w = 30 # slower
2646 w = 30 # slower
2654 elif f == "contains":
2647 elif f == "contains":
2655 w = 100 # very slow
2648 w = 100 # very slow
2656 elif f == "ancestor":
2649 elif f == "ancestor":
2657 w = 1 * smallbonus
2650 w = 1 * smallbonus
2658 elif f in ('reverse', 'limit', 'first', 'wdir', '_intlist'):
2651 elif f in ('reverse', 'limit', 'first', 'wdir', '_intlist'):
2659 w = 0
2652 w = 0
2660 elif f == "sort":
2653 elif f == "sort":
2661 w = 10 # assume most sorts look at changelog
2654 w = 10 # assume most sorts look at changelog
2662 else:
2655 else:
2663 w = 1
2656 w = 1
2664 order = x[3]
2657 order = x[3]
2665 return w + wa, (op, x[1], ta, order)
2658 return w + wa, (op, x[1], ta, order)
2666 raise ValueError('invalid operator %r' % op)
2659 raise ValueError('invalid operator %r' % op)
2667
2660
2668 def optimize(tree):
2661 def optimize(tree):
2669 """Optimize evaluatable tree
2662 """Optimize evaluatable tree
2670
2663
2671 All pseudo operations should be transformed beforehand.
2664 All pseudo operations should be transformed beforehand.
2672 """
2665 """
2673 _weight, newtree = _optimize(tree, small=True)
2666 _weight, newtree = _optimize(tree, small=True)
2674 return newtree
2667 return newtree
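# Editor's note: a minimal, self-contained sketch (not Mercurial code) of the
# weight heuristic applied by _optimize() above.  The weight table mirrors the
# constants in its 'func' branch ('contains' is "very slow", 'author' merely
# "slow"); the helper name and the simplified tuples are hypothetical.
_ILLUSTRATIVE_WEIGHTS = {'contains': 100, 'author': 10}

def _reorder_and(ta, tb):
    """Put the cheaper operand of an 'and' first, like the wa > wb swap."""
    wa = _ILLUSTRATIVE_WEIGHTS.get(ta[0], 1)
    wb = _ILLUSTRATIVE_WEIGHTS.get(tb[0], 1)
    # evaluating the cheap operand first shrinks the subset that the
    # expensive operand has to scan
    if wa > wb:
        return tb, ta
    return ta, tb

cheap_first = _reorder_and(('contains', 'README'), ('author', 'bob'))
assert cheap_first == (('author', 'bob'), ('contains', 'README'))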
2675
2668
2676 # the set of valid characters for the initial letter of symbols in
2669 # the set of valid characters for the initial letter of symbols in
2677 # alias declarations and definitions
2670 # alias declarations and definitions
2678 _aliassyminitletters = _syminitletters | set(pycompat.sysstr('$'))
2671 _aliassyminitletters = _syminitletters | set(pycompat.sysstr('$'))
2679
2672
2680 def _parsewith(spec, lookup=None, syminitletters=None):
2673 def _parsewith(spec, lookup=None, syminitletters=None):
2681 """Generate a parse tree of given spec with given tokenizing options
2674 """Generate a parse tree of given spec with given tokenizing options
2682
2675
2683 >>> _parsewith('foo($1)', syminitletters=_aliassyminitletters)
2676 >>> _parsewith('foo($1)', syminitletters=_aliassyminitletters)
2684 ('func', ('symbol', 'foo'), ('symbol', '$1'))
2677 ('func', ('symbol', 'foo'), ('symbol', '$1'))
2685 >>> _parsewith('$1')
2678 >>> _parsewith('$1')
2686 Traceback (most recent call last):
2679 Traceback (most recent call last):
2687 ...
2680 ...
2688 ParseError: ("syntax error in revset '$1'", 0)
2681 ParseError: ("syntax error in revset '$1'", 0)
2689 >>> _parsewith('foo bar')
2682 >>> _parsewith('foo bar')
2690 Traceback (most recent call last):
2683 Traceback (most recent call last):
2691 ...
2684 ...
2692 ParseError: ('invalid token', 4)
2685 ParseError: ('invalid token', 4)
2693 """
2686 """
2694 p = parser.parser(elements)
2687 p = parser.parser(elements)
2695 tree, pos = p.parse(tokenize(spec, lookup=lookup,
2688 tree, pos = p.parse(tokenize(spec, lookup=lookup,
2696 syminitletters=syminitletters))
2689 syminitletters=syminitletters))
2697 if pos != len(spec):
2690 if pos != len(spec):
2698 raise error.ParseError(_('invalid token'), pos)
2691 raise error.ParseError(_('invalid token'), pos)
2699 return _fixops(parser.simplifyinfixops(tree, ('list', 'or')))
2692 return _fixops(parser.simplifyinfixops(tree, ('list', 'or')))
2700
2693
2701 class _aliasrules(parser.basealiasrules):
2694 class _aliasrules(parser.basealiasrules):
2702 """Parsing and expansion rule set of revset aliases"""
2695 """Parsing and expansion rule set of revset aliases"""
2703 _section = _('revset alias')
2696 _section = _('revset alias')
2704
2697
2705 @staticmethod
2698 @staticmethod
2706 def _parse(spec):
2699 def _parse(spec):
2707 """Parse alias declaration/definition ``spec``
2700 """Parse alias declaration/definition ``spec``
2708
2701
2709 This allows symbol names to also use ``$`` as an initial letter
2702 This allows symbol names to also use ``$`` as an initial letter
2710 (for backward compatibility), and callers of this function should
2703 (for backward compatibility), and callers of this function should
2711 check whether ``$`` is also used for unexpected symbols.
2704 check whether ``$`` is also used for unexpected symbols.
2712 """
2705 """
2713 return _parsewith(spec, syminitletters=_aliassyminitletters)
2706 return _parsewith(spec, syminitletters=_aliassyminitletters)
2714
2707
2715 @staticmethod
2708 @staticmethod
2716 def _trygetfunc(tree):
2709 def _trygetfunc(tree):
2717 if tree[0] == 'func' and tree[1][0] == 'symbol':
2710 if tree[0] == 'func' and tree[1][0] == 'symbol':
2718 return tree[1][1], getlist(tree[2])
2711 return tree[1][1], getlist(tree[2])
2719
2712
2720 def expandaliases(ui, tree):
2713 def expandaliases(ui, tree):
2721 aliases = _aliasrules.buildmap(ui.configitems('revsetalias'))
2714 aliases = _aliasrules.buildmap(ui.configitems('revsetalias'))
2722 tree = _aliasrules.expand(aliases, tree)
2715 tree = _aliasrules.expand(aliases, tree)
2723 # warn about problematic (but not referred) aliases
2716 # warn about problematic (but not referred) aliases
2724 for name, alias in sorted(aliases.iteritems()):
2717 for name, alias in sorted(aliases.iteritems()):
2725 if alias.error and not alias.warned:
2718 if alias.error and not alias.warned:
2726 ui.warn(_('warning: %s\n') % (alias.error))
2719 ui.warn(_('warning: %s\n') % (alias.error))
2727 alias.warned = True
2720 alias.warned = True
2728 return tree
2721 return tree
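# Editor's note: an illustrative [revsetalias] configuration of the kind read
# above through ui.configitems('revsetalias'); the alias itself is a typical
# example, not something defined by this module:
#
#     [revsetalias]
#     issue($1) = grep(r'\bissue $1\b')
#
# With such an alias configured, expandaliases() rewrites the parse tree of
# 'issue(1234)' into the tree of the corresponding grep() expression before
# analysis and optimization take place.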
2729
2722
2730 def foldconcat(tree):
2723 def foldconcat(tree):
2731 """Fold elements to be concatenated by `##`
2724 """Fold elements to be concatenated by `##`
2732 """
2725 """
2733 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2726 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2734 return tree
2727 return tree
2735 if tree[0] == '_concat':
2728 if tree[0] == '_concat':
2736 pending = [tree]
2729 pending = [tree]
2737 l = []
2730 l = []
2738 while pending:
2731 while pending:
2739 e = pending.pop()
2732 e = pending.pop()
2740 if e[0] == '_concat':
2733 if e[0] == '_concat':
2741 pending.extend(reversed(e[1:]))
2734 pending.extend(reversed(e[1:]))
2742 elif e[0] in ('string', 'symbol'):
2735 elif e[0] in ('string', 'symbol'):
2743 l.append(e[1])
2736 l.append(e[1])
2744 else:
2737 else:
2745 msg = _("\"##\" can't concatenate \"%s\" element") % (e[0])
2738 msg = _("\"##\" can't concatenate \"%s\" element") % (e[0])
2746 raise error.ParseError(msg)
2739 raise error.ParseError(msg)
2747 return ('string', ''.join(l))
2740 return ('string', ''.join(l))
2748 else:
2741 else:
2749 return tuple(foldconcat(t) for t in tree)
2742 return tuple(foldconcat(t) for t in tree)
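# Editor's note: an illustrative trace of foldconcat(), not part of the
# module's doctests; a '##' concatenation of string/symbol leaves collapses
# into a single string node:
#
#     >>> foldconcat(('_concat', ('string', 'foo'), ('symbol', 'bar')))
#     ('string', 'foobar')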
2750
2743
2751 def parse(spec, lookup=None):
2744 def parse(spec, lookup=None):
2752 return _parsewith(spec, lookup=lookup)
2745 return _parsewith(spec, lookup=lookup)
2753
2746
2754 def posttreebuilthook(tree, repo):
2747 def posttreebuilthook(tree, repo):
2755 # hook for extensions to execute code on the optimized tree
2748 # hook for extensions to execute code on the optimized tree
2756 pass
2749 pass
2757
2750
2758 def match(ui, spec, repo=None, order=defineorder):
2751 def match(ui, spec, repo=None, order=defineorder):
2759 """Create a matcher for a single revision spec
2752 """Create a matcher for a single revision spec
2760
2753
2761 If order=followorder, a matcher takes the ordering specified by the input
2754 If order=followorder, a matcher takes the ordering specified by the input
2762 set.
2755 set.
2763 """
2756 """
2764 return matchany(ui, [spec], repo=repo, order=order)
2757 return matchany(ui, [spec], repo=repo, order=order)
2765
2758
2766 def matchany(ui, specs, repo=None, order=defineorder):
2759 def matchany(ui, specs, repo=None, order=defineorder):
2767 """Create a matcher that will include any revisions matching one of the
2760 """Create a matcher that will include any revisions matching one of the
2768 given specs
2761 given specs
2769
2762
2770 If order=followorder, a matcher takes the ordering specified by the input
2763 If order=followorder, a matcher takes the ordering specified by the input
2771 set.
2764 set.
2772 """
2765 """
2773 if not specs:
2766 if not specs:
2774 def mfunc(repo, subset=None):
2767 def mfunc(repo, subset=None):
2775 return baseset()
2768 return baseset()
2776 return mfunc
2769 return mfunc
2777 if not all(specs):
2770 if not all(specs):
2778 raise error.ParseError(_("empty query"))
2771 raise error.ParseError(_("empty query"))
2779 lookup = None
2772 lookup = None
2780 if repo:
2773 if repo:
2781 lookup = repo.__contains__
2774 lookup = repo.__contains__
2782 if len(specs) == 1:
2775 if len(specs) == 1:
2783 tree = parse(specs[0], lookup)
2776 tree = parse(specs[0], lookup)
2784 else:
2777 else:
2785 tree = ('or', ('list',) + tuple(parse(s, lookup) for s in specs))
2778 tree = ('or', ('list',) + tuple(parse(s, lookup) for s in specs))
2786
2779
2787 if ui:
2780 if ui:
2788 tree = expandaliases(ui, tree)
2781 tree = expandaliases(ui, tree)
2789 tree = foldconcat(tree)
2782 tree = foldconcat(tree)
2790 tree = analyze(tree, order)
2783 tree = analyze(tree, order)
2791 tree = optimize(tree)
2784 tree = optimize(tree)
2792 posttreebuilthook(tree, repo)
2785 posttreebuilthook(tree, repo)
2793 return makematcher(tree)
2786 return makematcher(tree)
2794
2787
2795 def makematcher(tree):
2788 def makematcher(tree):
2796 """Create a matcher from an evaluatable tree"""
2789 """Create a matcher from an evaluatable tree"""
2797 def mfunc(repo, subset=None):
2790 def mfunc(repo, subset=None):
2798 if subset is None:
2791 if subset is None:
2799 subset = fullreposet(repo)
2792 subset = fullreposet(repo)
2800 if util.safehasattr(subset, 'isascending'):
2793 if util.safehasattr(subset, 'isascending'):
2801 result = getset(repo, subset, tree)
2794 result = getset(repo, subset, tree)
2802 else:
2795 else:
2803 result = getset(repo, baseset(subset), tree)
2796 result = getset(repo, baseset(subset), tree)
2804 return result
2797 return result
2805 return mfunc
2798 return mfunc
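# Editor's note: a hedged sketch of how the matcher factories above are used
# from calling code (assumes a configured `ui` and a loaded `repo`; the spec
# string is arbitrary):
#
#     m = match(ui, 'head() and not closed()', repo=repo)
#     revs = m(repo)              # a smartset of revision numbers
#     for rev in revs:
#         ui.write('%d\n' % rev)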
2806
2799
2807 def formatspec(expr, *args):
2800 def formatspec(expr, *args):
2808 '''
2801 '''
2809 This is a convenience function for using revsets internally, and
2802 This is a convenience function for using revsets internally, and
2810 escapes arguments appropriately. Aliases are intentionally ignored
2803 escapes arguments appropriately. Aliases are intentionally ignored
2811 so that intended expression behavior isn't accidentally subverted.
2804 so that intended expression behavior isn't accidentally subverted.
2812
2805
2813 Supported arguments:
2806 Supported arguments:
2814
2807
2815 %r = revset expression, parenthesized
2808 %r = revset expression, parenthesized
2816 %d = int(arg), no quoting
2809 %d = int(arg), no quoting
2817 %s = string(arg), escaped and single-quoted
2810 %s = string(arg), escaped and single-quoted
2818 %b = arg.branch(), escaped and single-quoted
2811 %b = arg.branch(), escaped and single-quoted
2819 %n = hex(arg), single-quoted
2812 %n = hex(arg), single-quoted
2820 %% = a literal '%'
2813 %% = a literal '%'
2821
2814
2822 Prefixing the type with 'l' specifies a parenthesized list of that type.
2815 Prefixing the type with 'l' specifies a parenthesized list of that type.
2823
2816
2824 >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
2817 >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
2825 '(10 or 11):: and ((this()) or (that()))'
2818 '(10 or 11):: and ((this()) or (that()))'
2826 >>> formatspec('%d:: and not %d::', 10, 20)
2819 >>> formatspec('%d:: and not %d::', 10, 20)
2827 '10:: and not 20::'
2820 '10:: and not 20::'
2828 >>> formatspec('%ld or %ld', [], [1])
2821 >>> formatspec('%ld or %ld', [], [1])
2829 "_list('') or 1"
2822 "_list('') or 1"
2830 >>> formatspec('keyword(%s)', 'foo\\xe9')
2823 >>> formatspec('keyword(%s)', 'foo\\xe9')
2831 "keyword('foo\\\\xe9')"
2824 "keyword('foo\\\\xe9')"
2832 >>> b = lambda: 'default'
2825 >>> b = lambda: 'default'
2833 >>> b.branch = b
2826 >>> b.branch = b
2834 >>> formatspec('branch(%b)', b)
2827 >>> formatspec('branch(%b)', b)
2835 "branch('default')"
2828 "branch('default')"
2836 >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
2829 >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
2837 "root(_list('a\\x00b\\x00c\\x00d'))"
2830 "root(_list('a\\x00b\\x00c\\x00d'))"
2838 '''
2831 '''
2839
2832
2840 def quote(s):
2833 def quote(s):
2841 return repr(str(s))
2834 return repr(str(s))
2842
2835
2843 def argtype(c, arg):
2836 def argtype(c, arg):
2844 if c == 'd':
2837 if c == 'd':
2845 return str(int(arg))
2838 return str(int(arg))
2846 elif c == 's':
2839 elif c == 's':
2847 return quote(arg)
2840 return quote(arg)
2848 elif c == 'r':
2841 elif c == 'r':
2849 parse(arg) # make sure syntax errors are confined
2842 parse(arg) # make sure syntax errors are confined
2850 return '(%s)' % arg
2843 return '(%s)' % arg
2851 elif c == 'n':
2844 elif c == 'n':
2852 return quote(node.hex(arg))
2845 return quote(node.hex(arg))
2853 elif c == 'b':
2846 elif c == 'b':
2854 return quote(arg.branch())
2847 return quote(arg.branch())
2855
2848
2856 def listexp(s, t):
2849 def listexp(s, t):
2857 l = len(s)
2850 l = len(s)
2858 if l == 0:
2851 if l == 0:
2859 return "_list('')"
2852 return "_list('')"
2860 elif l == 1:
2853 elif l == 1:
2861 return argtype(t, s[0])
2854 return argtype(t, s[0])
2862 elif t == 'd':
2855 elif t == 'd':
2863 return "_intlist('%s')" % "\0".join(str(int(a)) for a in s)
2856 return "_intlist('%s')" % "\0".join(str(int(a)) for a in s)
2864 elif t == 's':
2857 elif t == 's':
2865 return "_list('%s')" % "\0".join(s)
2858 return "_list('%s')" % "\0".join(s)
2866 elif t == 'n':
2859 elif t == 'n':
2867 return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
2860 return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
2868 elif t == 'b':
2861 elif t == 'b':
2869 return "_list('%s')" % "\0".join(a.branch() for a in s)
2862 return "_list('%s')" % "\0".join(a.branch() for a in s)
2870
2863
2871 m = l // 2
2864 m = l // 2
2872 return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))
2865 return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))
2873
2866
2874 ret = ''
2867 ret = ''
2875 pos = 0
2868 pos = 0
2876 arg = 0
2869 arg = 0
2877 while pos < len(expr):
2870 while pos < len(expr):
2878 c = expr[pos]
2871 c = expr[pos]
2879 if c == '%':
2872 if c == '%':
2880 pos += 1
2873 pos += 1
2881 d = expr[pos]
2874 d = expr[pos]
2882 if d == '%':
2875 if d == '%':
2883 ret += d
2876 ret += d
2884 elif d in 'dsnbr':
2877 elif d in 'dsnbr':
2885 ret += argtype(d, args[arg])
2878 ret += argtype(d, args[arg])
2886 arg += 1
2879 arg += 1
2887 elif d == 'l':
2880 elif d == 'l':
2888 # a list of some type
2881 # a list of some type
2889 pos += 1
2882 pos += 1
2890 d = expr[pos]
2883 d = expr[pos]
2891 ret += listexp(list(args[arg]), d)
2884 ret += listexp(list(args[arg]), d)
2892 arg += 1
2885 arg += 1
2893 else:
2886 else:
2894 raise error.Abort(_('unexpected revspec format character %s')
2887 raise error.Abort(_('unexpected revspec format character %s')
2895 % d)
2888 % d)
2896 else:
2889 else:
2897 ret += c
2890 ret += c
2898 pos += 1
2891 pos += 1
2899
2892
2900 return ret
2893 return ret
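# Editor's note: an additional illustrative case, not part of the doctests
# above, showing how listexp() recursively splits an 'l'-prefixed list of
# '%r' revset expressions into a nested tree of 'or' terms:
#
#     >>> formatspec('%lr', ['a', 'b', 'c'])
#     '((a) or ((b) or (c)))'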
2901
2894
2902 def prettyformat(tree):
2895 def prettyformat(tree):
2903 return parser.prettyformat(tree, ('string', 'symbol'))
2896 return parser.prettyformat(tree, ('string', 'symbol'))
2904
2897
2905 def depth(tree):
2898 def depth(tree):
2906 if isinstance(tree, tuple):
2899 if isinstance(tree, tuple):
2907 return max(map(depth, tree)) + 1
2900 return max(map(depth, tree)) + 1
2908 else:
2901 else:
2909 return 0
2902 return 0
2910
2903
2911 def funcsused(tree):
2904 def funcsused(tree):
2912 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2905 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2913 return set()
2906 return set()
2914 else:
2907 else:
2915 funcs = set()
2908 funcs = set()
2916 for s in tree[1:]:
2909 for s in tree[1:]:
2917 funcs |= funcsused(s)
2910 funcs |= funcsused(s)
2918 if tree[0] == 'func':
2911 if tree[0] == 'func':
2919 funcs.add(tree[1][1])
2912 funcs.add(tree[1][1])
2920 return funcs
2913 return funcs
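# Editor's note: illustrative values for the two helpers above; the trees are
# written by hand in the ('func', ('symbol', ...), ...) shape used throughout
# this module:
#
#     >>> t = ('func', ('symbol', 'heads'), ('symbol', 'default'))
#     >>> depth(t)
#     2
#     >>> sorted(funcsused(('or', ('list', t,
#     ...                   ('func', ('symbol', 'closed'), None)))))
#     ['closed', 'heads']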
2921
2914
2922 def _formatsetrepr(r):
2915 def _formatsetrepr(r):
2923 """Format an optional printable representation of a set
2916 """Format an optional printable representation of a set
2924
2917
2925 ======== =================================
2918 ======== =================================
2926 type(r) example
2919 type(r) example
2927 ======== =================================
2920 ======== =================================
2928 tuple ('<not %r>', other)
2921 tuple ('<not %r>', other)
2929 str '<branch closed>'
2922 str '<branch closed>'
2930 callable lambda: '<branch %r>' % sorted(b)
2923 callable lambda: '<branch %r>' % sorted(b)
2931 object other
2924 object other
2932 ======== =================================
2925 ======== =================================
2933 """
2926 """
2934 if r is None:
2927 if r is None:
2935 return ''
2928 return ''
2936 elif isinstance(r, tuple):
2929 elif isinstance(r, tuple):
2937 return r[0] % r[1:]
2930 return r[0] % r[1:]
2938 elif isinstance(r, str):
2931 elif isinstance(r, str):
2939 return r
2932 return r
2940 elif callable(r):
2933 elif callable(r):
2941 return r()
2934 return r()
2942 else:
2935 else:
2943 return repr(r)
2936 return repr(r)
2944
2937
2945 class abstractsmartset(object):
2938 class abstractsmartset(object):
2946
2939
2947 def __nonzero__(self):
2940 def __nonzero__(self):
2948 """True if the smartset is not empty"""
2941 """True if the smartset is not empty"""
2949 raise NotImplementedError()
2942 raise NotImplementedError()
2950
2943
2951 def __contains__(self, rev):
2944 def __contains__(self, rev):
2952 """provide fast membership testing"""
2945 """provide fast membership testing"""
2953 raise NotImplementedError()
2946 raise NotImplementedError()
2954
2947
2955 def __iter__(self):
2948 def __iter__(self):
2956 """iterate the set in the order it is supposed to be iterated"""
2949 """iterate the set in the order it is supposed to be iterated"""
2957 raise NotImplementedError()
2950 raise NotImplementedError()
2958
2951
2959 # Attributes containing a function to perform a fast iteration in a given
2952 # Attributes containing a function to perform a fast iteration in a given
2960 # direction. A smartset can have none, one, or both defined.
2953 # direction. A smartset can have none, one, or both defined.
2961 #
2954 #
2962 # Default value is None instead of a function returning None to avoid
2955 # Default value is None instead of a function returning None to avoid
2963 # initializing an iterator just for testing if a fast method exists.
2956 # initializing an iterator just for testing if a fast method exists.
2964 fastasc = None
2957 fastasc = None
2965 fastdesc = None
2958 fastdesc = None
2966
2959
2967 def isascending(self):
2960 def isascending(self):
2968 """True if the set will iterate in ascending order"""
2961 """True if the set will iterate in ascending order"""
2969 raise NotImplementedError()
2962 raise NotImplementedError()
2970
2963
2971 def isdescending(self):
2964 def isdescending(self):
2972 """True if the set will iterate in descending order"""
2965 """True if the set will iterate in descending order"""
2973 raise NotImplementedError()
2966 raise NotImplementedError()
2974
2967
2975 def istopo(self):
2968 def istopo(self):
2976 """True if the set will iterate in topographical order"""
2969 """True if the set will iterate in topographical order"""
2977 raise NotImplementedError()
2970 raise NotImplementedError()
2978
2971
2979 def min(self):
2972 def min(self):
2980 """return the minimum element in the set"""
2973 """return the minimum element in the set"""
2981 if self.fastasc is None:
2974 if self.fastasc is None:
2982 v = min(self)
2975 v = min(self)
2983 else:
2976 else:
2984 for v in self.fastasc():
2977 for v in self.fastasc():
2985 break
2978 break
2986 else:
2979 else:
2987 raise ValueError('arg is an empty sequence')
2980 raise ValueError('arg is an empty sequence')
2988 self.min = lambda: v
2981 self.min = lambda: v
2989 return v
2982 return v
2990
2983
2991 def max(self):
2984 def max(self):
2992 """return the maximum element in the set"""
2985 """return the maximum element in the set"""
2993 if self.fastdesc is None:
2986 if self.fastdesc is None:
2994 return max(self)
2987 return max(self)
2995 else:
2988 else:
2996 for v in self.fastdesc():
2989 for v in self.fastdesc():
2997 break
2990 break
2998 else:
2991 else:
2999 raise ValueError('arg is an empty sequence')
2992 raise ValueError('arg is an empty sequence')
3000 self.max = lambda: v
2993 self.max = lambda: v
3001 return v
2994 return v
3002
2995
3003 def first(self):
2996 def first(self):
3004 """return the first element in the set (user iteration perspective)
2997 """return the first element in the set (user iteration perspective)
3005
2998
3006 Return None if the set is empty"""
2999 Return None if the set is empty"""
3007 raise NotImplementedError()
3000 raise NotImplementedError()
3008
3001
3009 def last(self):
3002 def last(self):
3010 """return the last element in the set (user iteration perspective)
3003 """return the last element in the set (user iteration perspective)
3011
3004
3012 Return None if the set is empty"""
3005 Return None if the set is empty"""
3013 raise NotImplementedError()
3006 raise NotImplementedError()
3014
3007
3015 def __len__(self):
3008 def __len__(self):
3016 """return the length of the smartsets
3009 """return the length of the smartsets
3017
3010
3018 This can be expensive on a smartset that could otherwise be lazy."""
3011 This can be expensive on a smartset that could otherwise be lazy."""
3019 raise NotImplementedError()
3012 raise NotImplementedError()
3020
3013
3021 def reverse(self):
3014 def reverse(self):
3022 """reverse the expected iteration order"""
3015 """reverse the expected iteration order"""
3023 raise NotImplementedError()
3016 raise NotImplementedError()
3024
3017
3025 def sort(self, reverse=True):
3018 def sort(self, reverse=True):
3026 """get the set to iterate in an ascending or descending order"""
3019 """get the set to iterate in an ascending or descending order"""
3027 raise NotImplementedError()
3020 raise NotImplementedError()
3028
3021
3029 def __and__(self, other):
3022 def __and__(self, other):
3030 """Returns a new object with the intersection of the two collections.
3023 """Returns a new object with the intersection of the two collections.
3031
3024
3032 This is part of the mandatory API for smartset."""
3025 This is part of the mandatory API for smartset."""
3033 if isinstance(other, fullreposet):
3026 if isinstance(other, fullreposet):
3034 return self
3027 return self
3035 return self.filter(other.__contains__, condrepr=other, cache=False)
3028 return self.filter(other.__contains__, condrepr=other, cache=False)
3036
3029
3037 def __add__(self, other):
3030 def __add__(self, other):
3038 """Returns a new object with the union of the two collections.
3031 """Returns a new object with the union of the two collections.
3039
3032
3040 This is part of the mandatory API for smartset."""
3033 This is part of the mandatory API for smartset."""
3041 return addset(self, other)
3034 return addset(self, other)
3042
3035
3043 def __sub__(self, other):
3036 def __sub__(self, other):
3044 """Returns a new object with the substraction of the two collections.
3037 """Returns a new object with the substraction of the two collections.
3045
3038
3046 This is part of the mandatory API for smartset."""
3039 This is part of the mandatory API for smartset."""
3047 c = other.__contains__
3040 c = other.__contains__
3048 return self.filter(lambda r: not c(r), condrepr=('<not %r>', other),
3041 return self.filter(lambda r: not c(r), condrepr=('<not %r>', other),
3049 cache=False)
3042 cache=False)
3050
3043
3051 def filter(self, condition, condrepr=None, cache=True):
3044 def filter(self, condition, condrepr=None, cache=True):
3052 """Returns this smartset filtered by condition as a new smartset.
3045 """Returns this smartset filtered by condition as a new smartset.
3053
3046
3054 `condition` is a callable which takes a revision number and returns a
3047 `condition` is a callable which takes a revision number and returns a
3055 boolean. Optional `condrepr` provides a printable representation of
3048 boolean. Optional `condrepr` provides a printable representation of
3056 the given `condition`.
3049 the given `condition`.
3057
3050
3058 This is part of the mandatory API for smartset."""
3051 This is part of the mandatory API for smartset."""
3059 # builtins cannot be cached, but they do not need to be
3052 # builtins cannot be cached, but they do not need to be
3060 if cache and util.safehasattr(condition, 'func_code'):
3053 if cache and util.safehasattr(condition, 'func_code'):
3061 condition = util.cachefunc(condition)
3054 condition = util.cachefunc(condition)
3062 return filteredset(self, condition, condrepr)
3055 return filteredset(self, condition, condrepr)
3063
3056
3064 class baseset(abstractsmartset):
3057 class baseset(abstractsmartset):
3065 """Basic data structure that represents a revset and contains the basic
3058 """Basic data structure that represents a revset and contains the basic
3066 operations that it should be able to perform.
3059 operations that it should be able to perform.
3067
3060
3068 Every method in this class should be implemented by any smartset class.
3061 Every method in this class should be implemented by any smartset class.
3069 """
3062 """
3070 def __init__(self, data=(), datarepr=None, istopo=False):
3063 def __init__(self, data=(), datarepr=None, istopo=False):
3071 """
3064 """
3072 datarepr: a tuple of (format, obj, ...), a function or an object that
3065 datarepr: a tuple of (format, obj, ...), a function or an object that
3073 provides a printable representation of the given data.
3066 provides a printable representation of the given data.
3074 """
3067 """
3075 self._ascending = None
3068 self._ascending = None
3076 self._istopo = istopo
3069 self._istopo = istopo
3077 if not isinstance(data, list):
3070 if not isinstance(data, list):
3078 if isinstance(data, set):
3071 if isinstance(data, set):
3079 self._set = data
3072 self._set = data
3080 # a set has no order; we pick one for stability purposes
3073 # a set has no order; we pick one for stability purposes
3081 self._ascending = True
3074 self._ascending = True
3082 data = list(data)
3075 data = list(data)
3083 self._list = data
3076 self._list = data
3084 self._datarepr = datarepr
3077 self._datarepr = datarepr
3085
3078
3086 @util.propertycache
3079 @util.propertycache
3087 def _set(self):
3080 def _set(self):
3088 return set(self._list)
3081 return set(self._list)
3089
3082
3090 @util.propertycache
3083 @util.propertycache
3091 def _asclist(self):
3084 def _asclist(self):
3092 asclist = self._list[:]
3085 asclist = self._list[:]
3093 asclist.sort()
3086 asclist.sort()
3094 return asclist
3087 return asclist
3095
3088
3096 def __iter__(self):
3089 def __iter__(self):
3097 if self._ascending is None:
3090 if self._ascending is None:
3098 return iter(self._list)
3091 return iter(self._list)
3099 elif self._ascending:
3092 elif self._ascending:
3100 return iter(self._asclist)
3093 return iter(self._asclist)
3101 else:
3094 else:
3102 return reversed(self._asclist)
3095 return reversed(self._asclist)
3103
3096
3104 def fastasc(self):
3097 def fastasc(self):
3105 return iter(self._asclist)
3098 return iter(self._asclist)
3106
3099
3107 def fastdesc(self):
3100 def fastdesc(self):
3108 return reversed(self._asclist)
3101 return reversed(self._asclist)
3109
3102
3110 @util.propertycache
3103 @util.propertycache
3111 def __contains__(self):
3104 def __contains__(self):
3112 return self._set.__contains__
3105 return self._set.__contains__
3113
3106
3114 def __nonzero__(self):
3107 def __nonzero__(self):
3115 return bool(self._list)
3108 return bool(self._list)
3116
3109
3117 def sort(self, reverse=False):
3110 def sort(self, reverse=False):
3118 self._ascending = not bool(reverse)
3111 self._ascending = not bool(reverse)
3119 self._istopo = False
3112 self._istopo = False
3120
3113
3121 def reverse(self):
3114 def reverse(self):
3122 if self._ascending is None:
3115 if self._ascending is None:
3123 self._list.reverse()
3116 self._list.reverse()
3124 else:
3117 else:
3125 self._ascending = not self._ascending
3118 self._ascending = not self._ascending
3126 self._istopo = False
3119 self._istopo = False
3127
3120
3128 def __len__(self):
3121 def __len__(self):
3129 return len(self._list)
3122 return len(self._list)
3130
3123
3131 def isascending(self):
3124 def isascending(self):
3132 """Returns True if the collection is ascending order, False if not.
3125 """Returns True if the collection is ascending order, False if not.
3133
3126
3134 This is part of the mandatory API for smartset."""
3127 This is part of the mandatory API for smartset."""
3135 if len(self) <= 1:
3128 if len(self) <= 1:
3136 return True
3129 return True
3137 return self._ascending is not None and self._ascending
3130 return self._ascending is not None and self._ascending
3138
3131
3139 def isdescending(self):
3132 def isdescending(self):
3140 """Returns True if the collection is descending order, False if not.
3133 """Returns True if the collection is descending order, False if not.
3141
3134
3142 This is part of the mandatory API for smartset."""
3135 This is part of the mandatory API for smartset."""
3143 if len(self) <= 1:
3136 if len(self) <= 1:
3144 return True
3137 return True
3145 return self._ascending is not None and not self._ascending
3138 return self._ascending is not None and not self._ascending
3146
3139
3147 def istopo(self):
3140 def istopo(self):
3148 """Is the collection is in topographical order or not.
3141 """Is the collection is in topographical order or not.
3149
3142
3150 This is part of the mandatory API for smartset."""
3143 This is part of the mandatory API for smartset."""
3151 if len(self) <= 1:
3144 if len(self) <= 1:
3152 return True
3145 return True
3153 return self._istopo
3146 return self._istopo
3154
3147
3155 def first(self):
3148 def first(self):
3156 if self:
3149 if self:
3157 if self._ascending is None:
3150 if self._ascending is None:
3158 return self._list[0]
3151 return self._list[0]
3159 elif self._ascending:
3152 elif self._ascending:
3160 return self._asclist[0]
3153 return self._asclist[0]
3161 else:
3154 else:
3162 return self._asclist[-1]
3155 return self._asclist[-1]
3163 return None
3156 return None
3164
3157
3165 def last(self):
3158 def last(self):
3166 if self:
3159 if self:
3167 if self._ascending is None:
3160 if self._ascending is None:
3168 return self._list[-1]
3161 return self._list[-1]
3169 elif self._ascending:
3162 elif self._ascending:
3170 return self._asclist[-1]
3163 return self._asclist[-1]
3171 else:
3164 else:
3172 return self._asclist[0]
3165 return self._asclist[0]
3173 return None
3166 return None
3174
3167
3175 def __repr__(self):
3168 def __repr__(self):
3176 d = {None: '', False: '-', True: '+'}[self._ascending]
3169 d = {None: '', False: '-', True: '+'}[self._ascending]
3177 s = _formatsetrepr(self._datarepr)
3170 s = _formatsetrepr(self._datarepr)
3178 if not s:
3171 if not s:
3179 l = self._list
3172 l = self._list
3180 # if _list has been built from a set, it might have a different
3173 # if _list has been built from a set, it might have a different
3181 # order from one python implementation to another.
3174 # order from one python implementation to another.
3182 # We fall back to the sorted version for stable output.
3175 # We fall back to the sorted version for stable output.
3183 if self._ascending is not None:
3176 if self._ascending is not None:
3184 l = self._asclist
3177 l = self._asclist
3185 s = repr(l)
3178 s = repr(l)
3186 return '<%s%s %s>' % (type(self).__name__, d, s)
3179 return '<%s%s %s>' % (type(self).__name__, d, s)
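# Editor's note: an illustrative (untested) session showing the set algebra
# that baseset inherits from abstractsmartset; the revision numbers are
# arbitrary:
#
#     >>> xs = baseset([3, 1, 2])
#     >>> ys = baseset([2, 4])
#     >>> sorted(xs & ys)       # __and__ -> filteredset
#     [2]
#     >>> sorted(xs + ys)       # __add__ -> addset
#     [1, 2, 3, 4]
#     >>> sorted(xs - ys)       # __sub__ -> filteredset
#     [1, 3]
#     >>> xs.min(), xs.max(), xs.first(), xs.last()
#     (1, 3, 3, 2)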
3187
3180
3188 class filteredset(abstractsmartset):
3181 class filteredset(abstractsmartset):
3189 """Duck type for baseset class which iterates lazily over the revisions in
3182 """Duck type for baseset class which iterates lazily over the revisions in
3190 the subset and contains a function which tests for membership in the
3183 the subset and contains a function which tests for membership in the
3191 revset
3184 revset
3192 """
3185 """
3193 def __init__(self, subset, condition=lambda x: True, condrepr=None):
3186 def __init__(self, subset, condition=lambda x: True, condrepr=None):
3194 """
3187 """
3195 condition: a function that decides whether a revision in the subset
3188 condition: a function that decides whether a revision in the subset
3196 belongs to the revset.
3189 belongs to the revset.
3197 condrepr: a tuple of (format, obj, ...), a function or an object that
3190 condrepr: a tuple of (format, obj, ...), a function or an object that
3198 provides a printable representation of the given condition.
3191 provides a printable representation of the given condition.
3199 """
3192 """
3200 self._subset = subset
3193 self._subset = subset
3201 self._condition = condition
3194 self._condition = condition
3202 self._condrepr = condrepr
3195 self._condrepr = condrepr
3203
3196
3204 def __contains__(self, x):
3197 def __contains__(self, x):
3205 return x in self._subset and self._condition(x)
3198 return x in self._subset and self._condition(x)
3206
3199
3207 def __iter__(self):
3200 def __iter__(self):
3208 return self._iterfilter(self._subset)
3201 return self._iterfilter(self._subset)
3209
3202
3210 def _iterfilter(self, it):
3203 def _iterfilter(self, it):
3211 cond = self._condition
3204 cond = self._condition
3212 for x in it:
3205 for x in it:
3213 if cond(x):
3206 if cond(x):
3214 yield x
3207 yield x
3215
3208
3216 @property
3209 @property
3217 def fastasc(self):
3210 def fastasc(self):
3218 it = self._subset.fastasc
3211 it = self._subset.fastasc
3219 if it is None:
3212 if it is None:
3220 return None
3213 return None
3221 return lambda: self._iterfilter(it())
3214 return lambda: self._iterfilter(it())
3222
3215
3223 @property
3216 @property
3224 def fastdesc(self):
3217 def fastdesc(self):
3225 it = self._subset.fastdesc
3218 it = self._subset.fastdesc
3226 if it is None:
3219 if it is None:
3227 return None
3220 return None
3228 return lambda: self._iterfilter(it())
3221 return lambda: self._iterfilter(it())
3229
3222
3230 def __nonzero__(self):
3223 def __nonzero__(self):
3231 fast = None
3224 fast = None
3232 candidates = [self.fastasc if self.isascending() else None,
3225 candidates = [self.fastasc if self.isascending() else None,
3233 self.fastdesc if self.isdescending() else None,
3226 self.fastdesc if self.isdescending() else None,
3234 self.fastasc,
3227 self.fastasc,
3235 self.fastdesc]
3228 self.fastdesc]
3236 for candidate in candidates:
3229 for candidate in candidates:
3237 if candidate is not None:
3230 if candidate is not None:
3238 fast = candidate
3231 fast = candidate
3239 break
3232 break
3240
3233
3241 if fast is not None:
3234 if fast is not None:
3242 it = fast()
3235 it = fast()
3243 else:
3236 else:
3244 it = self
3237 it = self
3245
3238
3246 for r in it:
3239 for r in it:
3247 return True
3240 return True
3248 return False
3241 return False
3249
3242
3250 def __len__(self):
3243 def __len__(self):
3251 # Basic implementation to be changed in future patches.
3244 # Basic implementation to be changed in future patches.
3252 # until this gets improved, we use a generator expression
3245 # until this gets improved, we use a generator expression
3253 # here, since a list comprehension is free to call __len__ again,
3246 # here, since a list comprehension is free to call __len__ again,
3254 # causing infinite recursion
3247 # causing infinite recursion
3255 l = baseset(r for r in self)
3248 l = baseset(r for r in self)
3256 return len(l)
3249 return len(l)
3257
3250
3258 def sort(self, reverse=False):
3251 def sort(self, reverse=False):
3259 self._subset.sort(reverse=reverse)
3252 self._subset.sort(reverse=reverse)
3260
3253
3261 def reverse(self):
3254 def reverse(self):
3262 self._subset.reverse()
3255 self._subset.reverse()
3263
3256
3264 def isascending(self):
3257 def isascending(self):
3265 return self._subset.isascending()
3258 return self._subset.isascending()
3266
3259
3267 def isdescending(self):
3260 def isdescending(self):
3268 return self._subset.isdescending()
3261 return self._subset.isdescending()
3269
3262
3270 def istopo(self):
3263 def istopo(self):
3271 return self._subset.istopo()
3264 return self._subset.istopo()
3272
3265
3273 def first(self):
3266 def first(self):
3274 for x in self:
3267 for x in self:
3275 return x
3268 return x
3276 return None
3269 return None
3277
3270
3278 def last(self):
3271 def last(self):
3279 it = None
3272 it = None
3280 if self.isascending():
3273 if self.isascending():
3281 it = self.fastdesc
3274 it = self.fastdesc
3282 elif self.isdescending():
3275 elif self.isdescending():
3283 it = self.fastasc
3276 it = self.fastasc
3284 if it is not None:
3277 if it is not None:
3285 for x in it():
3278 for x in it():
3286 return x
3279 return x
3287 return None #empty case
3280 return None #empty case
3288 else:
3281 else:
3289 x = None
3282 x = None
3290 for x in self:
3283 for x in self:
3291 pass
3284 pass
3292 return x
3285 return x
3293
3286
3294 def __repr__(self):
3287 def __repr__(self):
3295 xs = [repr(self._subset)]
3288 xs = [repr(self._subset)]
3296 s = _formatsetrepr(self._condrepr)
3289 s = _formatsetrepr(self._condrepr)
3297 if s:
3290 if s:
3298 xs.append(s)
3291 xs.append(s)
3299 return '<%s %s>' % (type(self).__name__, ', '.join(xs))
3292 return '<%s %s>' % (type(self).__name__, ', '.join(xs))
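# Editor's note: an illustrative use of filter()/filteredset with the
# condrepr argument described above; the condition is applied lazily, one
# revision at a time:
#
#     >>> s = baseset([0, 1, 2, 3, 4]).filter(lambda r: r % 2 == 0,
#     ...                                     condrepr='<even>')
#     >>> s
#     <filteredset <baseset [0, 1, 2, 3, 4]>, <even>>
#     >>> list(s)
#     [0, 2, 4]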
3300
3293
3301 def _iterordered(ascending, iter1, iter2):
3294 def _iterordered(ascending, iter1, iter2):
3302 """produce an ordered iteration from two iterators with the same order
3295 """produce an ordered iteration from two iterators with the same order
3303
3296
3304 The ascending parameter is used to indicate the iteration direction.
3297 The ascending parameter is used to indicate the iteration direction.
3305 """
3298 """
3306 choice = max
3299 choice = max
3307 if ascending:
3300 if ascending:
3308 choice = min
3301 choice = min
3309
3302
3310 val1 = None
3303 val1 = None
3311 val2 = None
3304 val2 = None
3312 try:
3305 try:
3313 # Consume both iterators in an ordered way until one is empty
3306 # Consume both iterators in an ordered way until one is empty
3314 while True:
3307 while True:
3315 if val1 is None:
3308 if val1 is None:
3316 val1 = next(iter1)
3309 val1 = next(iter1)
3317 if val2 is None:
3310 if val2 is None:
3318 val2 = next(iter2)
3311 val2 = next(iter2)
3319 n = choice(val1, val2)
3312 n = choice(val1, val2)
3320 yield n
3313 yield n
3321 if val1 == n:
3314 if val1 == n:
3322 val1 = None
3315 val1 = None
3323 if val2 == n:
3316 if val2 == n:
3324 val2 = None
3317 val2 = None
3325 except StopIteration:
3318 except StopIteration:
3326 # Flush any remaining values and consume the other one
3319 # Flush any remaining values and consume the other one
3327 it = iter2
3320 it = iter2
3328 if val1 is not None:
3321 if val1 is not None:
3329 yield val1
3322 yield val1
3330 it = iter1
3323 it = iter1
3331 elif val2 is not None:
3324 elif val2 is not None:
3332 # might have been equality and both are empty
3325 # might have been equality and both are empty
3333 yield val2
3326 yield val2
3334 for val in it:
3327 for val in it:
3335 yield val
3328 yield val
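# Editor's note: an illustrative trace of _iterordered(); values shared by
# both inputs are yielded only once, and the remaining iterator is flushed
# once the other is exhausted:
#
#     >>> list(_iterordered(True, iter([1, 3, 5]), iter([2, 3, 6])))
#     [1, 2, 3, 5, 6]
#     >>> list(_iterordered(False, iter([5, 3]), iter([4])))
#     [5, 4, 3]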
3336
3329
3337 class addset(abstractsmartset):
3330 class addset(abstractsmartset):
3338 """Represent the addition of two sets
3331 """Represent the addition of two sets
3339
3332
3340 Wrapper structure for lazily adding two structures without losing much
3333 Wrapper structure for lazily adding two structures without losing much
3341 performance on the __contains__ method
3334 performance on the __contains__ method
3342
3335
3343 If the ascending attribute is set, that means the two structures are
3336 If the ascending attribute is set, that means the two structures are
3344 ordered in either an ascending or descending way. Therefore, we can add
3337 ordered in either an ascending or descending way. Therefore, we can add
3345 them while maintaining the order by iterating over both at the same time.
3338 them while maintaining the order by iterating over both at the same time.
3346
3339
3347 >>> xs = baseset([0, 3, 2])
3340 >>> xs = baseset([0, 3, 2])
3348 >>> ys = baseset([5, 2, 4])
3341 >>> ys = baseset([5, 2, 4])
3349
3342
3350 >>> rs = addset(xs, ys)
3343 >>> rs = addset(xs, ys)
3351 >>> bool(rs), 0 in rs, 1 in rs, 5 in rs, rs.first(), rs.last()
3344 >>> bool(rs), 0 in rs, 1 in rs, 5 in rs, rs.first(), rs.last()
3352 (True, True, False, True, 0, 4)
3345 (True, True, False, True, 0, 4)
3353 >>> rs = addset(xs, baseset([]))
3346 >>> rs = addset(xs, baseset([]))
3354 >>> bool(rs), 0 in rs, 1 in rs, rs.first(), rs.last()
3347 >>> bool(rs), 0 in rs, 1 in rs, rs.first(), rs.last()
3355 (True, True, False, 0, 2)
3348 (True, True, False, 0, 2)
3356 >>> rs = addset(baseset([]), baseset([]))
3349 >>> rs = addset(baseset([]), baseset([]))
3357 >>> bool(rs), 0 in rs, rs.first(), rs.last()
3350 >>> bool(rs), 0 in rs, rs.first(), rs.last()
3358 (False, False, None, None)
3351 (False, False, None, None)
3359
3352
3360 iterate unsorted:
3353 iterate unsorted:
3361 >>> rs = addset(xs, ys)
3354 >>> rs = addset(xs, ys)
3362 >>> # (use generator because pypy could call len())
3355 >>> # (use generator because pypy could call len())
3363 >>> list(x for x in rs) # without _genlist
3356 >>> list(x for x in rs) # without _genlist
3364 [0, 3, 2, 5, 4]
3357 [0, 3, 2, 5, 4]
3365 >>> assert not rs._genlist
3358 >>> assert not rs._genlist
3366 >>> len(rs)
3359 >>> len(rs)
3367 5
3360 5
3368 >>> [x for x in rs] # with _genlist
3361 >>> [x for x in rs] # with _genlist
3369 [0, 3, 2, 5, 4]
3362 [0, 3, 2, 5, 4]
3370 >>> assert rs._genlist
3363 >>> assert rs._genlist
3371
3364
3372 iterate ascending:
3365 iterate ascending:
3373 >>> rs = addset(xs, ys, ascending=True)
3366 >>> rs = addset(xs, ys, ascending=True)
3374 >>> # (use generator because pypy could call len())
3367 >>> # (use generator because pypy could call len())
3375 >>> list(x for x in rs), list(x for x in rs.fastasc()) # without _asclist
3368 >>> list(x for x in rs), list(x for x in rs.fastasc()) # without _asclist
3376 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
3369 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
3377 >>> assert not rs._asclist
3370 >>> assert not rs._asclist
3378 >>> len(rs)
3371 >>> len(rs)
3379 5
3372 5
3380 >>> [x for x in rs], [x for x in rs.fastasc()]
3373 >>> [x for x in rs], [x for x in rs.fastasc()]
3381 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
3374 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
3382 >>> assert rs._asclist
3375 >>> assert rs._asclist
3383
3376
3384 iterate descending:
3377 iterate descending:
3385 >>> rs = addset(xs, ys, ascending=False)
3378 >>> rs = addset(xs, ys, ascending=False)
3386 >>> # (use generator because pypy could call len())
3379 >>> # (use generator because pypy could call len())
3387 >>> list(x for x in rs), list(x for x in rs.fastdesc()) # without _asclist
3380 >>> list(x for x in rs), list(x for x in rs.fastdesc()) # without _asclist
3388 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
3381 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
3389 >>> assert not rs._asclist
3382 >>> assert not rs._asclist
3390 >>> len(rs)
3383 >>> len(rs)
3391 5
3384 5
3392 >>> [x for x in rs], [x for x in rs.fastdesc()]
3385 >>> [x for x in rs], [x for x in rs.fastdesc()]
3393 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
3386 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
3394 >>> assert rs._asclist
3387 >>> assert rs._asclist
3395
3388
3396 iterate ascending without fastasc:
3389 iterate ascending without fastasc:
3397 >>> rs = addset(xs, generatorset(ys), ascending=True)
3390 >>> rs = addset(xs, generatorset(ys), ascending=True)
3398 >>> assert rs.fastasc is None
3391 >>> assert rs.fastasc is None
3399 >>> [x for x in rs]
3392 >>> [x for x in rs]
3400 [0, 2, 3, 4, 5]
3393 [0, 2, 3, 4, 5]
3401
3394
3402 iterate descending without fastdesc:
3395 iterate descending without fastdesc:
3403 >>> rs = addset(generatorset(xs), ys, ascending=False)
3396 >>> rs = addset(generatorset(xs), ys, ascending=False)
3404 >>> assert rs.fastdesc is None
3397 >>> assert rs.fastdesc is None
3405 >>> [x for x in rs]
3398 >>> [x for x in rs]
3406 [5, 4, 3, 2, 0]
3399 [5, 4, 3, 2, 0]
3407 """
3400 """
3408 def __init__(self, revs1, revs2, ascending=None):
3401 def __init__(self, revs1, revs2, ascending=None):
3409 self._r1 = revs1
3402 self._r1 = revs1
3410 self._r2 = revs2
3403 self._r2 = revs2
3411 self._iter = None
3404 self._iter = None
3412 self._ascending = ascending
3405 self._ascending = ascending
3413 self._genlist = None
3406 self._genlist = None
3414 self._asclist = None
3407 self._asclist = None
3415
3408
3416 def __len__(self):
3409 def __len__(self):
3417 return len(self._list)
3410 return len(self._list)
3418
3411
3419 def __nonzero__(self):
3412 def __nonzero__(self):
3420 return bool(self._r1) or bool(self._r2)
3413 return bool(self._r1) or bool(self._r2)
3421
3414
3422 @util.propertycache
3415 @util.propertycache
3423 def _list(self):
3416 def _list(self):
3424 if not self._genlist:
3417 if not self._genlist:
3425 self._genlist = baseset(iter(self))
3418 self._genlist = baseset(iter(self))
3426 return self._genlist
3419 return self._genlist
3427
3420
3428 def __iter__(self):
3421 def __iter__(self):
3429 """Iterate over both collections without repeating elements
3422 """Iterate over both collections without repeating elements
3430
3423
3431 If the ascending attribute is not set, iterate over the first one and
3424 If the ascending attribute is not set, iterate over the first one and
3432 then over the second one, checking for membership in the first one so we
3425 then over the second one, checking for membership in the first one so we
3433 don't yield any duplicates.
3426 don't yield any duplicates.
3434
3427
3435 If the ascending attribute is set, iterate over both collections at the
3428 If the ascending attribute is set, iterate over both collections at the
3436 same time, yielding only one value at a time in the given order.
3429 same time, yielding only one value at a time in the given order.
3437 """
3430 """
3438 if self._ascending is None:
3431 if self._ascending is None:
3439 if self._genlist:
3432 if self._genlist:
3440 return iter(self._genlist)
3433 return iter(self._genlist)
3441 def arbitraryordergen():
3434 def arbitraryordergen():
3442 for r in self._r1:
3435 for r in self._r1:
3443 yield r
3436 yield r
3444 inr1 = self._r1.__contains__
3437 inr1 = self._r1.__contains__
3445 for r in self._r2:
3438 for r in self._r2:
3446 if not inr1(r):
3439 if not inr1(r):
3447 yield r
3440 yield r
3448 return arbitraryordergen()
3441 return arbitraryordergen()
3449 # try to use our own fast iterator if it exists
3442 # try to use our own fast iterator if it exists
3450 self._trysetasclist()
3443 self._trysetasclist()
3451 if self._ascending:
3444 if self._ascending:
3452 attr = 'fastasc'
3445 attr = 'fastasc'
3453 else:
3446 else:
3454 attr = 'fastdesc'
3447 attr = 'fastdesc'
3455 it = getattr(self, attr)
3448 it = getattr(self, attr)
3456 if it is not None:
3449 if it is not None:
3457 return it()
3450 return it()
3458 # maybe only one of the two components supports fast iteration
3451 # maybe only one of the two components supports fast iteration
3459 # get iterator for _r1
3452 # get iterator for _r1
3460 iter1 = getattr(self._r1, attr)
3453 iter1 = getattr(self._r1, attr)
3461 if iter1 is None:
3454 if iter1 is None:
3462 # let's avoid side effects (not sure it matters)
3455 # let's avoid side effects (not sure it matters)
3463 iter1 = iter(sorted(self._r1, reverse=not self._ascending))
3456 iter1 = iter(sorted(self._r1, reverse=not self._ascending))
3464 else:
3457 else:
3465 iter1 = iter1()
3458 iter1 = iter1()
3466 # get iterator for _r2
3459 # get iterator for _r2
3467 iter2 = getattr(self._r2, attr)
3460 iter2 = getattr(self._r2, attr)
3468 if iter2 is None:
3461 if iter2 is None:
3469 # let's avoid side effects (not sure it matters)
3462 # let's avoid side effects (not sure it matters)
3470 iter2 = iter(sorted(self._r2, reverse=not self._ascending))
3463 iter2 = iter(sorted(self._r2, reverse=not self._ascending))
3471 else:
3464 else:
3472 iter2 = iter2()
3465 iter2 = iter2()
3473 return _iterordered(self._ascending, iter1, iter2)
3466 return _iterordered(self._ascending, iter1, iter2)
3474
3467
3475 def _trysetasclist(self):
3468 def _trysetasclist(self):
3476 """populate the _asclist attribute if possible and necessary"""
3469 """populate the _asclist attribute if possible and necessary"""
3477 if self._genlist is not None and self._asclist is None:
3470 if self._genlist is not None and self._asclist is None:
3478 self._asclist = sorted(self._genlist)
3471 self._asclist = sorted(self._genlist)
3479
3472
3480 @property
3473 @property
3481 def fastasc(self):
3474 def fastasc(self):
3482 self._trysetasclist()
3475 self._trysetasclist()
3483 if self._asclist is not None:
3476 if self._asclist is not None:
3484 return self._asclist.__iter__
3477 return self._asclist.__iter__
3485 iter1 = self._r1.fastasc
3478 iter1 = self._r1.fastasc
3486 iter2 = self._r2.fastasc
3479 iter2 = self._r2.fastasc
3487 if None in (iter1, iter2):
3480 if None in (iter1, iter2):
3488 return None
3481 return None
3489 return lambda: _iterordered(True, iter1(), iter2())
3482 return lambda: _iterordered(True, iter1(), iter2())
3490
3483
3491 @property
3484 @property
3492 def fastdesc(self):
3485 def fastdesc(self):
3493 self._trysetasclist()
3486 self._trysetasclist()
3494 if self._asclist is not None:
3487 if self._asclist is not None:
3495 return self._asclist.__reversed__
3488 return self._asclist.__reversed__
3496 iter1 = self._r1.fastdesc
3489 iter1 = self._r1.fastdesc
3497 iter2 = self._r2.fastdesc
3490 iter2 = self._r2.fastdesc
3498 if None in (iter1, iter2):
3491 if None in (iter1, iter2):
3499 return None
3492 return None
3500 return lambda: _iterordered(False, iter1(), iter2())
3493 return lambda: _iterordered(False, iter1(), iter2())
3501
3494
3502 def __contains__(self, x):
3495 def __contains__(self, x):
3503 return x in self._r1 or x in self._r2
3496 return x in self._r1 or x in self._r2
3504
3497
3505 def sort(self, reverse=False):
3498 def sort(self, reverse=False):
3506 """Sort the added set
3499 """Sort the added set
3507
3500
3508 For this we use the cached list with all the generated values and if we
3501 For this we use the cached list with all the generated values and if we
3509 know they are ascending or descending we can sort them in a smart way.
3502 know they are ascending or descending we can sort them in a smart way.
3510 """
3503 """
3511 self._ascending = not reverse
3504 self._ascending = not reverse
3512
3505
3513 def isascending(self):
3506 def isascending(self):
3514 return self._ascending is not None and self._ascending
3507 return self._ascending is not None and self._ascending
3515
3508
3516 def isdescending(self):
3509 def isdescending(self):
3517 return self._ascending is not None and not self._ascending
3510 return self._ascending is not None and not self._ascending
3518
3511
3519 def istopo(self):
3512 def istopo(self):
3520 # not worth the trouble asserting if the two sets combined are still
3513 # not worth the trouble asserting if the two sets combined are still
3521 # in topological order. Use the sort() predicate to explicitly sort
3514 # in topological order. Use the sort() predicate to explicitly sort
3522 # again instead.
3515 # again instead.
3523 return False
3516 return False
3524
3517
3525 def reverse(self):
3518 def reverse(self):
3526 if self._ascending is None:
3519 if self._ascending is None:
3527 self._list.reverse()
3520 self._list.reverse()
3528 else:
3521 else:
3529 self._ascending = not self._ascending
3522 self._ascending = not self._ascending
3530
3523
3531 def first(self):
3524 def first(self):
3532 for x in self:
3525 for x in self:
3533 return x
3526 return x
3534 return None
3527 return None
3535
3528
3536 def last(self):
3529 def last(self):
3537 self.reverse()
3530 self.reverse()
3538 val = self.first()
3531 val = self.first()
3539 self.reverse()
3532 self.reverse()
3540 return val
3533 return val
3541
3534
3542 def __repr__(self):
3535 def __repr__(self):
3543 d = {None: '', False: '-', True: '+'}[self._ascending]
3536 d = {None: '', False: '-', True: '+'}[self._ascending]
3544 return '<%s%s %r, %r>' % (type(self).__name__, d, self._r1, self._r2)
3537 return '<%s%s %r, %r>' % (type(self).__name__, d, self._r1, self._r2)
3545
3538
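# The fastasc/fastdesc properties above hand two already-sorted iterators to
# _iterordered() (defined earlier in this module), which merges them lazily
# and yields revisions present in both inputs only once. A rough standalone
# sketch of that behaviour -- illustrative only, _demoorderedunion is not part
# of this module's API:

def _demoorderedunion(iter1, iter2):
    """Lazily yield the ascending, duplicate-free union of two ascending
    iterators, roughly what _iterordered(True, iter1, iter2) provides."""
    first = True
    last = None
    for rev in heapq.merge(iter1, iter2):
        # heapq.merge interleaves two sorted inputs in sorted order; dropping
        # consecutive duplicates then gives a set-like union
        if first or rev != last:
            yield rev
            last = rev
            first = False
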
class generatorset(abstractsmartset):
    """Wrap a generator for lazy iteration

    Wrapper structure for generators that provides lazy membership and can
    be iterated more than once.
    When asked for membership it generates values until either it finds the
    requested one or has gone through all the elements in the generator
    """
    def __init__(self, gen, iterasc=None):
        """
        gen: a generator producing the values for the generatorset.
        """
        self._gen = gen
        self._asclist = None
        self._cache = {}
        self._genlist = []
        self._finished = False
        self._ascending = True
        if iterasc is not None:
            if iterasc:
                self.fastasc = self._iterator
                self.__contains__ = self._asccontains
            else:
                self.fastdesc = self._iterator
                self.__contains__ = self._desccontains

    def __nonzero__(self):
        # Do not use 'for r in self' because it will enforce the iteration
        # order (default ascending), possibly unrolling a whole descending
        # iterator.
        if self._genlist:
            return True
        for r in self._consumegen():
            return True
        return False

    def __contains__(self, x):
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True

        self._cache[x] = False
        return False

    def _asccontains(self, x):
        """version of contains optimised for ascending generator"""
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True
            if l > x:
                break

        self._cache[x] = False
        return False

    def _desccontains(self, x):
        """version of contains optimised for descending generator"""
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True
            if l < x:
                break

        self._cache[x] = False
        return False

    def __iter__(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        if it is not None:
            return it()
        # we need to consume the iterator
        for x in self._consumegen():
            pass
        # recall the same code
        return iter(self)

    def _iterator(self):
        if self._finished:
            return iter(self._genlist)

        # We have to use this complex iteration strategy to allow multiple
        # iterations at the same time. We need to be able to catch revision
        # removed from _consumegen and added to genlist in another instance.
        #
        # Getting rid of it would provide an about 15% speed up on this
        # iteration.
        genlist = self._genlist
        nextrev = self._consumegen().next
        _len = len # cache global lookup
        def gen():
            i = 0
            while True:
                if i < _len(genlist):
                    yield genlist[i]
                else:
                    yield nextrev()
                i += 1
        return gen()

    def _consumegen(self):
        cache = self._cache
        genlist = self._genlist.append
        for item in self._gen:
            cache[item] = True
            genlist(item)
            yield item
        if not self._finished:
            self._finished = True
            asc = self._genlist[:]
            asc.sort()
            self._asclist = asc
            self.fastasc = asc.__iter__
            self.fastdesc = asc.__reversed__

    def __len__(self):
        for x in self._consumegen():
            pass
        return len(self._genlist)

    def sort(self, reverse=False):
        self._ascending = not reverse

    def reverse(self):
        self._ascending = not self._ascending

    def isascending(self):
        return self._ascending

    def isdescending(self):
        return not self._ascending

    def istopo(self):
        # not worth the trouble asserting if the two sets combined are still
        # in topographical order. Use the sort() predicate to explicitly sort
        # again instead.
        return False

    def first(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        if it is None:
            # we need to consume all and try again
            for x in self._consumegen():
                pass
            return self.first()
        return next(it(), None)

    def last(self):
        if self._ascending:
            it = self.fastdesc
        else:
            it = self.fastasc
        if it is None:
            # we need to consume all and try again
            for x in self._consumegen():
                pass
            return self.first()
        return next(it(), None)

    def __repr__(self):
        d = {False: '-', True: '+'}[self._ascending]
        return '<%s%s>' % (type(self).__name__, d)

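# A minimal usage sketch for generatorset (illustrative only; _demolazyrevs is
# not part of this module's API, and 'repo' is assumed to be a localrepo-style
# object whose changelog exposes revs()):

def _demolazyrevs(repo):
    """Wrap a generator of even revisions in a generatorset.

    iterasc=True tells the set that the generator is ascending, so it can
    expose a fast ascending iterator without buffering everything first."""
    revs = generatorset((r for r in repo.changelog.revs() if r % 2 == 0),
                        iterasc=True)
    found = 4 in revs   # only consumes the generator until 4 is produced
    return found, list(revs)  # cached values are replayed, the rest consumed
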
class spanset(abstractsmartset):
    """Duck type for baseset class which represents a range of revisions and
    can work lazily and without having all the range in memory

    Note that spanset(x, y) behave almost like xrange(x, y) except for two
    notable points:
    - when x < y it will be automatically descending,
    - revision filtered with this repoview will be skipped.

    """
    def __init__(self, repo, start=0, end=None):
        """
        start: first revision included the set
               (default to 0)
        end: first revision excluded (last+1)
             (default to len(repo)

        Spanset will be descending if `end` < `start`.
        """
        if end is None:
            end = len(repo)
        self._ascending = start <= end
        if not self._ascending:
            start, end = end + 1, start +1
        self._start = start
        self._end = end
        self._hiddenrevs = repo.changelog.filteredrevs

    def sort(self, reverse=False):
        self._ascending = not reverse

    def reverse(self):
        self._ascending = not self._ascending

    def istopo(self):
        # not worth the trouble asserting if the two sets combined are still
        # in topographical order. Use the sort() predicate to explicitly sort
        # again instead.
        return False

    def _iterfilter(self, iterrange):
        s = self._hiddenrevs
        for r in iterrange:
            if r not in s:
                yield r

    def __iter__(self):
        if self._ascending:
            return self.fastasc()
        else:
            return self.fastdesc()

    def fastasc(self):
        iterrange = xrange(self._start, self._end)
        if self._hiddenrevs:
            return self._iterfilter(iterrange)
        return iter(iterrange)

    def fastdesc(self):
        iterrange = xrange(self._end - 1, self._start - 1, -1)
        if self._hiddenrevs:
            return self._iterfilter(iterrange)
        return iter(iterrange)

    def __contains__(self, rev):
        hidden = self._hiddenrevs
        return ((self._start <= rev < self._end)
                and not (hidden and rev in hidden))

    def __nonzero__(self):
        for r in self:
            return True
        return False

    def __len__(self):
        if not self._hiddenrevs:
            return abs(self._end - self._start)
        else:
            count = 0
            start = self._start
            end = self._end
            for rev in self._hiddenrevs:
                if (end < rev <= start) or (start <= rev < end):
                    count += 1
            return abs(self._end - self._start) - count

    def isascending(self):
        return self._ascending

    def isdescending(self):
        return not self._ascending

    def first(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        for x in it():
            return x
        return None

    def last(self):
        if self._ascending:
            it = self.fastdesc
        else:
            it = self.fastasc
        for x in it():
            return x
        return None

    def __repr__(self):
        d = {False: '-', True: '+'}[self._ascending]
        return '<%s%s %d:%d>' % (type(self).__name__, d,
                                 self._start, self._end - 1)

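# A small sketch of spanset ordering (illustrative only; _demospans is not
# part of this module's API, and the repo is assumed to have at least five
# visible revisions):

def _demospans(repo):
    """Return revisions 0..4 ascending and 4..0 descending, skipping any
    revision hidden by the current repoview filter."""
    asc = spanset(repo, 0, 5)    # ascending because start <= end
    desc = spanset(repo, 4, -1)  # end < start, so automatically descending
    return list(asc), list(desc)
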
class fullreposet(spanset):
    """a set containing all revisions in the repo

    This class exists to host special optimization and magic to handle virtual
    revisions such as "null".
    """

    def __init__(self, repo):
        super(fullreposet, self).__init__(repo)

    def __and__(self, other):
        """As self contains the whole repo, all of the other set should also be
        in self. Therefore `self & other = other`.

        This boldly assumes the other contains valid revs only.
        """
        # other not a smartset, make is so
        if not util.safehasattr(other, 'isascending'):
            # filter out hidden revision
            # (this boldly assumes all smartset are pure)
            #
            # `other` was used with "&", let's assume this is a set like
            # object.
            other = baseset(other - self._hiddenrevs)

        other.sort(reverse=self.isdescending())
        return other

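# Sketch of the __and__ shortcut above (illustrative only; _demofullrepoand is
# not part of this module's API): intersecting the full repo with another set
# simply hands back that set, reordered to match.

def _demofullrepoand(repo):
    """fullreposet(repo) & baseset([5, 3, 8]) returns the baseset itself,
    sorted ascending because the full set iterates ascending by default."""
    return fullreposet(repo) & baseset([5, 3, 8])
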
def prettyformatset(revs):
    lines = []
    rs = repr(revs)
    p = 0
    while p < len(rs):
        q = rs.find('<', p + 1)
        if q < 0:
            q = len(rs)
        l = rs.count('<', 0, p) - rs.count('>', 0, p)
        assert l >= 0
        lines.append((l, rs[p:q].rstrip()))
        p = q
    return '\n'.join('  ' * l + s for l, s in lines)

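# Example of what prettyformatset() produces (approximate, illustrative only):
# for a combined set whose repr() is
#
#   <filteredset <spanset+ 0:9>, <not <baseset [1]>>>
#
# each '<...' component is placed on its own line, indented by nesting depth,
# which is the readable form used in 'hg debugrevspec'-style output:
#
#   <filteredset
#     <spanset+ 0:9>,
#     <not
#       <baseset [1]>>>
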
def loadpredicate(ui, extname, registrarobj):
    """Load revset predicates from specified registrarobj
    """
    for name, func in registrarobj._table.iteritems():
        symbols[name] = func
        if func._safe:
            safesymbols.add(name)

# load built-in predicates explicitly to setup safesymbols
loadpredicate(None, None, predicate)

# tell hggettext to extract docstrings from these functions:
i18nfunctions = symbols.values()
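
# How loadpredicate() above is typically fed by an extension (a sketch of the
# usual registrar pattern; 'fivelatest' is a made-up predicate name and the
# snippet is illustrative only):
#
#   from mercurial import registrar, revset
#
#   revsetpredicate = registrar.revsetpredicate()
#
#   @revsetpredicate('fivelatest()', safe=True)
#   def fivelatest(repo, subset, x):
#       """Changesets among the five highest revision numbers."""
#       start = max(len(repo) - 5, 0)
#       return subset & revset.spanset(repo, start, len(repo))
#
# When the extension is loaded, its 'revsetpredicate' table is handed to
# loadpredicate(), which copies each entry into symbols and, for predicates
# declared safe=True, into safesymbols as well.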