revset: categorize wdir() as very fast function...
Yuya Nishihara - r30700:323f0c4b default
@@ -1,3857 +1,3857 @@
# revset.py - revision set queries for mercurial
#
# Copyright 2010 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import heapq
import re
import string

from .i18n import _
from . import (
    destutil,
    encoding,
    error,
    hbisect,
    match as matchmod,
    node,
    obsolete as obsmod,
    parser,
    pathutil,
    phases,
    pycompat,
    registrar,
    repoview,
    util,
)

def _revancestors(repo, revs, followfirst):
    """Like revlog.ancestors(), but supports followfirst."""
    if followfirst:
        cut = 1
    else:
        cut = None
    cl = repo.changelog

    def iterate():
        revs.sort(reverse=True)
        irevs = iter(revs)
        h = []

        inputrev = next(irevs, None)
        if inputrev is not None:
            heapq.heappush(h, -inputrev)

        seen = set()
        while h:
            current = -heapq.heappop(h)
            if current == inputrev:
                inputrev = next(irevs, None)
                if inputrev is not None:
                    heapq.heappush(h, -inputrev)
            if current not in seen:
                seen.add(current)
                yield current
                for parent in cl.parentrevs(current)[:cut]:
                    if parent != node.nullrev:
                        heapq.heappush(h, -parent)

    return generatorset(iterate(), iterasc=False)
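
# [Illustrative sketch -- not part of revset.py] _revancestors() walks
# ancestors from highest to lowest revision by pushing *negated* revision
# numbers onto a heapq, which turns Python's min-heap into a max-heap.
# A minimal standalone demonstration of that trick:

import heapq as _heapq_demo  # stdlib; aliased so it cannot clash with the real import above

def _descending_demo(revs):
    # yield the input numbers from largest to smallest using a min-heap of negatives
    h = [-r for r in revs]
    _heapq_demo.heapify(h)
    while h:
        yield -_heapq_demo.heappop(h)

assert list(_descending_demo([3, 10, 7])) == [10, 7, 3]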

def _revdescendants(repo, revs, followfirst):
    """Like revlog.descendants() but supports followfirst."""
    if followfirst:
        cut = 1
    else:
        cut = None

    def iterate():
        cl = repo.changelog
        # XXX this should be 'parentset.min()' assuming 'parentset' is a
        # smartset (and if it is not, it should.)
        first = min(revs)
        nullrev = node.nullrev
        if first == nullrev:
            # Are there nodes with a null first parent and a non-null
            # second one? Maybe. Do we care? Probably not.
            for i in cl:
                yield i
        else:
            seen = set(revs)
            for i in cl.revs(first + 1):
                for x in cl.parentrevs(i)[:cut]:
                    if x != nullrev and x in seen:
                        seen.add(i)
                        yield i
                        break

    return generatorset(iterate(), iterasc=True)
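
# [Illustrative sketch -- not part of revset.py] _revdescendants() relies on
# revision numbers increasing from parent to child: it scans forward from the
# smallest root and marks every revision whose parent is already marked. The
# same idea on a toy DAG expressed as a {rev: (p1, p2)} dict (hypothetical
# data, -1 standing in for nullrev):

def _descendants_demo(parents, roots):
    seen = set(roots)
    out = []
    for rev in sorted(r for r in parents if r > min(roots)):
        if any(p in seen for p in parents[rev] if p != -1):
            seen.add(rev)
            out.append(rev)
    return out

# linear history 0 -- 1 -- 2 plus a branch 1 -- 3: descendants of {1} are [2, 3]
_demo_parents = {0: (-1, -1), 1: (0, -1), 2: (1, -1), 3: (1, -1)}
assert _descendants_demo(_demo_parents, {1}) == [2, 3]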

def _reachablerootspure(repo, minroot, roots, heads, includepath):
    """return (heads(::<roots> and ::<heads>))

    If includepath is True, return (<roots>::<heads>)."""
    if not roots:
        return []
    parentrevs = repo.changelog.parentrevs
    roots = set(roots)
    visit = list(heads)
    reachable = set()
    seen = {}
    # prefetch all the things! (because python is slow)
    reached = reachable.add
    dovisit = visit.append
    nextvisit = visit.pop
    # open-code the post-order traversal due to the tiny size of
    # sys.getrecursionlimit()
    while visit:
        rev = nextvisit()
        if rev in roots:
            reached(rev)
            if not includepath:
                continue
        parents = parentrevs(rev)
        seen[rev] = parents
        for parent in parents:
            if parent >= minroot and parent not in seen:
                dovisit(parent)
    if not reachable:
        return baseset()
    if not includepath:
        return reachable
    for rev in sorted(seen):
        for parent in seen[rev]:
            if parent in reachable:
                reached(rev)
    return reachable

def reachableroots(repo, roots, heads, includepath=False):
    """return (heads(::<roots> and ::<heads>))

    If includepath is True, return (<roots>::<heads>)."""
    if not roots:
        return baseset()
    minroot = roots.min()
    roots = list(roots)
    heads = list(heads)
    try:
        revs = repo.changelog.reachableroots(minroot, heads, roots, includepath)
    except AttributeError:
        revs = _reachablerootspure(repo, minroot, roots, heads, includepath)
    revs = baseset(revs)
    revs.sort()
    return revs
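
# [Illustrative sketch -- not part of revset.py] reachableroots() above tries
# the changelog index's accelerated reachableroots() first and falls back to
# the pure-Python _reachablerootspure() when the attribute is missing. The
# general shape of that dispatch, with hypothetical names:

def _fast_or_pure_demo(index, pure_impl, *args):
    try:
        fast_impl = index.reachableroots  # may not exist on a pure-Python index
    except AttributeError:
        return pure_impl(*args)
    return fast_impl(*args)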

elements = {
    # token-type: binding-strength, primary, prefix, infix, suffix
    "(": (21, None, ("group", 1, ")"), ("func", 1, ")"), None),
    "##": (20, None, None, ("_concat", 20), None),
    "~": (18, None, None, ("ancestor", 18), None),
    "^": (18, None, None, ("parent", 18), "parentpost"),
    "-": (5, None, ("negate", 19), ("minus", 5), None),
    "::": (17, None, ("dagrangepre", 17), ("dagrange", 17), "dagrangepost"),
    "..": (17, None, ("dagrangepre", 17), ("dagrange", 17), "dagrangepost"),
    ":": (15, "rangeall", ("rangepre", 15), ("range", 15), "rangepost"),
    "not": (10, None, ("not", 10), None, None),
    "!": (10, None, ("not", 10), None, None),
    "and": (5, None, None, ("and", 5), None),
    "&": (5, None, None, ("and", 5), None),
    "%": (5, None, None, ("only", 5), "onlypost"),
    "or": (4, None, None, ("or", 4), None),
    "|": (4, None, None, ("or", 4), None),
    "+": (4, None, None, ("or", 4), None),
    "=": (3, None, None, ("keyvalue", 3), None),
    ",": (2, None, None, ("list", 2), None),
    ")": (0, None, None, None, None),
    "symbol": (0, "symbol", None, None, None),
    "string": (0, "string", None, None, None),
    "end": (0, None, None, None, None),
}
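
# [Illustrative note -- not part of revset.py] The binding strengths above
# decide how an expression groups. For example, in "1:2 and 3" the infix ":"
# (strength 15) binds tighter than "and" (strength 5), so the parse tree is
# roughly
#
#     ('and', ('range', ('symbol', '1'), ('symbol', '2')), ('symbol', '3'))
#
# Likewise prefix "-" (negate, 19) binds tighter than infix "-" (minus, 5),
# which is how negation and set difference are kept apart.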

keywords = set(['and', 'or', 'not'])

# default set of valid characters for the initial letter of symbols
_syminitletters = set(
    string.ascii_letters +
    string.digits + pycompat.sysstr('._@')) | set(map(chr, xrange(128, 256)))

# default set of valid characters for non-initial letters of symbols
_symletters = _syminitletters | set(pycompat.sysstr('-/'))

def tokenize(program, lookup=None, syminitletters=None, symletters=None):
    '''
    Parse a revset statement into a stream of tokens

    ``syminitletters`` is the set of valid characters for the initial
    letter of symbols.

    By default, character ``c`` is recognized as valid for initial
    letter of symbols, if ``c.isalnum() or c in '._@' or ord(c) > 127``.

    ``symletters`` is the set of valid characters for non-initial
    letters of symbols.

    By default, character ``c`` is recognized as valid for non-initial
    letters of symbols, if ``c.isalnum() or c in '-._/@' or ord(c) > 127``.

    Check that @ is a valid unquoted token character (issue3686):
    >>> list(tokenize("@::"))
    [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]

    '''
    if syminitletters is None:
        syminitletters = _syminitletters
    if symletters is None:
        symletters = _symletters

    if program and lookup:
        # attempt to parse old-style ranges first to deal with
        # things like old-tag which contain query metacharacters
        parts = program.split(':', 1)
        if all(lookup(sym) for sym in parts if sym):
            if parts[0]:
                yield ('symbol', parts[0], 0)
            if len(parts) > 1:
                s = len(parts[0])
                yield (':', None, s)
                if parts[1]:
                    yield ('symbol', parts[1], s + 1)
            yield ('end', None, len(program))
            return

    pos, l = 0, len(program)
    while pos < l:
        c = program[pos]
        if c.isspace(): # skip inter-token whitespace
            pass
        elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
            yield ('::', None, pos)
            pos += 1 # skip ahead
        elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
            yield ('..', None, pos)
            pos += 1 # skip ahead
        elif c == '#' and program[pos:pos + 2] == '##': # look ahead carefully
            yield ('##', None, pos)
            pos += 1 # skip ahead
        elif c in "():=,-|&+!~^%": # handle simple operators
            yield (c, None, pos)
        elif (c in '"\'' or c == 'r' and
              program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
            if c == 'r':
                pos += 1
                c = program[pos]
                decode = lambda x: x
            else:
                decode = parser.unescapestr
            pos += 1
            s = pos
            while pos < l: # find closing quote
                d = program[pos]
                if d == '\\': # skip over escaped characters
                    pos += 2
                    continue
                if d == c:
                    yield ('string', decode(program[s:pos]), s)
                    break
                pos += 1
            else:
                raise error.ParseError(_("unterminated string"), s)
        # gather up a symbol/keyword
        elif c in syminitletters:
            s = pos
            pos += 1
            while pos < l: # find end of symbol
                d = program[pos]
                if d not in symletters:
                    break
                if d == '.' and program[pos - 1] == '.': # special case for ..
                    pos -= 1
                    break
                pos += 1
            sym = program[s:pos]
            if sym in keywords: # operator keywords
                yield (sym, None, s)
            elif '-' in sym:
                # some jerk gave us foo-bar-baz, try to check if it's a symbol
                if lookup and lookup(sym):
                    # looks like a real symbol
                    yield ('symbol', sym, s)
                else:
                    # looks like an expression
                    parts = sym.split('-')
                    for p in parts[:-1]:
                        if p: # possible consecutive -
                            yield ('symbol', p, s)
                        s += len(p)
                        yield ('-', None, pos)
                        s += 1
                    if parts[-1]: # possible trailing -
                        yield ('symbol', parts[-1], s)
            else:
                yield ('symbol', sym, s)
                pos -= 1
        else:
            raise error.ParseError(_("syntax error in revset '%s'") %
                                   program, pos)
        pos += 1
    yield ('end', None, pos)
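
# [Illustrative sketch -- not part of revset.py] A doctest-style check of the
# token stream produced by the tokenize() generator defined just above,
# assuming this module's behaviour:

assert list(tokenize("p1()")) == [
    ('symbol', 'p1', 0),
    ('(', None, 2),
    (')', None, 3),
    ('end', None, 4),
]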

# helpers

def getsymbol(x):
    if x and x[0] == 'symbol':
        return x[1]
    raise error.ParseError(_('not a symbol'))

def getstring(x, err):
    if x and (x[0] == 'string' or x[0] == 'symbol'):
        return x[1]
    raise error.ParseError(err)

def getlist(x):
    if not x:
        return []
    if x[0] == 'list':
        return list(x[1:])
    return [x]

def getargs(x, min, max, err):
    l = getlist(x)
    if len(l) < min or (max >= 0 and len(l) > max):
        raise error.ParseError(err)
    return l

def getargsdict(x, funcname, keys):
    return parser.buildargsdict(getlist(x), funcname, keys.split(),
                                keyvaluenode='keyvalue', keynode='symbol')

def getset(repo, subset, x):
    if not x:
        raise error.ParseError(_("missing argument"))
    s = methods[x[0]](repo, subset, *x[1:])
    if util.safehasattr(s, 'isascending'):
        return s
    # else case should not happen, because all non-func are internal,
    # ignoring for now.
    if x[0] == 'func' and x[1][0] == 'symbol' and x[1][1] in symbols:
        repo.ui.deprecwarn('revset "%s" uses list instead of smartset'
                           % x[1][1],
                           '3.9')
    return baseset(s)

def _getrevsource(repo, r):
    extra = repo[r].extra()
    for label in ('source', 'transplant_source', 'rebase_source'):
        if label in extra:
            try:
                return repo[extra[label]].rev()
            except error.RepoLookupError:
                pass
    return None

# operator methods

def stringset(repo, subset, x):
    x = repo[x].rev()
    if (x in subset
        or x == node.nullrev and isinstance(subset, fullreposet)):
        return baseset([x])
    return baseset()

def rangeset(repo, subset, x, y, order):
    m = getset(repo, fullreposet(repo), x)
    n = getset(repo, fullreposet(repo), y)

    if not m or not n:
        return baseset()
    return _makerangeset(repo, subset, m.first(), n.last(), order)

def rangepre(repo, subset, y, order):
    # ':y' can't be rewritten to '0:y' since '0' may be hidden
    n = getset(repo, fullreposet(repo), y)
    if not n:
        return baseset()
    return _makerangeset(repo, subset, 0, n.last(), order)

def _makerangeset(repo, subset, m, n, order):
    if m == n:
        r = baseset([m])
    elif n == node.wdirrev:
        r = spanset(repo, m, len(repo)) + baseset([n])
    elif m == node.wdirrev:
        r = baseset([m]) + spanset(repo, len(repo) - 1, n - 1)
    elif m < n:
        r = spanset(repo, m, n + 1)
    else:
        r = spanset(repo, m, n - 1)

    if order == defineorder:
        return r & subset
    else:
        # carrying the sorting over when possible would be more efficient
        return subset & r

def dagrange(repo, subset, x, y, order):
    r = fullreposet(repo)
    xs = reachableroots(repo, getset(repo, r, x), getset(repo, r, y),
                        includepath=True)
    return subset & xs

def andset(repo, subset, x, y, order):
    return getset(repo, getset(repo, subset, x), y)

def differenceset(repo, subset, x, y, order):
    return getset(repo, subset, x) - getset(repo, subset, y)

def _orsetlist(repo, subset, xs):
    assert xs
    if len(xs) == 1:
        return getset(repo, subset, xs[0])
    p = len(xs) // 2
    a = _orsetlist(repo, subset, xs[:p])
    b = _orsetlist(repo, subset, xs[p:])
    return a + b

def orset(repo, subset, x, order):
    xs = getlist(x)
    if order == followorder:
        # slow path to take the subset order
        return subset & _orsetlist(repo, fullreposet(repo), xs)
    else:
        return _orsetlist(repo, subset, xs)
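
# [Illustrative sketch -- not part of revset.py] _orsetlist() above unions its
# operands by splitting the operand list in half and recursing, so the
# resulting tree of additions stays balanced instead of degenerating into a
# deeply nested left-leaning chain. The same divide-and-conquer shape on plain
# Python sets:

def _union_balanced_demo(sets):
    assert sets
    if len(sets) == 1:
        return sets[0]
    p = len(sets) // 2
    return _union_balanced_demo(sets[:p]) | _union_balanced_demo(sets[p:])

assert _union_balanced_demo([{1}, {2, 3}, {3, 4}]) == {1, 2, 3, 4}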

def notset(repo, subset, x, order):
    return subset - getset(repo, subset, x)

def listset(repo, subset, *xs):
    raise error.ParseError(_("can't use a list in this context"),
                           hint=_('see hg help "revsets.x or y"'))

def keyvaluepair(repo, subset, k, v):
    raise error.ParseError(_("can't use a key-value pair in this context"))

def func(repo, subset, a, b, order):
    f = getsymbol(a)
    if f in symbols:
        func = symbols[f]
        if getattr(func, '_takeorder', False):
            return func(repo, subset, b, order)
        return func(repo, subset, b)

    keep = lambda fn: getattr(fn, '__doc__', None) is not None

    syms = [s for (s, fn) in symbols.items() if keep(fn)]
    raise error.UnknownIdentifier(f, syms)

# functions

# symbols are callables like:
#   fn(repo, subset, x)
# with:
#   repo - current repository instance
#   subset - of revisions to be examined
#   x - argument in tree form
symbols = {}

# symbols which can't be used for a DoS attack for any given input
# (e.g. those which accept regexes as plain strings shouldn't be included)
# functions that just return a lot of changesets (like all) don't count here
safesymbols = set()

predicate = registrar.revsetpredicate()

@predicate('_destupdate')
def _destupdate(repo, subset, x):
    # experimental revset for update destination
    args = getargsdict(x, 'limit', 'clean check')
    return subset & baseset([destutil.destupdate(repo, **args)[0]])

@predicate('_destmerge')
def _destmerge(repo, subset, x):
    # experimental revset for merge destination
    sourceset = None
    if x is not None:
        sourceset = getset(repo, fullreposet(repo), x)
    return subset & baseset([destutil.destmerge(repo, sourceset=sourceset)])

@predicate('adds(pattern)', safe=True)
def adds(repo, subset, x):
    """Changesets that add a file matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "adds" is a keyword
    pat = getstring(x, _("adds requires a pattern"))
    return checkstatus(repo, subset, pat, 1)

@predicate('ancestor(*changeset)', safe=True)
def ancestor(repo, subset, x):
    """A greatest common ancestor of the changesets.

    Accepts 0 or more changesets.
    Will return empty list when passed no args.
    Greatest common ancestor of a single changeset is that changeset.
    """
    # i18n: "ancestor" is a keyword
    l = getlist(x)
    rl = fullreposet(repo)
    anc = None

    # (getset(repo, rl, i) for i in l) generates a list of lists
    for revs in (getset(repo, rl, i) for i in l):
        for r in revs:
            if anc is None:
                anc = repo[r]
            else:
                anc = anc.ancestor(repo[r])

    if anc is not None and anc.rev() in subset:
        return baseset([anc.rev()])
    return baseset()

def _ancestors(repo, subset, x, followfirst=False):
    heads = getset(repo, fullreposet(repo), x)
    if not heads:
        return baseset()
    s = _revancestors(repo, heads, followfirst)
    return subset & s

@predicate('ancestors(set)', safe=True)
def ancestors(repo, subset, x):
    """Changesets that are ancestors of a changeset in set.
    """
    return _ancestors(repo, subset, x)

@predicate('_firstancestors', safe=True)
def _firstancestors(repo, subset, x):
    # ``_firstancestors(set)``
    # Like ``ancestors(set)`` but follows only the first parents.
    return _ancestors(repo, subset, x, followfirst=True)

def ancestorspec(repo, subset, x, n, order):
    """``set~n``
    Changesets that are the Nth ancestor (first parents only) of a changeset
    in set.
    """
    try:
        n = int(n[1])
    except (TypeError, ValueError):
        raise error.ParseError(_("~ expects a number"))
    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        for i in range(n):
            r = cl.parentrevs(r)[0]
        ps.add(r)
    return subset & ps
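
# [Illustrative sketch -- not part of revset.py] ancestorspec() resolves
# "set~n" by following first parents n times from each member of the set.
# The same walk on a toy {rev: (p1, p2)} parent map (hypothetical data):

def _nth_ancestor_demo(parents, rev, n):
    for _unused in range(n):
        rev = parents[rev][0]  # first parent only
    return rev

_demo_parents2 = {0: (-1, -1), 1: (0, -1), 2: (1, -1), 3: (2, 1)}
assert _nth_ancestor_demo(_demo_parents2, 3, 2) == 1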

@predicate('author(string)', safe=True)
def author(repo, subset, x):
    """Alias for ``user(string)``.
    """
    # i18n: "author" is a keyword
    n = encoding.lower(getstring(x, _("author requires a string")))
    kind, pattern, matcher = _substringmatcher(n)
    return subset.filter(lambda x: matcher(encoding.lower(repo[x].user())),
                         condrepr=('<user %r>', n))

@predicate('bisect(string)', safe=True)
def bisect(repo, subset, x):
    """Changesets marked in the specified bisect status:

    - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
    - ``goods``, ``bads``        : csets topologically good/bad
    - ``range``                  : csets taking part in the bisection
    - ``pruned``                 : csets that are goods, bads or skipped
    - ``untested``               : csets whose fate is yet unknown
    - ``ignored``                : csets ignored due to DAG topology
    - ``current``                : the cset currently being bisected
    """
    # i18n: "bisect" is a keyword
    status = getstring(x, _("bisect requires a string")).lower()
    state = set(hbisect.get(repo, status))
    return subset & state

# Backward-compatibility
# - no help entry so that we do not advertise it any more
@predicate('bisected', safe=True)
def bisected(repo, subset, x):
    return bisect(repo, subset, x)

@predicate('bookmark([name])', safe=True)
def bookmark(repo, subset, x):
    """The named bookmark or all bookmarks.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a bookmark that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "bookmark" is a keyword
    args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
    if args:
        bm = getstring(args[0],
                       # i18n: "bookmark" is a keyword
                       _('the argument to bookmark must be a string'))
        kind, pattern, matcher = util.stringmatcher(bm)
        bms = set()
        if kind == 'literal':
            bmrev = repo._bookmarks.get(pattern, None)
            if not bmrev:
                raise error.RepoLookupError(_("bookmark '%s' does not exist")
                                            % pattern)
            bms.add(repo[bmrev].rev())
        else:
            matchrevs = set()
            for name, bmrev in repo._bookmarks.iteritems():
                if matcher(name):
                    matchrevs.add(bmrev)
            if not matchrevs:
                raise error.RepoLookupError(_("no bookmarks exist"
                                              " that match '%s'") % pattern)
            for bmrev in matchrevs:
                bms.add(repo[bmrev].rev())
    else:
        bms = set([repo[r].rev()
                   for r in repo._bookmarks.values()])
    bms -= set([node.nullrev])
    return subset & bms
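
# [Illustrative sketch -- not part of revset.py] Several predicates here
# (bookmark, branch, extra, ...) accept patterns where a "re:" prefix selects
# regular-expression matching and "literal:" forces an exact match. A
# simplified, standalone version of that convention (the real logic lives in
# util.stringmatcher, which is only called, not defined, in this file):

import re as _re_demo  # stdlib; aliased to avoid clashing with the real import above

def _stringmatcher_demo(pattern):
    if pattern.startswith('re:'):
        regex = _re_demo.compile(pattern[3:])
        return 're', pattern[3:], lambda s: regex.search(s) is not None
    if pattern.startswith('literal:'):
        pattern = pattern[8:]
    return 'literal', pattern, pattern.__eq__

_kind_demo, _pat_demo, _matcher_demo = _stringmatcher_demo('re:^release-')
assert _kind_demo == 're'
assert _matcher_demo('release-1.0') and not _matcher_demo('hotfix')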

@predicate('branch(string or set)', safe=True)
def branch(repo, subset, x):
    """
    All changesets belonging to the given branch or the branches of the given
    changesets.

    If `string` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a branch that actually starts with `re:`,
    use the prefix `literal:`.
    """
    getbi = repo.revbranchcache().branchinfo

    try:
        b = getstring(x, '')
    except error.ParseError:
        # not a string, but another revspec, e.g. tip()
        pass
    else:
        kind, pattern, matcher = util.stringmatcher(b)
        if kind == 'literal':
            # note: falls through to the revspec case if no branch with
            # this name exists and pattern kind is not specified explicitly
            if pattern in repo.branchmap():
                return subset.filter(lambda r: matcher(getbi(r)[0]),
                                     condrepr=('<branch %r>', b))
            if b.startswith('literal:'):
                raise error.RepoLookupError(_("branch '%s' does not exist")
                                            % pattern)
        else:
            return subset.filter(lambda r: matcher(getbi(r)[0]),
                                 condrepr=('<branch %r>', b))

    s = getset(repo, fullreposet(repo), x)
    b = set()
    for r in s:
        b.add(getbi(r)[0])
    c = s.__contains__
    return subset.filter(lambda r: c(r) or getbi(r)[0] in b,
                         condrepr=lambda: '<branch %r>' % sorted(b))

@predicate('bumped()', safe=True)
def bumped(repo, subset, x):
    """Mutable changesets marked as successors of public changesets.

    Only non-public and non-obsolete changesets can be `bumped`.
    """
    # i18n: "bumped" is a keyword
    getargs(x, 0, 0, _("bumped takes no arguments"))
    bumped = obsmod.getrevs(repo, 'bumped')
    return subset & bumped

@predicate('bundle()', safe=True)
def bundle(repo, subset, x):
    """Changesets in the bundle.

    Bundle must be specified by the -R option."""

    try:
        bundlerevs = repo.changelog.bundlerevs
    except AttributeError:
        raise error.Abort(_("no bundle provided - specify with -R"))
    return subset & bundlerevs

def checkstatus(repo, subset, pat, field):
    hasset = matchmod.patkind(pat) == 'set'

    mcache = [None]
    def matches(x):
        c = repo[x]
        if not mcache[0] or hasset:
            mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
        m = mcache[0]
        fname = None
        if not m.anypats() and len(m.files()) == 1:
            fname = m.files()[0]
        if fname is not None:
            if fname not in c.files():
                return False
        else:
            for f in c.files():
                if m(f):
                    break
            else:
                return False
        files = repo.status(c.p1().node(), c.node())[field]
        if fname is not None:
            if fname in files:
                return True
        else:
            for f in files:
                if m(f):
                    return True

    return subset.filter(matches, condrepr=('<status[%r] %r>', field, pat))
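
# [Illustrative note -- not part of revset.py] The ``field`` argument indexes
# into the status tuple returned by repo.status(), which begins with
# (modified, added, removed, ...). That is why adds() above calls
# checkstatus(repo, subset, pat, 1): index 1 picks the list of added files.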

def _children(repo, subset, parentset):
    if not parentset:
        return baseset()
    cs = set()
    pr = repo.changelog.parentrevs
    minrev = parentset.min()
    nullrev = node.nullrev
    for r in subset:
        if r <= minrev:
            continue
        p1, p2 = pr(r)
        if p1 in parentset:
            cs.add(r)
        if p2 != nullrev and p2 in parentset:
            cs.add(r)
    return baseset(cs)

@predicate('children(set)', safe=True)
def children(repo, subset, x):
    """Child changesets of changesets in set.
    """
    s = getset(repo, fullreposet(repo), x)
    cs = _children(repo, subset, s)
    return subset & cs

@predicate('closed()', safe=True)
def closed(repo, subset, x):
    """Changeset is closed.
    """
    # i18n: "closed" is a keyword
    getargs(x, 0, 0, _("closed takes no arguments"))
    return subset.filter(lambda r: repo[r].closesbranch(),
                         condrepr='<branch closed>')

@predicate('contains(pattern)')
def contains(repo, subset, x):
    """The revision's manifest contains a file matching pattern (but might not
    modify it). See :hg:`help patterns` for information about file patterns.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.
    """
    # i18n: "contains" is a keyword
    pat = getstring(x, _("contains requires a pattern"))

    def matches(x):
        if not matchmod.patkind(pat):
            pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
            if pats in repo[x]:
                return True
        else:
            c = repo[x]
            m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
            for f in c.manifest():
                if m(f):
                    return True
        return False

    return subset.filter(matches, condrepr=('<contains %r>', pat))

@predicate('converted([id])', safe=True)
def converted(repo, subset, x):
    """Changesets converted from the given identifier in the old repository if
    present, or all converted changesets if no identifier is specified.
    """

    # There is exactly no chance of resolving the revision, so do a simple
    # string compare and hope for the best

    rev = None
    # i18n: "converted" is a keyword
    l = getargs(x, 0, 1, _('converted takes one or no arguments'))
    if l:
        # i18n: "converted" is a keyword
        rev = getstring(l[0], _('converted requires a revision'))

    def _matchvalue(r):
        source = repo[r].extra().get('convert_revision', None)
        return source is not None and (rev is None or source.startswith(rev))

    return subset.filter(lambda r: _matchvalue(r),
                         condrepr=('<converted %r>', rev))

@predicate('date(interval)', safe=True)
def date(repo, subset, x):
    """Changesets within the interval, see :hg:`help dates`.
    """
    # i18n: "date" is a keyword
    ds = getstring(x, _("date requires a string"))
    dm = util.matchdate(ds)
    return subset.filter(lambda x: dm(repo[x].date()[0]),
                         condrepr=('<date %r>', ds))

@predicate('desc(string)', safe=True)
def desc(repo, subset, x):
    """Search commit message for string. The match is case-insensitive.
    """
    # i18n: "desc" is a keyword
    ds = encoding.lower(getstring(x, _("desc requires a string")))

    def matches(x):
        c = repo[x]
        return ds in encoding.lower(c.description())

    return subset.filter(matches, condrepr=('<desc %r>', ds))

def _descendants(repo, subset, x, followfirst=False):
    roots = getset(repo, fullreposet(repo), x)
    if not roots:
        return baseset()
    s = _revdescendants(repo, roots, followfirst)

    # Both sets need to be ascending in order to lazily return the union
    # in the correct order.
    base = subset & roots
    desc = subset & s
    result = base + desc
    if subset.isascending():
        result.sort()
    elif subset.isdescending():
        result.sort(reverse=True)
    else:
        result = subset & result
    return result

@predicate('descendants(set)', safe=True)
def descendants(repo, subset, x):
    """Changesets which are descendants of changesets in set.
    """
    return _descendants(repo, subset, x)

@predicate('_firstdescendants', safe=True)
def _firstdescendants(repo, subset, x):
    # ``_firstdescendants(set)``
    # Like ``descendants(set)`` but follows only the first parents.
    return _descendants(repo, subset, x, followfirst=True)

@predicate('destination([set])', safe=True)
def destination(repo, subset, x):
    """Changesets that were created by a graft, transplant or rebase operation,
    with the given revisions specified as the source. Omitting the optional set
    is the same as passing all().
    """
    if x is not None:
        sources = getset(repo, fullreposet(repo), x)
    else:
        sources = fullreposet(repo)

    dests = set()

    # subset contains all of the possible destinations that can be returned, so
    # iterate over them and see if their source(s) were provided in the arg set.
    # Even if the immediate src of r is not in the arg set, src's source (or
    # further back) may be. Scanning back further than the immediate src allows
    # transitive transplants and rebases to yield the same results as transitive
    # grafts.
    for r in subset:
        src = _getrevsource(repo, r)
        lineage = None

        while src is not None:
            if lineage is None:
                lineage = list()

            lineage.append(r)

            # The visited lineage is a match if the current source is in the arg
            # set. Since every candidate dest is visited by way of iterating
            # subset, any dests further back in the lineage will be tested by a
            # different iteration over subset. Likewise, if the src was already
            # selected, the current lineage can be selected without going back
            # further.
            if src in sources or src in dests:
                dests.update(lineage)
                break

            r = src
            src = _getrevsource(repo, r)

    return subset.filter(dests.__contains__,
                         condrepr=lambda: '<destination %r>' % sorted(dests))
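
# [Illustrative sketch -- not part of revset.py] destination() follows the
# chain of recorded sources (graft/transplant/rebase) transitively. Reduced to
# a toy {rev: source_rev} map (hypothetical data), the inner loop amounts to:

def _reaches_source_demo(srcmap, rev, sources):
    src = srcmap.get(rev)
    while src is not None:
        if src in sources:
            return True
        src = srcmap.get(src)
    return False

# 5 was grafted from 4, which itself was grafted from 2
assert _reaches_source_demo({4: 2, 5: 4}, 5, {2})
assert not _reaches_source_demo({4: 2, 5: 4}, 5, {3})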
902
902
@predicate('divergent()', safe=True)
def divergent(repo, subset, x):
    """
    Final successors of changesets with an alternative set of final successors.
    """
    # i18n: "divergent" is a keyword
    getargs(x, 0, 0, _("divergent takes no arguments"))
    divergent = obsmod.getrevs(repo, 'divergent')
    return subset & divergent

@predicate('extinct()', safe=True)
def extinct(repo, subset, x):
    """Obsolete changesets with obsolete descendants only.
    """
    # i18n: "extinct" is a keyword
    getargs(x, 0, 0, _("extinct takes no arguments"))
    extincts = obsmod.getrevs(repo, 'extinct')
    return subset & extincts

@predicate('extra(label, [value])', safe=True)
def extra(repo, subset, x):
    """Changesets with the given label in the extra metadata, with the given
    optional value.

    If `value` starts with `re:`, the remainder of the value is treated as
    a regular expression. To match a value that actually starts with `re:`,
    use the prefix `literal:`.
    """
    args = getargsdict(x, 'extra', 'label value')
    if 'label' not in args:
        # i18n: "extra" is a keyword
        raise error.ParseError(_('extra takes at least 1 argument'))
    # i18n: "extra" is a keyword
    label = getstring(args['label'], _('first argument to extra must be '
                                       'a string'))
    value = None

    if 'value' in args:
        # i18n: "extra" is a keyword
        value = getstring(args['value'], _('second argument to extra must be '
                                           'a string'))
        kind, value, matcher = util.stringmatcher(value)

    def _matchvalue(r):
        extra = repo[r].extra()
        return label in extra and (value is None or matcher(extra[label]))

    return subset.filter(lambda r: _matchvalue(r),
                         condrepr=('<extra[%r] %r>', label, value))

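# Illustrative sketch (not part of revset.py): the 're:' / 'literal:'
# prefixes that extra() hands to util.stringmatcher() amount to choosing
# between a regular-expression search and an exact comparison. A minimal
# stand-in, reusing the module-level ``re`` import and covering only those
# two prefixes, could look like this (names are hypothetical):
def _stringmatcher_sketch(pattern):
    # returns (kind, cleaned pattern, matcher), mirroring the real 3-tuple
    if pattern.startswith('re:'):
        regex = re.compile(pattern[3:])
        return 're', pattern[3:], lambda s: bool(regex.search(s))
    if pattern.startswith('literal:'):
        pattern = pattern[8:]
    return 'literal', pattern, lambda s: s == pattern

# e.g. extra('source', 're:\w+') accepts any word-like value of the label
_kind, _pat, _match = _stringmatcher_sketch(r're:\w+')
assert _kind == 're' and _match('abc123')
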
@predicate('filelog(pattern)', safe=True)
def filelog(repo, subset, x):
    """Changesets connected to the specified filelog.

    For performance reasons, visits only revisions mentioned in the file-level
    filelog, rather than filtering through all changesets (much faster, but
    doesn't include deletes or duplicate changes). For a slower, more accurate
    result, use ``file()``.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.

    If some linkrev points to revisions filtered by the current repoview, we'll
    work around it to return a non-filtered value.
    """

    # i18n: "filelog" is a keyword
    pat = getstring(x, _("filelog requires a pattern"))
    s = set()
    cl = repo.changelog

    if not matchmod.patkind(pat):
        f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
        files = [f]
    else:
        m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
        files = (f for f in repo[None] if m(f))

    for f in files:
        fl = repo.file(f)
        known = {}
        scanpos = 0
        for fr in list(fl):
            fn = fl.node(fr)
            if fn in known:
                s.add(known[fn])
                continue

            lr = fl.linkrev(fr)
            if lr in cl:
                s.add(lr)
            elif scanpos is not None:
                # lowest matching changeset is filtered, scan further
                # ahead in changelog
                start = max(lr, scanpos) + 1
                scanpos = None
                for r in cl.revs(start):
                    # minimize parsing of non-matching entries
                    if f in cl.revision(r) and f in cl.readfiles(r):
                        try:
                            # try to use manifest delta fastpath
                            n = repo[r].filenode(f)
                            if n not in known:
                                if n == fn:
                                    s.add(r)
                                    scanpos = r
                                    break
                                else:
                                    known[n] = r
                        except error.ManifestLookupError:
                            # deletion in changelog
                            continue

    return subset & s

@predicate('first(set, [n])', safe=True)
def first(repo, subset, x):
    """An alias for limit().
    """
    return limit(repo, subset, x)

def _follow(repo, subset, x, name, followfirst=False):
    l = getargs(x, 0, 2, _("%s takes no arguments or a pattern "
                           "and an optional revset") % name)
    c = repo['.']
    if l:
        x = getstring(l[0], _("%s expected a pattern") % name)
        rev = None
        if len(l) >= 2:
            revs = getset(repo, fullreposet(repo), l[1])
            if len(revs) != 1:
                raise error.RepoLookupError(
                    _("%s expected one starting revision") % name)
            rev = revs.last()
            c = repo[rev]
        matcher = matchmod.match(repo.root, repo.getcwd(), [x],
                                 ctx=repo[rev], default='path')

        files = c.manifest().walk(matcher)

        s = set()
        for fname in files:
            fctx = c[fname]
            s = s.union(set(c.rev() for c in fctx.ancestors(followfirst)))
            # include the revision responsible for the most recent version
            s.add(fctx.introrev())
    else:
        s = _revancestors(repo, baseset([c.rev()]), followfirst)

    return subset & s

@predicate('follow([pattern[, startrev]])', safe=True)
def follow(repo, subset, x):
    """
    An alias for ``::.`` (ancestors of the working directory's first parent).
    If pattern is specified, the histories of files matching given
    pattern in the revision given by startrev are followed, including copies.
    """
    return _follow(repo, subset, x, 'follow')

@predicate('_followfirst', safe=True)
def _followfirst(repo, subset, x):
    # ``followfirst([pattern[, startrev]])``
    # Like ``follow([pattern[, startrev]])`` but follows only the first parent
    # of every revision or file revision.
    return _follow(repo, subset, x, '_followfirst', followfirst=True)

@predicate('all()', safe=True)
def getall(repo, subset, x):
    """All changesets, the same as ``0:tip``.
    """
    # i18n: "all" is a keyword
    getargs(x, 0, 0, _("all takes no arguments"))
    return subset & spanset(repo) # drop "null" if any

@predicate('grep(regex)')
def grep(repo, subset, x):
    """Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
    to ensure special escape characters are handled correctly. Unlike
    ``keyword(string)``, the match is case-sensitive.
    """
    try:
        # i18n: "grep" is a keyword
        gr = re.compile(getstring(x, _("grep requires a string")))
    except re.error as e:
        raise error.ParseError(_('invalid match pattern: %s') % e)

    def matches(x):
        c = repo[x]
        for e in c.files() + [c.user(), c.description()]:
            if gr.search(e):
                return True
        return False

    return subset.filter(matches, condrepr=('<grep %r>', gr.pattern))

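# Illustrative sketch (not part of revset.py): grep() above and keyword()
# below both scan the changed file names, the user and the description of
# each changeset; the difference is a case-sensitive regular expression
# versus a case-insensitive substring. Over plain strings (hypothetical
# helpers, reusing the module-level ``re`` import):
def _keyword_hit_sketch(needle, fields):
    return any(needle.lower() in f.lower() for f in fields)

def _grep_hit_sketch(pattern, fields):
    return any(re.search(pattern, f) for f in fields)

_fields = ['src/parser.py', 'Alice <alice@example.com>', 'Fix Issue 42']
assert _keyword_hit_sketch('ISSUE', _fields)        # case-insensitive
assert not _grep_hit_sketch(r'issue \d+', _fields)  # case-sensitive regex
assert _grep_hit_sketch(r'Issue \d+', _fields)
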
@predicate('_matchfiles', safe=True)
def _matchfiles(repo, subset, x):
    # _matchfiles takes a revset list of prefixed arguments:
    #
    #   [p:foo, i:bar, x:baz]
    #
    # builds a match object from them and filters subset. Allowed
    # prefixes are 'p:' for regular patterns, 'i:' for include
    # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
    # a revision identifier, or the empty string to reference the
    # working directory, from which the match object is
    # initialized. Use 'd:' to set the default matching mode, default
    # to 'glob'. At most one 'r:' and 'd:' argument can be passed.

    l = getargs(x, 1, -1, "_matchfiles requires at least one argument")
    pats, inc, exc = [], [], []
    rev, default = None, None
    for arg in l:
        s = getstring(arg, "_matchfiles requires string arguments")
        prefix, value = s[:2], s[2:]
        if prefix == 'p:':
            pats.append(value)
        elif prefix == 'i:':
            inc.append(value)
        elif prefix == 'x:':
            exc.append(value)
        elif prefix == 'r:':
            if rev is not None:
                raise error.ParseError('_matchfiles expected at most one '
                                       'revision')
            if value != '': # empty means working directory; leave rev as None
                rev = value
        elif prefix == 'd:':
            if default is not None:
                raise error.ParseError('_matchfiles expected at most one '
                                       'default mode')
            default = value
        else:
            raise error.ParseError('invalid _matchfiles prefix: %s' % prefix)
    if not default:
        default = 'glob'

    m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
                       exclude=exc, ctx=repo[rev], default=default)

    # This directly reads the changelog data, as creating a changectx for
    # every revision is quite expensive.
    getfiles = repo.changelog.readfiles
    wdirrev = node.wdirrev
    def matches(x):
        if x == wdirrev:
            files = repo[x].files()
        else:
            files = getfiles(x)
        for f in files:
            if m(f):
                return True
        return False

    return subset.filter(matches,
                         condrepr=('<matchfiles patterns=%r, include=%r '
                                   'exclude=%r, default=%r, rev=%r>',
                                   pats, inc, exc, default, rev))

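# Illustrative sketch (not part of revset.py): the prefixed-argument
# convention documented at the top of _matchfiles() is easy to see with a
# toy parser that handles only the p:/i:/x: buckets (the real code also
# accepts r: and d:). Names below are hypothetical:
def _parse_matchfiles_args_sketch(args):
    pats, inc, exc = [], [], []
    buckets = {'p:': pats, 'i:': inc, 'x:': exc}
    for s in args:
        prefix, value = s[:2], s[2:]
        if prefix not in buckets:
            raise ValueError('invalid prefix: %s' % prefix)
        buckets[prefix].append(value)
    return pats, inc, exc

# file('glob:*.py') ends up as _matchfiles('p:glob:*.py'), see hasfile() below
assert _parse_matchfiles_args_sketch(['p:glob:*.py', 'x:tests/**']) == \
    (['glob:*.py'], [], ['tests/**'])
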
@predicate('file(pattern)', safe=True)
def hasfile(repo, subset, x):
    """Changesets affecting files matched by pattern.

    For a faster but less accurate result, consider using ``filelog()``
    instead.

    This predicate uses ``glob:`` as the default kind of pattern.
    """
    # i18n: "file" is a keyword
    pat = getstring(x, _("file requires a pattern"))
    return _matchfiles(repo, subset, ('string', 'p:' + pat))

@predicate('head()', safe=True)
def head(repo, subset, x):
    """Changeset is a named branch head.
    """
    # i18n: "head" is a keyword
    getargs(x, 0, 0, _("head takes no arguments"))
    hs = set()
    cl = repo.changelog
    for ls in repo.branchmap().itervalues():
        hs.update(cl.rev(h) for h in ls)
    return subset & baseset(hs)

@predicate('heads(set)', safe=True)
def heads(repo, subset, x):
    """Members of set with no children in set.
    """
    s = getset(repo, subset, x)
    ps = parents(repo, subset, x)
    return s - ps

@predicate('hidden()', safe=True)
def hidden(repo, subset, x):
    """Hidden changesets.
    """
    # i18n: "hidden" is a keyword
    getargs(x, 0, 0, _("hidden takes no arguments"))
    hiddenrevs = repoview.filterrevs(repo, 'visible')
    return subset & hiddenrevs

@predicate('keyword(string)', safe=True)
def keyword(repo, subset, x):
    """Search commit message, user name, and names of changed files for
    string. The match is case-insensitive.
    """
    # i18n: "keyword" is a keyword
    kw = encoding.lower(getstring(x, _("keyword requires a string")))

    def matches(r):
        c = repo[r]
        return any(kw in encoding.lower(t)
                   for t in c.files() + [c.user(), c.description()])

    return subset.filter(matches, condrepr=('<keyword %r>', kw))

@predicate('limit(set[, n[, offset]])', safe=True)
def limit(repo, subset, x):
    """First n members of set, defaulting to 1, starting from offset.
    """
    args = getargsdict(x, 'limit', 'set n offset')
    if 'set' not in args:
        # i18n: "limit" is a keyword
        raise error.ParseError(_("limit requires one to three arguments"))
    try:
        lim, ofs = 1, 0
        if 'n' in args:
            # i18n: "limit" is a keyword
            lim = int(getstring(args['n'], _("limit requires a number")))
        if 'offset' in args:
            # i18n: "limit" is a keyword
            ofs = int(getstring(args['offset'], _("limit requires a number")))
        if ofs < 0:
            raise error.ParseError(_("negative offset"))
    except (TypeError, ValueError):
        # i18n: "limit" is a keyword
        raise error.ParseError(_("limit expects a number"))
    os = getset(repo, fullreposet(repo), args['set'])
    result = []
    it = iter(os)
    for x in xrange(ofs):
        y = next(it, None)
        if y is None:
            break
    for x in xrange(lim):
        y = next(it, None)
        if y is None:
            break
        elif y in subset:
            result.append(y)
    return baseset(result, datarepr=('<limit n=%d, offset=%d, %r, %r>',
                                     lim, ofs, subset, os))

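# Illustrative sketch (not part of revset.py): limit(set, n, offset) keeps
# at most n members after skipping the first offset members, and only keeps
# those that are also in the subset being filtered. In plain Python:
def _limit_sketch(revs, subset, lim=1, ofs=0):
    it = iter(revs)
    for _skip in range(ofs):
        if next(it, None) is None:
            break
    result = []
    for _keep in range(lim):
        y = next(it, None)
        if y is None:
            break
        if y in subset:
            result.append(y)
    return result

# e.g. limit(0:10, 3, 2) over a subset containing every revision
assert _limit_sketch(range(11), set(range(11)), lim=3, ofs=2) == [2, 3, 4]
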
@predicate('last(set, [n])', safe=True)
def last(repo, subset, x):
    """Last n members of set, defaulting to 1.
    """
    # i18n: "last" is a keyword
    l = getargs(x, 1, 2, _("last requires one or two arguments"))
    try:
        lim = 1
        if len(l) == 2:
            # i18n: "last" is a keyword
            lim = int(getstring(l[1], _("last requires a number")))
    except (TypeError, ValueError):
        # i18n: "last" is a keyword
        raise error.ParseError(_("last expects a number"))
    os = getset(repo, fullreposet(repo), l[0])
    os.reverse()
    result = []
    it = iter(os)
    for x in xrange(lim):
        y = next(it, None)
        if y is None:
            break
        elif y in subset:
            result.append(y)
    return baseset(result, datarepr=('<last n=%d, %r, %r>', lim, subset, os))

@predicate('max(set)', safe=True)
def maxrev(repo, subset, x):
    """Changeset with highest revision number in set.
    """
    os = getset(repo, fullreposet(repo), x)
    try:
        m = os.max()
        if m in subset:
            return baseset([m], datarepr=('<max %r, %r>', subset, os))
    except ValueError:
        # os.max() throws a ValueError when the collection is empty.
        # Same as python's max().
        pass
    return baseset(datarepr=('<max %r, %r>', subset, os))

@predicate('merge()', safe=True)
def merge(repo, subset, x):
    """Changeset is a merge changeset.
    """
    # i18n: "merge" is a keyword
    getargs(x, 0, 0, _("merge takes no arguments"))
    cl = repo.changelog
    return subset.filter(lambda r: cl.parentrevs(r)[1] != -1,
                         condrepr='<merge>')

@predicate('branchpoint()', safe=True)
def branchpoint(repo, subset, x):
    """Changesets with more than one child.
    """
    # i18n: "branchpoint" is a keyword
    getargs(x, 0, 0, _("branchpoint takes no arguments"))
    cl = repo.changelog
    if not subset:
        return baseset()
    # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
    # (and if it is not, it should be.)
    baserev = min(subset)
    parentscount = [0]*(len(repo) - baserev)
    for r in cl.revs(start=baserev + 1):
        for p in cl.parentrevs(r):
            if p >= baserev:
                parentscount[p - baserev] += 1
    return subset.filter(lambda r: parentscount[r - baserev] > 1,
                         condrepr='<branchpoint>')

@predicate('min(set)', safe=True)
def minrev(repo, subset, x):
    """Changeset with lowest revision number in set.
    """
    os = getset(repo, fullreposet(repo), x)
    try:
        m = os.min()
        if m in subset:
            return baseset([m], datarepr=('<min %r, %r>', subset, os))
    except ValueError:
        # os.min() throws a ValueError when the collection is empty.
        # Same as python's min().
        pass
    return baseset(datarepr=('<min %r, %r>', subset, os))

@predicate('modifies(pattern)', safe=True)
def modifies(repo, subset, x):
    """Changesets modifying files matched by pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "modifies" is a keyword
    pat = getstring(x, _("modifies requires a pattern"))
    return checkstatus(repo, subset, pat, 0)

@predicate('named(namespace)')
def named(repo, subset, x):
    """The changesets in a given namespace.

    If `namespace` starts with `re:`, the remainder of the string is treated as
    a regular expression. To match a namespace that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "named" is a keyword
    args = getargs(x, 1, 1, _('named requires a namespace argument'))

    ns = getstring(args[0],
                   # i18n: "named" is a keyword
                   _('the argument to named must be a string'))
    kind, pattern, matcher = util.stringmatcher(ns)
    namespaces = set()
    if kind == 'literal':
        if pattern not in repo.names:
            raise error.RepoLookupError(_("namespace '%s' does not exist")
                                        % ns)
        namespaces.add(repo.names[pattern])
    else:
        for name, ns in repo.names.iteritems():
            if matcher(name):
                namespaces.add(ns)
        if not namespaces:
            raise error.RepoLookupError(_("no namespace exists"
                                          " that match '%s'") % pattern)

    names = set()
    for ns in namespaces:
        for name in ns.listnames(repo):
            if name not in ns.deprecated:
                names.update(repo[n].rev() for n in ns.nodes(repo, name))

    names -= set([node.nullrev])
    return subset & names

@predicate('id(string)', safe=True)
def node_(repo, subset, x):
    """Revision non-ambiguously specified by the given hex string prefix.
    """
    # i18n: "id" is a keyword
    l = getargs(x, 1, 1, _("id requires one argument"))
    # i18n: "id" is a keyword
    n = getstring(l[0], _("id requires a string"))
    if len(n) == 40:
        try:
            rn = repo.changelog.rev(node.bin(n))
        except (LookupError, TypeError):
            rn = None
    else:
        rn = None
        pm = repo.changelog._partialmatch(n)
        if pm is not None:
            rn = repo.changelog.rev(pm)

    if rn is None:
        return baseset()
    result = baseset([rn])
    return result & subset

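# Illustrative sketch (not part of revset.py): id() above treats a
# 40-character argument as a full node lookup and anything shorter as a
# prefix that must match exactly one node. The toy below returns None where
# the real changelog code reports an ambiguity (names are hypothetical):
def _id_lookup_sketch(nodes, s):
    if len(s) == 40:
        return nodes.index(s) if s in nodes else None
    matches = [i for i, n in enumerate(nodes) if n.startswith(s)]
    return matches[0] if len(matches) == 1 else None

_nodes = ['aa' * 20, 'ab' * 20]
assert _id_lookup_sketch(_nodes, 'ab' * 20) == 1  # full 40-char hash
assert _id_lookup_sketch(_nodes, 'aaaa') == 0     # unambiguous prefix
assert _id_lookup_sketch(_nodes, 'a') is None     # ambiguous prefix
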
@predicate('obsolete()', safe=True)
def obsolete(repo, subset, x):
    """Mutable changeset with a newer version."""
    # i18n: "obsolete" is a keyword
    getargs(x, 0, 0, _("obsolete takes no arguments"))
    obsoletes = obsmod.getrevs(repo, 'obsolete')
    return subset & obsoletes

@predicate('only(set, [set])', safe=True)
def only(repo, subset, x):
    """Changesets that are ancestors of the first set that are not ancestors
    of any other head in the repo. If a second set is specified, the result
    is ancestors of the first set that are not ancestors of the second set
    (i.e. ::<set1> - ::<set2>).
    """
    cl = repo.changelog
    # i18n: "only" is a keyword
    args = getargs(x, 1, 2, _('only takes one or two arguments'))
    include = getset(repo, fullreposet(repo), args[0])
    if len(args) == 1:
        if not include:
            return baseset()

        descendants = set(_revdescendants(repo, include, False))
        exclude = [rev for rev in cl.headrevs()
                   if not rev in descendants and not rev in include]
    else:
        exclude = getset(repo, fullreposet(repo), args[1])

    results = set(cl.findmissingrevs(common=exclude, heads=include))
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimizations from the fact this is a baseset.
    return subset & results

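# Illustrative sketch (not part of revset.py): as the docstring of only()
# says, the two-argument form is the ancestor-set difference
# ::<set1> - ::<set2>. A toy DAG stored as a child -> parents mapping makes
# that concrete (names are hypothetical):
def _ancestors_sketch(parents, revs):
    seen, stack = set(), list(revs)
    while stack:
        r = stack.pop()
        if r not in seen:
            seen.add(r)
            stack.extend(parents.get(r, ()))
    return seen

_toy_dag = {1: [0], 2: [1], 3: [1]}    # 0 -> 1 -> {2, 3}
assert _ancestors_sketch(_toy_dag, [2]) - _ancestors_sketch(_toy_dag, [3]) == {2}
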
@predicate('origin([set])', safe=True)
def origin(repo, subset, x):
    """
    Changesets that were specified as a source for the grafts, transplants or
    rebases that created the given revisions. Omitting the optional set is the
    same as passing all(). If a changeset created by these operations is itself
    specified as a source for one of these operations, only the source changeset
    for the first operation is selected.
    """
    if x is not None:
        dests = getset(repo, fullreposet(repo), x)
    else:
        dests = fullreposet(repo)

    def _firstsrc(rev):
        src = _getrevsource(repo, rev)
        if src is None:
            return None

        while True:
            prev = _getrevsource(repo, src)

            if prev is None:
                return src
            src = prev

    o = set([_firstsrc(r) for r in dests])
    o -= set([None])
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimizations from the fact this is a baseset.
    return subset & o

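# Illustrative sketch (not part of revset.py): _firstsrc() above follows the
# recorded source links all the way back, so a chain of grafts collapses
# onto the original changeset. With a dict as the source record
# (hypothetical names):
def _firstsrc_sketch(rev, getsource):
    src = getsource(rev)
    while src is not None and getsource(src) is not None:
        src = getsource(src)
    return src

_origin_sources = {5: 3, 3: 1}    # 5 was grafted from 3, 3 from 1
assert _firstsrc_sketch(5, _origin_sources.get) == 1
assert _firstsrc_sketch(2, _origin_sources.get) is None  # no recorded source
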
@predicate('outgoing([path])', safe=True)
def outgoing(repo, subset, x):
    """Changesets not found in the specified destination repository, or the
    default push location.
    """
    # Avoid cycles.
    from . import (
        discovery,
        hg,
    )
    # i18n: "outgoing" is a keyword
    l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
    # i18n: "outgoing" is a keyword
    dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
    dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.peer(repo, {}, dest)
    repo.ui.pushbuffer()
    outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
    repo.ui.popbuffer()
    cl = repo.changelog
    o = set([cl.rev(r) for r in outgoing.missing])
    return subset & o

@predicate('p1([set])', safe=True)
def p1(repo, subset, x):
    """First parent of changesets in set, or the working directory.
    """
    if x is None:
        p = repo[x].p1().rev()
        if p >= 0:
            return subset & baseset([p])
        return baseset()

    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        ps.add(cl.parentrevs(r)[0])
    ps -= set([node.nullrev])
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimizations from the fact this is a baseset.
    return subset & ps

@predicate('p2([set])', safe=True)
def p2(repo, subset, x):
    """Second parent of changesets in set, or the working directory.
    """
    if x is None:
        ps = repo[x].parents()
        try:
            p = ps[1].rev()
            if p >= 0:
                return subset & baseset([p])
            return baseset()
        except IndexError:
            return baseset()

    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        ps.add(cl.parentrevs(r)[1])
    ps -= set([node.nullrev])
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimizations from the fact this is a baseset.
    return subset & ps

def parentpost(repo, subset, x, order):
    return p1(repo, subset, x)

@predicate('parents([set])', safe=True)
def parents(repo, subset, x):
    """
    The set of all parents for all changesets in set, or the working directory.
    """
    if x is None:
        ps = set(p.rev() for p in repo[x].parents())
    else:
        ps = set()
        cl = repo.changelog
        up = ps.update
        parentrevs = cl.parentrevs
        for r in getset(repo, fullreposet(repo), x):
            if r == node.wdirrev:
                up(p.rev() for p in repo[r].parents())
            else:
                up(parentrevs(r))
    ps -= set([node.nullrev])
    return subset & ps

def _phase(repo, subset, target):
    """helper to select all revs in phase <target>"""
    repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
    if repo._phasecache._phasesets:
        s = repo._phasecache._phasesets[target] - repo.changelog.filteredrevs
        s = baseset(s)
        s.sort() # sets are unordered, so we enforce ascending order
        return subset & s
    else:
        phase = repo._phasecache.phase
        condition = lambda r: phase(repo, r) == target
        return subset.filter(condition, condrepr=('<phase %r>', target),
                             cache=False)

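# Illustrative sketch (not part of revset.py): the fast path of _phase()
# intersects a precomputed per-phase set, the slow path tests each revision;
# both answer "revisions whose phase equals the target". With a toy phase
# table (constants below are stand-ins for phases.public/draft/secret):
_PUBLIC_SKETCH, _DRAFT_SKETCH, _SECRET_SKETCH = 0, 1, 2
_phase_table = {0: _PUBLIC_SKETCH, 1: _PUBLIC_SKETCH,
                2: _DRAFT_SKETCH, 3: _SECRET_SKETCH}

def _phase_filter_sketch(revs, phase_of, target):
    return [r for r in revs if phase_of[r] == target]

assert _phase_filter_sketch(range(4), _phase_table, _DRAFT_SKETCH) == [2]
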
@predicate('draft()', safe=True)
def draft(repo, subset, x):
    """Changeset in draft phase."""
    # i18n: "draft" is a keyword
    getargs(x, 0, 0, _("draft takes no arguments"))
    target = phases.draft
    return _phase(repo, subset, target)

@predicate('secret()', safe=True)
def secret(repo, subset, x):
    """Changeset in secret phase."""
    # i18n: "secret" is a keyword
    getargs(x, 0, 0, _("secret takes no arguments"))
    target = phases.secret
    return _phase(repo, subset, target)

def parentspec(repo, subset, x, n, order):
    """``set^0``
    The set.
    ``set^1`` (or ``set^``), ``set^2``
    First or second parent, respectively, of all changesets in set.
    """
    try:
        n = int(n[1])
        if n not in (0, 1, 2):
            raise ValueError
    except (TypeError, ValueError):
        raise error.ParseError(_("^ expects a number 0, 1, or 2"))
    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        if n == 0:
            ps.add(r)
        elif n == 1:
            ps.add(cl.parentrevs(r)[0])
        elif n == 2:
            parents = cl.parentrevs(r)
            if parents[1] != node.nullrev:
                ps.add(parents[1])
    return subset & ps

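# Illustrative sketch (not part of revset.py): the ^n suffix handled by
# parentspec() above picks a column out of the (p1, p2) pair, with ^0 being
# the revision itself and ^2 empty for non-merges (the nullrev check). Over
# a toy parentrevs table (hypothetical names):
_NULLREV_SKETCH = -1
_toy_parentrevs = {3: (1, 2), 2: (0, _NULLREV_SKETCH)}    # 3 merges 1 and 2

def _parentspec_sketch(rev, n):
    if n == 0:
        return {rev}
    p1, p2 = _toy_parentrevs[rev]
    if n == 1:
        return {p1}
    return {p2} if p2 != _NULLREV_SKETCH else set()

assert _parentspec_sketch(3, 0) == {3}
assert _parentspec_sketch(3, 2) == {2}
assert _parentspec_sketch(2, 2) == set()    # not a merge, so ^2 is empty
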
@predicate('present(set)', safe=True)
def present(repo, subset, x):
    """An empty set, if any revision in set isn't found; otherwise,
    all revisions in set.

    If any of the specified revisions is not present in the local repository,
    the query is normally aborted. But this predicate allows the query
    to continue even in such cases.
    """
    try:
        return getset(repo, subset, x)
    except error.RepoLookupError:
        return baseset()

# for internal use
@predicate('_notpublic', safe=True)
def _notpublic(repo, subset, x):
    getargs(x, 0, 0, "_notpublic takes no arguments")
    repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
    if repo._phasecache._phasesets:
        s = set()
        for u in repo._phasecache._phasesets[1:]:
            s.update(u)
        s = baseset(s - repo.changelog.filteredrevs)
        s.sort()
        return subset & s
    else:
        phase = repo._phasecache.phase
        target = phases.public
        condition = lambda r: phase(repo, r) != target
        return subset.filter(condition, condrepr=('<phase %r>', target),
                             cache=False)

@predicate('public()', safe=True)
def public(repo, subset, x):
    """Changeset in public phase."""
    # i18n: "public" is a keyword
    getargs(x, 0, 0, _("public takes no arguments"))
    phase = repo._phasecache.phase
    target = phases.public
    condition = lambda r: phase(repo, r) == target
    return subset.filter(condition, condrepr=('<phase %r>', target),
                         cache=False)

@predicate('remote([id [,path]])', safe=True)
def remote(repo, subset, x):
    """Local revision that corresponds to the given identifier in a
    remote repository, if present. Here, the '.' identifier is a
    synonym for the current local branch.
    """

    from . import hg # avoid start-up nasties
    # i18n: "remote" is a keyword
    l = getargs(x, 0, 2, _("remote takes zero, one, or two arguments"))

    q = '.'
    if len(l) > 0:
        # i18n: "remote" is a keyword
        q = getstring(l[0], _("remote requires a string id"))
    if q == '.':
        q = repo['.'].branch()

    dest = ''
    if len(l) > 1:
        # i18n: "remote" is a keyword
        dest = getstring(l[1], _("remote requires a repository path"))
    dest = repo.ui.expandpath(dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.peer(repo, {}, dest)
    n = other.lookup(q)
    if n in repo:
        r = repo[n].rev()
        if r in subset:
            return baseset([r])
    return baseset()

@predicate('removes(pattern)', safe=True)
def removes(repo, subset, x):
    """Changesets which remove files matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "removes" is a keyword
    pat = getstring(x, _("removes requires a pattern"))
    return checkstatus(repo, subset, pat, 2)

@predicate('rev(number)', safe=True)
def rev(repo, subset, x):
    """Revision with the given numeric identifier.
    """
    # i18n: "rev" is a keyword
    l = getargs(x, 1, 1, _("rev requires one argument"))
    try:
        # i18n: "rev" is a keyword
        l = int(getstring(l[0], _("rev requires a number")))
    except (TypeError, ValueError):
        # i18n: "rev" is a keyword
        raise error.ParseError(_("rev expects a number"))
    if l not in repo.changelog and l != node.nullrev:
        return baseset()
    return subset & baseset([l])

@predicate('matching(revision [, field])', safe=True)
def matching(repo, subset, x):
    """Changesets in which a given set of fields match the set of fields in the
    selected revision or set.

    To match more than one field pass the list of fields to match separated
    by spaces (e.g. ``author description``).

    Valid fields are most regular revision fields and some special fields.

    Regular revision fields are ``description``, ``author``, ``branch``,
    ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
    and ``diff``.
    Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
    contents of the revision. Two revisions matching their ``diff`` will
    also match their ``files``.

    Special fields are ``summary`` and ``metadata``:
    ``summary`` matches the first line of the description.
    ``metadata`` is equivalent to matching ``description user date``
    (i.e. it matches the main metadata fields).

    ``metadata`` is the default field which is used when no fields are
    specified. You can match more than one field at a time.
    """
    # i18n: "matching" is a keyword
    l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))

    revs = getset(repo, fullreposet(repo), l[0])

    fieldlist = ['metadata']
    if len(l) > 1:
        fieldlist = getstring(l[1],
                              # i18n: "matching" is a keyword
                              _("matching requires a string "
                                "as its second argument")).split()

    # Make sure that there are no repeated fields,
    # expand the 'special' 'metadata' field type
    # and check the 'files' whenever we check the 'diff'
    fields = []
    for field in fieldlist:
        if field == 'metadata':
            fields += ['user', 'description', 'date']
        elif field == 'diff':
            # a revision matching the diff must also match the files
            # since matching the diff is very costly, make sure to
            # also match the files first
            fields += ['files', 'diff']
        else:
            if field == 'author':
                field = 'user'
            fields.append(field)
    fields = set(fields)
    if 'summary' in fields and 'description' in fields:
        # If a revision matches its description it also matches its summary
        fields.discard('summary')

    # We may want to match more than one field
    # Not all fields take the same amount of time to be matched
    # Sort the selected fields in order of increasing matching cost
    fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
                  'files', 'description', 'substate', 'diff']
    def fieldkeyfunc(f):
        try:
            return fieldorder.index(f)
        except ValueError:
            # assume an unknown field is very costly
1805 # assume an unknown field is very costly
1806 return len(fieldorder)
1806 return len(fieldorder)
1807 fields = list(fields)
1807 fields = list(fields)
1808 fields.sort(key=fieldkeyfunc)
1808 fields.sort(key=fieldkeyfunc)
1809
1809
1810 # Each field will be matched with its own "getfield" function
1810 # Each field will be matched with its own "getfield" function
1811 # which will be added to the getfieldfuncs array of functions
1811 # which will be added to the getfieldfuncs array of functions
1812 getfieldfuncs = []
1812 getfieldfuncs = []
1813 _funcs = {
1813 _funcs = {
1814 'user': lambda r: repo[r].user(),
1814 'user': lambda r: repo[r].user(),
1815 'branch': lambda r: repo[r].branch(),
1815 'branch': lambda r: repo[r].branch(),
1816 'date': lambda r: repo[r].date(),
1816 'date': lambda r: repo[r].date(),
1817 'description': lambda r: repo[r].description(),
1817 'description': lambda r: repo[r].description(),
1818 'files': lambda r: repo[r].files(),
1818 'files': lambda r: repo[r].files(),
1819 'parents': lambda r: repo[r].parents(),
1819 'parents': lambda r: repo[r].parents(),
1820 'phase': lambda r: repo[r].phase(),
1820 'phase': lambda r: repo[r].phase(),
1821 'substate': lambda r: repo[r].substate,
1821 'substate': lambda r: repo[r].substate,
1822 'summary': lambda r: repo[r].description().splitlines()[0],
1822 'summary': lambda r: repo[r].description().splitlines()[0],
1823 'diff': lambda r: list(repo[r].diff(git=True))
1823 'diff': lambda r: list(repo[r].diff(git=True))
1824 }
1824 }
1825 for info in fields:
1825 for info in fields:
1826 getfield = _funcs.get(info, None)
1826 getfield = _funcs.get(info, None)
1827 if getfield is None:
1827 if getfield is None:
1828 raise error.ParseError(
1828 raise error.ParseError(
1829 # i18n: "matching" is a keyword
1829 # i18n: "matching" is a keyword
1830 _("unexpected field name passed to matching: %s") % info)
1830 _("unexpected field name passed to matching: %s") % info)
1831 getfieldfuncs.append(getfield)
1831 getfieldfuncs.append(getfield)
1832 # convert the getfield array of functions into a "getinfo" function
1832 # convert the getfield array of functions into a "getinfo" function
1833 # which returns an array of field values (or a single value if there
1833 # which returns an array of field values (or a single value if there
1834 # is only one field to match)
1834 # is only one field to match)
1835 getinfo = lambda r: [f(r) for f in getfieldfuncs]
1835 getinfo = lambda r: [f(r) for f in getfieldfuncs]
1836
1836
1837 def matches(x):
1837 def matches(x):
1838 for rev in revs:
1838 for rev in revs:
1839 target = getinfo(rev)
1839 target = getinfo(rev)
1840 match = True
1840 match = True
1841 for n, f in enumerate(getfieldfuncs):
1841 for n, f in enumerate(getfieldfuncs):
1842 if target[n] != f(x):
1842 if target[n] != f(x):
1843 match = False
1843 match = False
1844 if match:
1844 if match:
1845 return True
1845 return True
1846 return False
1846 return False
1847
1847
1848 return subset.filter(matches, condrepr=('<matching%r %r>', fields, revs))
1848 return subset.filter(matches, condrepr=('<matching%r %r>', fields, revs))
1849
1849
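A hedged sketch of how matching() might be driven through repo.revs() (the repo object and the revision number 42 are illustrative assumptions):

# changesets sharing author and date with revision 42
same_meta = repo.revs('matching(42, "user date")')
# with no field list, the default 'metadata' field compares
# user, description and date at once
same_all = repo.revs('matching(42)')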
1850 @predicate('reverse(set)', safe=True, takeorder=True)
1850 @predicate('reverse(set)', safe=True, takeorder=True)
1851 def reverse(repo, subset, x, order):
1851 def reverse(repo, subset, x, order):
1852 """Reverse order of set.
1852 """Reverse order of set.
1853 """
1853 """
1854 l = getset(repo, subset, x)
1854 l = getset(repo, subset, x)
1855 if order == defineorder:
1855 if order == defineorder:
1856 l.reverse()
1856 l.reverse()
1857 return l
1857 return l
1858
1858
1859 @predicate('roots(set)', safe=True)
1859 @predicate('roots(set)', safe=True)
1860 def roots(repo, subset, x):
1860 def roots(repo, subset, x):
1861 """Changesets in set with no parent changeset in set.
1861 """Changesets in set with no parent changeset in set.
1862 """
1862 """
1863 s = getset(repo, fullreposet(repo), x)
1863 s = getset(repo, fullreposet(repo), x)
1864 parents = repo.changelog.parentrevs
1864 parents = repo.changelog.parentrevs
1865 def filter(r):
1865 def filter(r):
1866 for p in parents(r):
1866 for p in parents(r):
1867 if 0 <= p and p in s:
1867 if 0 <= p and p in s:
1868 return False
1868 return False
1869 return True
1869 return True
1870 return subset & s.filter(filter, condrepr='<roots>')
1870 return subset & s.filter(filter, condrepr='<roots>')
1871
1871
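For illustration (assuming a loaded repo), roots() composes with other predicates to find the first changesets of a branch:

# members of branch(default) whose parents lie outside that set
branch_roots = repo.revs('roots(branch(default))')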
1872 _sortkeyfuncs = {
1872 _sortkeyfuncs = {
1873 'rev': lambda c: c.rev(),
1873 'rev': lambda c: c.rev(),
1874 'branch': lambda c: c.branch(),
1874 'branch': lambda c: c.branch(),
1875 'desc': lambda c: c.description(),
1875 'desc': lambda c: c.description(),
1876 'user': lambda c: c.user(),
1876 'user': lambda c: c.user(),
1877 'author': lambda c: c.user(),
1877 'author': lambda c: c.user(),
1878 'date': lambda c: c.date()[0],
1878 'date': lambda c: c.date()[0],
1879 }
1879 }
1880
1880
1881 def _getsortargs(x):
1881 def _getsortargs(x):
1882 """Parse sort options into (set, [(key, reverse)], opts)"""
1882 """Parse sort options into (set, [(key, reverse)], opts)"""
1883 args = getargsdict(x, 'sort', 'set keys topo.firstbranch')
1883 args = getargsdict(x, 'sort', 'set keys topo.firstbranch')
1884 if 'set' not in args:
1884 if 'set' not in args:
1885 # i18n: "sort" is a keyword
1885 # i18n: "sort" is a keyword
1886 raise error.ParseError(_('sort requires one or two arguments'))
1886 raise error.ParseError(_('sort requires one or two arguments'))
1887 keys = "rev"
1887 keys = "rev"
1888 if 'keys' in args:
1888 if 'keys' in args:
1889 # i18n: "sort" is a keyword
1889 # i18n: "sort" is a keyword
1890 keys = getstring(args['keys'], _("sort spec must be a string"))
1890 keys = getstring(args['keys'], _("sort spec must be a string"))
1891
1891
1892 keyflags = []
1892 keyflags = []
1893 for k in keys.split():
1893 for k in keys.split():
1894 fk = k
1894 fk = k
1895 reverse = (k[0] == '-')
1895 reverse = (k[0] == '-')
1896 if reverse:
1896 if reverse:
1897 k = k[1:]
1897 k = k[1:]
1898 if k not in _sortkeyfuncs and k != 'topo':
1898 if k not in _sortkeyfuncs and k != 'topo':
1899 raise error.ParseError(_("unknown sort key %r") % fk)
1899 raise error.ParseError(_("unknown sort key %r") % fk)
1900 keyflags.append((k, reverse))
1900 keyflags.append((k, reverse))
1901
1901
1902 if len(keyflags) > 1 and any(k == 'topo' for k, reverse in keyflags):
1902 if len(keyflags) > 1 and any(k == 'topo' for k, reverse in keyflags):
1903 # i18n: "topo" is a keyword
1903 # i18n: "topo" is a keyword
1904 raise error.ParseError(_('topo sort order cannot be combined '
1904 raise error.ParseError(_('topo sort order cannot be combined '
1905 'with other sort keys'))
1905 'with other sort keys'))
1906
1906
1907 opts = {}
1907 opts = {}
1908 if 'topo.firstbranch' in args:
1908 if 'topo.firstbranch' in args:
1909 if any(k == 'topo' for k, reverse in keyflags):
1909 if any(k == 'topo' for k, reverse in keyflags):
1910 opts['topo.firstbranch'] = args['topo.firstbranch']
1910 opts['topo.firstbranch'] = args['topo.firstbranch']
1911 else:
1911 else:
1912 # i18n: "topo" and "topo.firstbranch" are keywords
1912 # i18n: "topo" and "topo.firstbranch" are keywords
1913 raise error.ParseError(_('topo.firstbranch can only be used '
1913 raise error.ParseError(_('topo.firstbranch can only be used '
1914 'when using the topo sort key'))
1914 'when using the topo sort key'))
1915
1915
1916 return args['set'], keyflags, opts
1916 return args['set'], keyflags, opts
1917
1917
1918 @predicate('sort(set[, [-]key... [, ...]])', safe=True, takeorder=True)
1918 @predicate('sort(set[, [-]key... [, ...]])', safe=True, takeorder=True)
1919 def sort(repo, subset, x, order):
1919 def sort(repo, subset, x, order):
1920 """Sort set by keys. The default sort order is ascending, specify a key
1920 """Sort set by keys. The default sort order is ascending, specify a key
1921 as ``-key`` to sort in descending order.
1921 as ``-key`` to sort in descending order.
1922
1922
1923 The keys can be:
1923 The keys can be:
1924
1924
1925 - ``rev`` for the revision number,
1925 - ``rev`` for the revision number,
1926 - ``branch`` for the branch name,
1926 - ``branch`` for the branch name,
1927 - ``desc`` for the commit message (description),
1927 - ``desc`` for the commit message (description),
1928 - ``user`` for user name (``author`` can be used as an alias),
1928 - ``user`` for user name (``author`` can be used as an alias),
1929 - ``date`` for the commit date,
1929 - ``date`` for the commit date,
1930 - ``topo`` for a reverse topological sort
1930 - ``topo`` for a reverse topological sort
1931
1931
1932 The ``topo`` sort order cannot be combined with other sort keys. This sort
1932 The ``topo`` sort order cannot be combined with other sort keys. This sort
1933 takes one optional argument, ``topo.firstbranch``, which takes a revset that
1933 takes one optional argument, ``topo.firstbranch``, which takes a revset that
1934 specifies what topological branches to prioritize in the sort.
1934 specifies what topological branches to prioritize in the sort.
1935
1935
1936 """
1936 """
1937 s, keyflags, opts = _getsortargs(x)
1937 s, keyflags, opts = _getsortargs(x)
1938 revs = getset(repo, subset, s)
1938 revs = getset(repo, subset, s)
1939
1939
1940 if not keyflags or order != defineorder:
1940 if not keyflags or order != defineorder:
1941 return revs
1941 return revs
1942 if len(keyflags) == 1 and keyflags[0][0] == "rev":
1942 if len(keyflags) == 1 and keyflags[0][0] == "rev":
1943 revs.sort(reverse=keyflags[0][1])
1943 revs.sort(reverse=keyflags[0][1])
1944 return revs
1944 return revs
1945 elif keyflags[0][0] == "topo":
1945 elif keyflags[0][0] == "topo":
1946 firstbranch = ()
1946 firstbranch = ()
1947 if 'topo.firstbranch' in opts:
1947 if 'topo.firstbranch' in opts:
1948 firstbranch = getset(repo, subset, opts['topo.firstbranch'])
1948 firstbranch = getset(repo, subset, opts['topo.firstbranch'])
1949 revs = baseset(_toposort(revs, repo.changelog.parentrevs, firstbranch),
1949 revs = baseset(_toposort(revs, repo.changelog.parentrevs, firstbranch),
1950 istopo=True)
1950 istopo=True)
1951 if keyflags[0][1]:
1951 if keyflags[0][1]:
1952 revs.reverse()
1952 revs.reverse()
1953 return revs
1953 return revs
1954
1954
1955 # sort() is guaranteed to be stable
1955 # sort() is guaranteed to be stable
1956 ctxs = [repo[r] for r in revs]
1956 ctxs = [repo[r] for r in revs]
1957 for k, reverse in reversed(keyflags):
1957 for k, reverse in reversed(keyflags):
1958 ctxs.sort(key=_sortkeyfuncs[k], reverse=reverse)
1958 ctxs.sort(key=_sortkeyfuncs[k], reverse=reverse)
1959 return baseset([c.rev() for c in ctxs])
1959 return baseset([c.rev() for c in ctxs])
1960
1960
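A usage sketch of the sort keys described above, again assuming a loaded repo; the bookmark() argument to topo.firstbranch is only an example:

# ascending by user, then newest-first within each user
ordered = repo.revs('sort(all(), "user -date")')
# topological order, trying to draw the bookmarked branch first
topo_order = repo.revs('sort(all(), topo, topo.firstbranch=bookmark())')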
1961 def _toposort(revs, parentsfunc, firstbranch=()):
1961 def _toposort(revs, parentsfunc, firstbranch=()):
1962 """Yield revisions from heads to roots one (topo) branch at a time.
1962 """Yield revisions from heads to roots one (topo) branch at a time.
1963
1963
1964 This function aims to be used by a graph generator that wishes to minimize
1964 This function aims to be used by a graph generator that wishes to minimize
1965 the number of parallel branches and their interleaving.
1965 the number of parallel branches and their interleaving.
1966
1966
1967 Example iteration order (numbers show the "true" order in a changelog):
1967 Example iteration order (numbers show the "true" order in a changelog):
1968
1968
1969 o 4
1969 o 4
1970 |
1970 |
1971 o 1
1971 o 1
1972 |
1972 |
1973 | o 3
1973 | o 3
1974 | |
1974 | |
1975 | o 2
1975 | o 2
1976 |/
1976 |/
1977 o 0
1977 o 0
1978
1978
1979 Note that the ancestors of merges are understood by the current
1979 Note that the ancestors of merges are understood by the current
1980 algorithm to be on the same branch. This means no reordering will
1980 algorithm to be on the same branch. This means no reordering will
1981 occur behind a merge.
1981 occur behind a merge.
1982 """
1982 """
1983
1983
1984 ### Quick summary of the algorithm
1984 ### Quick summary of the algorithm
1985 #
1985 #
1986 # This function is based around a "retention" principle. We keep revisions
1986 # This function is based around a "retention" principle. We keep revisions
1987 # in memory until we are ready to emit a whole branch that immediately
1987 # in memory until we are ready to emit a whole branch that immediately
1988 # "merges" into an existing one. This reduces the number of parallel
1988 # "merges" into an existing one. This reduces the number of parallel
1989 # branches with interleaved revisions.
1989 # branches with interleaved revisions.
1990 #
1990 #
1991 # During iteration revs are split into two groups:
1991 # During iteration revs are split into two groups:
1992 # A) revisions already emitted
1992 # A) revisions already emitted
1993 # B) revisions in "retention". They are stored as different subgroups.
1993 # B) revisions in "retention". They are stored as different subgroups.
1994 #
1994 #
1995 # for each REV, we do the following logic:
1995 # for each REV, we do the following logic:
1996 #
1996 #
1997 # 1) if REV is a parent of (A), we will emit it. If there is a
1997 # 1) if REV is a parent of (A), we will emit it. If there is a
1998 # retention group ((B) above) that is blocked on REV being
1998 # retention group ((B) above) that is blocked on REV being
1999 # available, we emit all the revisions out of that retention
1999 # available, we emit all the revisions out of that retention
2000 # group first.
2000 # group first.
2001 #
2001 #
2002 # 2) else, we'll search for a subgroup in (B) waiting for REV to be
2002 # 2) else, we'll search for a subgroup in (B) waiting for REV to be
2003 # available; if such a subgroup exists, we add REV to it and the subgroup is
2003 # available; if such a subgroup exists, we add REV to it and the subgroup is
2004 # now waiting for REV.parents() to be available.
2004 # now waiting for REV.parents() to be available.
2005 #
2005 #
2006 # 3) finally, if no such subgroup exists in (B), we create a new one.
2006 # 3) finally, if no such subgroup exists in (B), we create a new one.
2007 #
2007 #
2008 #
2008 #
2009 # To bootstrap the algorithm, we emit the tipmost revision (which
2009 # To bootstrap the algorithm, we emit the tipmost revision (which
2010 # puts it in group (A) from above).
2010 # puts it in group (A) from above).
2011
2011
2012 revs.sort(reverse=True)
2012 revs.sort(reverse=True)
2013
2013
2014 # Set of parents of revisions that have been emitted. They can be considered
2014 # Set of parents of revisions that have been emitted. They can be considered
2015 # unblocked as the graph generator is already aware of them so there is no
2015 # unblocked as the graph generator is already aware of them so there is no
2016 # need to delay the revisions that reference them.
2016 # need to delay the revisions that reference them.
2017 #
2017 #
2018 # If someone wants to prioritize a branch over the others, pre-filling this
2018 # If someone wants to prioritize a branch over the others, pre-filling this
2019 # set will force all other branches to wait until this branch is ready to be
2019 # set will force all other branches to wait until this branch is ready to be
2020 # emitted.
2020 # emitted.
2021 unblocked = set(firstbranch)
2021 unblocked = set(firstbranch)
2022
2022
2023 # list of groups waiting to be displayed, each group is defined by:
2023 # list of groups waiting to be displayed, each group is defined by:
2024 #
2024 #
2025 # (revs: lists of revs waiting to be displayed,
2025 # (revs: lists of revs waiting to be displayed,
2026 # blocked: set of revs that cannot be displayed before those in 'revs')
2026 # blocked: set of revs that cannot be displayed before those in 'revs')
2027 #
2027 #
2028 # The second value ('blocked') corresponds to the parents of any revision in the
2028 # The second value ('blocked') corresponds to the parents of any revision in the
2029 # group ('revs') that is not itself contained in the group. The main idea
2029 # group ('revs') that is not itself contained in the group. The main idea
2030 # of this algorithm is to delay as much as possible the emission of any
2030 # of this algorithm is to delay as much as possible the emission of any
2031 # revision. This means waiting for the moment we are about to display
2031 # revision. This means waiting for the moment we are about to display
2032 # these parents to display the revs in a group.
2032 # these parents to display the revs in a group.
2033 #
2033 #
2034 # This first implementation is smart until it encounters a merge: it will
2034 # This first implementation is smart until it encounters a merge: it will
2035 # emit revs as soon as any parent is about to be emitted and can grow an
2035 # emit revs as soon as any parent is about to be emitted and can grow an
2036 # arbitrary number of revs in 'blocked'. In practice this means we properly
2036 # arbitrary number of revs in 'blocked'. In practice this means we properly
2037 # retain new branches but give up on any special ordering for ancestors
2037 # retain new branches but give up on any special ordering for ancestors
2038 # of merges. The implementation can be improved to handle this better.
2038 # of merges. The implementation can be improved to handle this better.
2039 #
2039 #
2040 # The first subgroup is special. It corresponds to all the revisions that
2040 # The first subgroup is special. It corresponds to all the revisions that
2041 # were already emitted. The 'revs' list is expected to be empty and the
2041 # were already emitted. The 'revs' list is expected to be empty and the
2042 # 'blocked' set contains the parent revisions of already emitted revisions.
2042 # 'blocked' set contains the parent revisions of already emitted revisions.
2043 #
2043 #
2044 # You could pre-seed the <parents> set of groups[0] with specific
2044 # You could pre-seed the <parents> set of groups[0] with specific
2045 # changesets to select what the first emitted branch should be.
2045 # changesets to select what the first emitted branch should be.
2046 groups = [([], unblocked)]
2046 groups = [([], unblocked)]
2047 pendingheap = []
2047 pendingheap = []
2048 pendingset = set()
2048 pendingset = set()
2049
2049
2050 heapq.heapify(pendingheap)
2050 heapq.heapify(pendingheap)
2051 heappop = heapq.heappop
2051 heappop = heapq.heappop
2052 heappush = heapq.heappush
2052 heappush = heapq.heappush
2053 for currentrev in revs:
2053 for currentrev in revs:
2054 # Heap works with the smallest element; we want the highest, so we invert
2054 # Heap works with the smallest element; we want the highest, so we invert
2055 if currentrev not in pendingset:
2055 if currentrev not in pendingset:
2056 heappush(pendingheap, -currentrev)
2056 heappush(pendingheap, -currentrev)
2057 pendingset.add(currentrev)
2057 pendingset.add(currentrev)
2058 # iterate on pending revs until the current rev has been
2058 # iterate on pending revs until the current rev has been
2059 # processed.
2059 # processed.
2060 rev = None
2060 rev = None
2061 while rev != currentrev:
2061 while rev != currentrev:
2062 rev = -heappop(pendingheap)
2062 rev = -heappop(pendingheap)
2063 pendingset.remove(rev)
2063 pendingset.remove(rev)
2064
2064
2065 # Look for a subgroup that is blocked waiting for the current revision.
2065 # Look for a subgroup that is blocked waiting for the current revision.
2066 matching = [i for i, g in enumerate(groups) if rev in g[1]]
2066 matching = [i for i, g in enumerate(groups) if rev in g[1]]
2067
2067
2068 if matching:
2068 if matching:
2069 # The main idea is to gather together all sets that are blocked
2069 # The main idea is to gather together all sets that are blocked
2070 # on the same revision.
2070 # on the same revision.
2071 #
2071 #
2072 # Groups are merged when a common blocking ancestor is
2072 # Groups are merged when a common blocking ancestor is
2073 # observed. For example, given two groups:
2073 # observed. For example, given two groups:
2074 #
2074 #
2075 # revs [5, 4] waiting for 1
2075 # revs [5, 4] waiting for 1
2076 # revs [3, 2] waiting for 1
2076 # revs [3, 2] waiting for 1
2077 #
2077 #
2078 # These two groups will be merged when we process
2078 # These two groups will be merged when we process
2079 # 1. In theory, we could have merged the groups when
2079 # 1. In theory, we could have merged the groups when
2080 # we added 2 to the group it is now in (we could have
2080 # we added 2 to the group it is now in (we could have
2081 # noticed the groups were both blocked on 1 then), but
2081 # noticed the groups were both blocked on 1 then), but
2082 # the way it works now makes the algorithm simpler.
2082 # the way it works now makes the algorithm simpler.
2083 #
2083 #
2084 # We also always keep the oldest subgroup first. We can
2084 # We also always keep the oldest subgroup first. We can
2085 # probably improve the behavior by having the longest set
2085 # probably improve the behavior by having the longest set
2086 # first. That way, graph algorithms could minimise the length
2086 # first. That way, graph algorithms could minimise the length
2087 # of parallel lines in their drawing. This is currently not done.
2087 # of parallel lines in their drawing. This is currently not done.
2088 targetidx = matching.pop(0)
2088 targetidx = matching.pop(0)
2089 trevs, tparents = groups[targetidx]
2089 trevs, tparents = groups[targetidx]
2090 for i in matching:
2090 for i in matching:
2091 gr = groups[i]
2091 gr = groups[i]
2092 trevs.extend(gr[0])
2092 trevs.extend(gr[0])
2093 tparents |= gr[1]
2093 tparents |= gr[1]
2094 # delete all merged subgroups (except the one we kept)
2094 # delete all merged subgroups (except the one we kept)
2095 # (starting from the last subgroup for performance and
2095 # (starting from the last subgroup for performance and
2096 # sanity reasons)
2096 # sanity reasons)
2097 for i in reversed(matching):
2097 for i in reversed(matching):
2098 del groups[i]
2098 del groups[i]
2099 else:
2099 else:
2100 # This is a new head. We create a new subgroup for it.
2100 # This is a new head. We create a new subgroup for it.
2101 targetidx = len(groups)
2101 targetidx = len(groups)
2102 groups.append(([], set([rev])))
2102 groups.append(([], set([rev])))
2103
2103
2104 gr = groups[targetidx]
2104 gr = groups[targetidx]
2105
2105
2106 # We now add the current node to this subgroup. This is done
2106 # We now add the current node to this subgroup. This is done
2107 # after the subgroup merging because all elements from a subgroup
2107 # after the subgroup merging because all elements from a subgroup
2108 # that relied on this rev must precede it.
2108 # that relied on this rev must precede it.
2109 #
2109 #
2110 # we also update the <parents> set to include the parents of the
2110 # we also update the <parents> set to include the parents of the
2111 # new nodes.
2111 # new nodes.
2112 if rev == currentrev: # only display stuff in rev
2112 if rev == currentrev: # only display stuff in rev
2113 gr[0].append(rev)
2113 gr[0].append(rev)
2114 gr[1].remove(rev)
2114 gr[1].remove(rev)
2115 parents = [p for p in parentsfunc(rev) if p > node.nullrev]
2115 parents = [p for p in parentsfunc(rev) if p > node.nullrev]
2116 gr[1].update(parents)
2116 gr[1].update(parents)
2117 for p in parents:
2117 for p in parents:
2118 if p not in pendingset:
2118 if p not in pendingset:
2119 pendingset.add(p)
2119 pendingset.add(p)
2120 heappush(pendingheap, -p)
2120 heappush(pendingheap, -p)
2121
2121
2122 # Look for a subgroup to display
2122 # Look for a subgroup to display
2123 #
2123 #
2124 # When unblocked is empty (if clause), we were not waiting for any
2124 # When unblocked is empty (if clause), we were not waiting for any
2125 # revisions during the first iteration (if no priority was given) or
2125 # revisions during the first iteration (if no priority was given) or
2126 # if we emitted a whole disconnected set of the graph (reached a
2126 # if we emitted a whole disconnected set of the graph (reached a
2127 # root). In that case we arbitrarily take the oldest known
2127 # root). In that case we arbitrarily take the oldest known
2128 # subgroup. The heuristic could probably be better.
2128 # subgroup. The heuristic could probably be better.
2129 #
2129 #
2130 # Otherwise (elif clause) if the subgroup is blocked on
2130 # Otherwise (elif clause) if the subgroup is blocked on
2131 # a revision we just emitted, we can safely emit it as
2131 # a revision we just emitted, we can safely emit it as
2132 # well.
2132 # well.
2133 if not unblocked:
2133 if not unblocked:
2134 if len(groups) > 1: # display other subset
2134 if len(groups) > 1: # display other subset
2135 targetidx = 1
2135 targetidx = 1
2136 gr = groups[1]
2136 gr = groups[1]
2137 elif not gr[1] & unblocked:
2137 elif not gr[1] & unblocked:
2138 gr = None
2138 gr = None
2139
2139
2140 if gr is not None:
2140 if gr is not None:
2141 # update the set of awaited revisions with the one from the
2141 # update the set of awaited revisions with the one from the
2142 # subgroup
2142 # subgroup
2143 unblocked |= gr[1]
2143 unblocked |= gr[1]
2144 # output all revisions in the subgroup
2144 # output all revisions in the subgroup
2145 for r in gr[0]:
2145 for r in gr[0]:
2146 yield r
2146 yield r
2147 # delete the subgroup that you just output
2147 # delete the subgroup that you just output
2148 # unless it is groups[0] in which case you just empty it.
2148 # unless it is groups[0] in which case you just empty it.
2149 if targetidx:
2149 if targetidx:
2150 del groups[targetidx]
2150 del groups[targetidx]
2151 else:
2151 else:
2152 gr[0][:] = []
2152 gr[0][:] = []
2153 # Check if we have some subgroup waiting for revisions we are not going to
2153 # Check if we have some subgroup waiting for revisions we are not going to
2154 # iterate over
2154 # iterate over
2155 for g in groups:
2155 for g in groups:
2156 for r in g[0]:
2156 for r in g[0]:
2157 yield r
2157 yield r
2158
2158
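The graph in the docstring above can be replayed directly against _toposort(); the mapping below is a toy stand-in for changelog.parentrevs (values chosen to reproduce the docstring's example, not real repository data):

# toy DAG: 0 is the root, 4 sits on 1, 3 sits on 2, both 1 and 2 sit on 0
toyparents = {0: (-1, -1), 1: (0, -1), 2: (0, -1), 3: (2, -1), 4: (1, -1)}
result = list(_toposort([0, 1, 2, 3, 4], toyparents.__getitem__))
# expected to emit one branch at a time: [4, 1, 3, 2, 0]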
2159 @predicate('subrepo([pattern])')
2159 @predicate('subrepo([pattern])')
2160 def subrepo(repo, subset, x):
2160 def subrepo(repo, subset, x):
2161 """Changesets that add, modify or remove the given subrepo. If no subrepo
2161 """Changesets that add, modify or remove the given subrepo. If no subrepo
2162 pattern is named, any subrepo changes are returned.
2162 pattern is named, any subrepo changes are returned.
2163 """
2163 """
2164 # i18n: "subrepo" is a keyword
2164 # i18n: "subrepo" is a keyword
2165 args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
2165 args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
2166 pat = None
2166 pat = None
2167 if len(args) != 0:
2167 if len(args) != 0:
2168 pat = getstring(args[0], _("subrepo requires a pattern"))
2168 pat = getstring(args[0], _("subrepo requires a pattern"))
2169
2169
2170 m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])
2170 m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])
2171
2171
2172 def submatches(names):
2172 def submatches(names):
2173 k, p, m = util.stringmatcher(pat)
2173 k, p, m = util.stringmatcher(pat)
2174 for name in names:
2174 for name in names:
2175 if m(name):
2175 if m(name):
2176 yield name
2176 yield name
2177
2177
2178 def matches(x):
2178 def matches(x):
2179 c = repo[x]
2179 c = repo[x]
2180 s = repo.status(c.p1().node(), c.node(), match=m)
2180 s = repo.status(c.p1().node(), c.node(), match=m)
2181
2181
2182 if pat is None:
2182 if pat is None:
2183 return s.added or s.modified or s.removed
2183 return s.added or s.modified or s.removed
2184
2184
2185 if s.added:
2185 if s.added:
2186 return any(submatches(c.substate.keys()))
2186 return any(submatches(c.substate.keys()))
2187
2187
2188 if s.modified:
2188 if s.modified:
2189 subs = set(c.p1().substate.keys())
2189 subs = set(c.p1().substate.keys())
2190 subs.update(c.substate.keys())
2190 subs.update(c.substate.keys())
2191
2191
2192 for path in submatches(subs):
2192 for path in submatches(subs):
2193 if c.p1().substate.get(path) != c.substate.get(path):
2193 if c.p1().substate.get(path) != c.substate.get(path):
2194 return True
2194 return True
2195
2195
2196 if s.removed:
2196 if s.removed:
2197 return any(submatches(c.p1().substate.keys()))
2197 return any(submatches(c.p1().substate.keys()))
2198
2198
2199 return False
2199 return False
2200
2200
2201 return subset.filter(matches, condrepr=('<subrepo %r>', pat))
2201 return subset.filter(matches, condrepr=('<subrepo %r>', pat))
2202
2202
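A hedged usage sketch (repo assumed), following the stringmatcher pattern syntax used by the predicate:

# changesets touching any subrepo whose path starts with "vendor/"
touched = repo.revs("subrepo('re:^vendor/')")
# changesets touching any subrepo at all
any_sub = repo.revs('subrepo()')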
2203 def _substringmatcher(pattern):
2203 def _substringmatcher(pattern):
2204 kind, pattern, matcher = util.stringmatcher(pattern)
2204 kind, pattern, matcher = util.stringmatcher(pattern)
2205 if kind == 'literal':
2205 if kind == 'literal':
2206 matcher = lambda s: pattern in s
2206 matcher = lambda s: pattern in s
2207 return kind, pattern, matcher
2207 return kind, pattern, matcher
2208
2208
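A small self-contained probe of the helper (the strings are invented for illustration):

kind, pat, m = _substringmatcher('alice')       # literal -> substring test
assert m('alice@example.com') and not m('bob@example.com')
kind, pat, m = _substringmatcher('re:^alice')   # regexp via util.stringmatcher
assert m('alice@example.com')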
2209 @predicate('tag([name])', safe=True)
2209 @predicate('tag([name])', safe=True)
2210 def tag(repo, subset, x):
2210 def tag(repo, subset, x):
2211 """The specified tag by name, or all tagged revisions if no name is given.
2211 """The specified tag by name, or all tagged revisions if no name is given.
2212
2212
2213 If `name` starts with `re:`, the remainder of the name is treated as
2213 If `name` starts with `re:`, the remainder of the name is treated as
2214 a regular expression. To match a tag that actually starts with `re:`,
2214 a regular expression. To match a tag that actually starts with `re:`,
2215 use the prefix `literal:`.
2215 use the prefix `literal:`.
2216 """
2216 """
2217 # i18n: "tag" is a keyword
2217 # i18n: "tag" is a keyword
2218 args = getargs(x, 0, 1, _("tag takes one or no arguments"))
2218 args = getargs(x, 0, 1, _("tag takes one or no arguments"))
2219 cl = repo.changelog
2219 cl = repo.changelog
2220 if args:
2220 if args:
2221 pattern = getstring(args[0],
2221 pattern = getstring(args[0],
2222 # i18n: "tag" is a keyword
2222 # i18n: "tag" is a keyword
2223 _('the argument to tag must be a string'))
2223 _('the argument to tag must be a string'))
2224 kind, pattern, matcher = util.stringmatcher(pattern)
2224 kind, pattern, matcher = util.stringmatcher(pattern)
2225 if kind == 'literal':
2225 if kind == 'literal':
2226 # avoid resolving all tags
2226 # avoid resolving all tags
2227 tn = repo._tagscache.tags.get(pattern, None)
2227 tn = repo._tagscache.tags.get(pattern, None)
2228 if tn is None:
2228 if tn is None:
2229 raise error.RepoLookupError(_("tag '%s' does not exist")
2229 raise error.RepoLookupError(_("tag '%s' does not exist")
2230 % pattern)
2230 % pattern)
2231 s = set([repo[tn].rev()])
2231 s = set([repo[tn].rev()])
2232 else:
2232 else:
2233 s = set([cl.rev(n) for t, n in repo.tagslist() if matcher(t)])
2233 s = set([cl.rev(n) for t, n in repo.tagslist() if matcher(t)])
2234 else:
2234 else:
2235 s = set([cl.rev(n) for t, n in repo.tagslist() if t != 'tip'])
2235 s = set([cl.rev(n) for t, n in repo.tagslist() if t != 'tip'])
2236 return subset & s
2236 return subset & s
2237
2237
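A hedged example of the two pattern kinds (repo assumed; the tag names are hypothetical):

# tags that look like 1.x release tags
releases = repo.revs(r'tag("re:^1\.\d+$")')
# a tag literally named "re:nightly" would need tag("literal:re:nightly")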
2238 @predicate('tagged', safe=True)
2238 @predicate('tagged', safe=True)
2239 def tagged(repo, subset, x):
2239 def tagged(repo, subset, x):
2240 return tag(repo, subset, x)
2240 return tag(repo, subset, x)
2241
2241
2242 @predicate('unstable()', safe=True)
2242 @predicate('unstable()', safe=True)
2243 def unstable(repo, subset, x):
2243 def unstable(repo, subset, x):
2244 """Non-obsolete changesets with obsolete ancestors.
2244 """Non-obsolete changesets with obsolete ancestors.
2245 """
2245 """
2246 # i18n: "unstable" is a keyword
2246 # i18n: "unstable" is a keyword
2247 getargs(x, 0, 0, _("unstable takes no arguments"))
2247 getargs(x, 0, 0, _("unstable takes no arguments"))
2248 unstables = obsmod.getrevs(repo, 'unstable')
2248 unstables = obsmod.getrevs(repo, 'unstable')
2249 return subset & unstables
2249 return subset & unstables
2250
2250
2251
2251
2252 @predicate('user(string)', safe=True)
2252 @predicate('user(string)', safe=True)
2253 def user(repo, subset, x):
2253 def user(repo, subset, x):
2254 """User name contains string. The match is case-insensitive.
2254 """User name contains string. The match is case-insensitive.
2255
2255
2256 If `string` starts with `re:`, the remainder of the string is treated as
2256 If `string` starts with `re:`, the remainder of the string is treated as
2257 a regular expression. To match a user that actually contains `re:`, use
2257 a regular expression. To match a user that actually contains `re:`, use
2258 the prefix `literal:`.
2258 the prefix `literal:`.
2259 """
2259 """
2260 return author(repo, subset, x)
2260 return author(repo, subset, x)
2261
2261
2262 # experimental
2262 # experimental
2263 @predicate('wdir', safe=True)
2263 @predicate('wdir', safe=True)
2264 def wdir(repo, subset, x):
2264 def wdir(repo, subset, x):
2265 # i18n: "wdir" is a keyword
2265 # i18n: "wdir" is a keyword
2266 getargs(x, 0, 0, _("wdir takes no arguments"))
2266 getargs(x, 0, 0, _("wdir takes no arguments"))
2267 if node.wdirrev in subset or isinstance(subset, fullreposet):
2267 if node.wdirrev in subset or isinstance(subset, fullreposet):
2268 return baseset([node.wdirrev])
2268 return baseset([node.wdirrev])
2269 return baseset()
2269 return baseset()
2270
2270
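The predicate is experimental, but its behaviour is small enough to sketch (repo assumed; node.wdirrev is the working-directory sentinel defined in mercurial.node):

# wdir() resolves to the magic working-directory revision number when the
# subset can contain it, and to an empty set otherwise
assert list(repo.revs('wdir()')) == [node.wdirrev]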
2271 def _orderedlist(repo, subset, x):
2271 def _orderedlist(repo, subset, x):
2272 s = getstring(x, "internal error")
2272 s = getstring(x, "internal error")
2273 if not s:
2273 if not s:
2274 return baseset()
2274 return baseset()
2275 # remove duplicates here. it's difficult for caller to deduplicate sets
2275 # remove duplicates here. it's difficult for caller to deduplicate sets
2276 # because different symbols can point to the same rev.
2276 # because different symbols can point to the same rev.
2277 cl = repo.changelog
2277 cl = repo.changelog
2278 ls = []
2278 ls = []
2279 seen = set()
2279 seen = set()
2280 for t in s.split('\0'):
2280 for t in s.split('\0'):
2281 try:
2281 try:
2282 # fast path for integer revision
2282 # fast path for integer revision
2283 r = int(t)
2283 r = int(t)
2284 if str(r) != t or r not in cl:
2284 if str(r) != t or r not in cl:
2285 raise ValueError
2285 raise ValueError
2286 revs = [r]
2286 revs = [r]
2287 except ValueError:
2287 except ValueError:
2288 revs = stringset(repo, subset, t)
2288 revs = stringset(repo, subset, t)
2289
2289
2290 for r in revs:
2290 for r in revs:
2291 if r in seen:
2291 if r in seen:
2292 continue
2292 continue
2293 if (r in subset
2293 if (r in subset
2294 or r == node.nullrev and isinstance(subset, fullreposet)):
2294 or r == node.nullrev and isinstance(subset, fullreposet)):
2295 ls.append(r)
2295 ls.append(r)
2296 seen.add(r)
2296 seen.add(r)
2297 return baseset(ls)
2297 return baseset(ls)
2298
2298
2299 # for internal use
2299 # for internal use
2300 @predicate('_list', safe=True, takeorder=True)
2300 @predicate('_list', safe=True, takeorder=True)
2301 def _list(repo, subset, x, order):
2301 def _list(repo, subset, x, order):
2302 if order == followorder:
2302 if order == followorder:
2303 # slow path to take the subset order
2303 # slow path to take the subset order
2304 return subset & _orderedlist(repo, fullreposet(repo), x)
2304 return subset & _orderedlist(repo, fullreposet(repo), x)
2305 else:
2305 else:
2306 return _orderedlist(repo, subset, x)
2306 return _orderedlist(repo, subset, x)
2307
2307
2308 def _orderedintlist(repo, subset, x):
2308 def _orderedintlist(repo, subset, x):
2309 s = getstring(x, "internal error")
2309 s = getstring(x, "internal error")
2310 if not s:
2310 if not s:
2311 return baseset()
2311 return baseset()
2312 ls = [int(r) for r in s.split('\0')]
2312 ls = [int(r) for r in s.split('\0')]
2313 s = subset
2313 s = subset
2314 return baseset([r for r in ls if r in s])
2314 return baseset([r for r in ls if r in s])
2315
2315
2316 # for internal use
2316 # for internal use
2317 @predicate('_intlist', safe=True, takeorder=True)
2317 @predicate('_intlist', safe=True, takeorder=True)
2318 def _intlist(repo, subset, x, order):
2318 def _intlist(repo, subset, x, order):
2319 if order == followorder:
2319 if order == followorder:
2320 # slow path to take the subset order
2320 # slow path to take the subset order
2321 return subset & _orderedintlist(repo, fullreposet(repo), x)
2321 return subset & _orderedintlist(repo, fullreposet(repo), x)
2322 else:
2322 else:
2323 return _orderedintlist(repo, subset, x)
2323 return _orderedintlist(repo, subset, x)
2324
2324
2325 def _orderedhexlist(repo, subset, x):
2325 def _orderedhexlist(repo, subset, x):
2326 s = getstring(x, "internal error")
2326 s = getstring(x, "internal error")
2327 if not s:
2327 if not s:
2328 return baseset()
2328 return baseset()
2329 cl = repo.changelog
2329 cl = repo.changelog
2330 ls = [cl.rev(node.bin(r)) for r in s.split('\0')]
2330 ls = [cl.rev(node.bin(r)) for r in s.split('\0')]
2331 s = subset
2331 s = subset
2332 return baseset([r for r in ls if r in s])
2332 return baseset([r for r in ls if r in s])
2333
2333
2334 # for internal use
2334 # for internal use
2335 @predicate('_hexlist', safe=True, takeorder=True)
2335 @predicate('_hexlist', safe=True, takeorder=True)
2336 def _hexlist(repo, subset, x, order):
2336 def _hexlist(repo, subset, x, order):
2337 if order == followorder:
2337 if order == followorder:
2338 # slow path to take the subset order
2338 # slow path to take the subset order
2339 return subset & _orderedhexlist(repo, fullreposet(repo), x)
2339 return subset & _orderedhexlist(repo, fullreposet(repo), x)
2340 else:
2340 else:
2341 return _orderedhexlist(repo, subset, x)
2341 return _orderedhexlist(repo, subset, x)
2342
2342
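These internal predicates normally appear through the %ld/%ls expansions of repo.revs() rather than in hand-written revsets; a hedged sketch:

# expected to expand to something like "_intlist('3\x005\x008') and merge()"
merges_among = repo.revs('%ld and merge()', [3, 5, 8])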
2343 methods = {
2343 methods = {
2344 "range": rangeset,
2344 "range": rangeset,
2345 "rangepre": rangepre,
2345 "rangepre": rangepre,
2346 "dagrange": dagrange,
2346 "dagrange": dagrange,
2347 "string": stringset,
2347 "string": stringset,
2348 "symbol": stringset,
2348 "symbol": stringset,
2349 "and": andset,
2349 "and": andset,
2350 "or": orset,
2350 "or": orset,
2351 "not": notset,
2351 "not": notset,
2352 "difference": differenceset,
2352 "difference": differenceset,
2353 "list": listset,
2353 "list": listset,
2354 "keyvalue": keyvaluepair,
2354 "keyvalue": keyvaluepair,
2355 "func": func,
2355 "func": func,
2356 "ancestor": ancestorspec,
2356 "ancestor": ancestorspec,
2357 "parent": parentspec,
2357 "parent": parentspec,
2358 "parentpost": parentpost,
2358 "parentpost": parentpost,
2359 }
2359 }
2360
2360
2361 # Constants for ordering requirement, used in _analyze():
2361 # Constants for ordering requirement, used in _analyze():
2362 #
2362 #
2363 # If 'define', any nested functions and operations can change the ordering of
2363 # If 'define', any nested functions and operations can change the ordering of
2364 # the entries in the set. If 'follow', any nested functions and operations
2364 # the entries in the set. If 'follow', any nested functions and operations
2365 # should take the ordering specified by the first operand to the '&' operator.
2365 # should take the ordering specified by the first operand to the '&' operator.
2366 #
2366 #
2367 # For instance,
2367 # For instance,
2368 #
2368 #
2369 # X & (Y | Z)
2369 # X & (Y | Z)
2370 # ^ ^^^^^^^
2370 # ^ ^^^^^^^
2371 # | follow
2371 # | follow
2372 # define
2372 # define
2373 #
2373 #
2374 # will be evaluated as 'or(y(x()), z(x()))', where 'x()' can change the order
2374 # will be evaluated as 'or(y(x()), z(x()))', where 'x()' can change the order
2375 # of the entries in the set, but 'y()', 'z()' and 'or()' shouldn't.
2375 # of the entries in the set, but 'y()', 'z()' and 'or()' shouldn't.
2376 #
2376 #
2377 # 'any' means the order doesn't matter. For instance,
2377 # 'any' means the order doesn't matter. For instance,
2378 #
2378 #
2379 # X & !Y
2379 # X & !Y
2380 # ^
2380 # ^
2381 # any
2381 # any
2382 #
2382 #
2383 # 'y()' can either enforce its ordering requirement or take the ordering
2383 # 'y()' can either enforce its ordering requirement or take the ordering
2384 # specified by 'x()' because 'not()' doesn't care about the order.
2384 # specified by 'x()' because 'not()' doesn't care about the order.
2385 #
2385 #
2386 # Transition of ordering requirement:
2386 # Transition of ordering requirement:
2387 #
2387 #
2388 # 1. starts with 'define'
2388 # 1. starts with 'define'
2389 # 2. shifts to 'follow' by 'x & y'
2389 # 2. shifts to 'follow' by 'x & y'
2390 # 3. changes back to 'define' on function call 'f(x)' or function-like
2390 # 3. changes back to 'define' on function call 'f(x)' or function-like
2391 # operation 'x (f) y' because 'f' may have its own ordering requirement
2391 # operation 'x (f) y' because 'f' may have its own ordering requirement
2392 # for 'x' and 'y' (e.g. 'first(x)')
2392 # for 'x' and 'y' (e.g. 'first(x)')
2393 #
2393 #
2394 anyorder = 'any' # doesn't care about the order
2394 anyorder = 'any' # doesn't care about the order
2395 defineorder = 'define' # should define the order
2395 defineorder = 'define' # should define the order
2396 followorder = 'follow' # must follow the current order
2396 followorder = 'follow' # must follow the current order
2397
2397
2398 # transition table for 'x & y', from the current expression 'x' to 'y'
2398 # transition table for 'x & y', from the current expression 'x' to 'y'
2399 _tofolloworder = {
2399 _tofolloworder = {
2400 anyorder: anyorder,
2400 anyorder: anyorder,
2401 defineorder: followorder,
2401 defineorder: followorder,
2402 followorder: followorder,
2402 followorder: followorder,
2403 }
2403 }
2404
2404
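A small probe of the transition described above: the left operand of '&' keeps the 'define' requirement while the right operand is analyzed with 'follow' (the output shape below is what the code above suggests, not an authoritative transcript):

tree = analyze(parse('x & y()'))
# expected shape:
# ('and', ('symbol', 'x'), ('func', ('symbol', 'y'), None, 'follow'), 'define')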
2405 def _matchonly(revs, bases):
2405 def _matchonly(revs, bases):
2406 """
2406 """
2407 >>> f = lambda *args: _matchonly(*map(parse, args))
2407 >>> f = lambda *args: _matchonly(*map(parse, args))
2408 >>> f('ancestors(A)', 'not ancestors(B)')
2408 >>> f('ancestors(A)', 'not ancestors(B)')
2409 ('list', ('symbol', 'A'), ('symbol', 'B'))
2409 ('list', ('symbol', 'A'), ('symbol', 'B'))
2410 """
2410 """
2411 if (revs is not None
2411 if (revs is not None
2412 and revs[0] == 'func'
2412 and revs[0] == 'func'
2413 and getsymbol(revs[1]) == 'ancestors'
2413 and getsymbol(revs[1]) == 'ancestors'
2414 and bases is not None
2414 and bases is not None
2415 and bases[0] == 'not'
2415 and bases[0] == 'not'
2416 and bases[1][0] == 'func'
2416 and bases[1][0] == 'func'
2417 and getsymbol(bases[1][1]) == 'ancestors'):
2417 and getsymbol(bases[1][1]) == 'ancestors'):
2418 return ('list', revs[2], bases[1][2])
2418 return ('list', revs[2], bases[1][2])
2419
2419
2420 def _fixops(x):
2420 def _fixops(x):
2421 """Rewrite raw parsed tree to resolve ambiguous syntax which cannot be
2421 """Rewrite raw parsed tree to resolve ambiguous syntax which cannot be
2422 handled well by our simple top-down parser"""
2422 handled well by our simple top-down parser"""
2423 if not isinstance(x, tuple):
2423 if not isinstance(x, tuple):
2424 return x
2424 return x
2425
2425
2426 op = x[0]
2426 op = x[0]
2427 if op == 'parent':
2427 if op == 'parent':
2428 # x^:y means (x^) : y, not x ^ (:y)
2428 # x^:y means (x^) : y, not x ^ (:y)
2429 # x^: means (x^) :, not x ^ (:)
2429 # x^: means (x^) :, not x ^ (:)
2430 post = ('parentpost', x[1])
2430 post = ('parentpost', x[1])
2431 if x[2][0] == 'dagrangepre':
2431 if x[2][0] == 'dagrangepre':
2432 return _fixops(('dagrange', post, x[2][1]))
2432 return _fixops(('dagrange', post, x[2][1]))
2433 elif x[2][0] == 'rangepre':
2433 elif x[2][0] == 'rangepre':
2434 return _fixops(('range', post, x[2][1]))
2434 return _fixops(('range', post, x[2][1]))
2435 elif x[2][0] == 'rangeall':
2435 elif x[2][0] == 'rangeall':
2436 return _fixops(('rangepost', post))
2436 return _fixops(('rangepost', post))
2437 elif op == 'or':
2437 elif op == 'or':
2438 # make number of arguments deterministic:
2438 # make number of arguments deterministic:
2439 # x + y + z -> (or x y z) -> (or (list x y z))
2439 # x + y + z -> (or x y z) -> (or (list x y z))
2440 return (op, _fixops(('list',) + x[1:]))
2440 return (op, _fixops(('list',) + x[1:]))
2441
2441
2442 return (op,) + tuple(_fixops(y) for y in x[1:])
2442 return (op,) + tuple(_fixops(y) for y in x[1:])
2443
2443
2444 def _analyze(x, order):
2444 def _analyze(x, order):
2445 if x is None:
2445 if x is None:
2446 return x
2446 return x
2447
2447
2448 op = x[0]
2448 op = x[0]
2449 if op == 'minus':
2449 if op == 'minus':
2450 return _analyze(('and', x[1], ('not', x[2])), order)
2450 return _analyze(('and', x[1], ('not', x[2])), order)
2451 elif op == 'only':
2451 elif op == 'only':
2452 t = ('func', ('symbol', 'only'), ('list', x[1], x[2]))
2452 t = ('func', ('symbol', 'only'), ('list', x[1], x[2]))
2453 return _analyze(t, order)
2453 return _analyze(t, order)
2454 elif op == 'onlypost':
2454 elif op == 'onlypost':
2455 return _analyze(('func', ('symbol', 'only'), x[1]), order)
2455 return _analyze(('func', ('symbol', 'only'), x[1]), order)
2456 elif op == 'dagrangepre':
2456 elif op == 'dagrangepre':
2457 return _analyze(('func', ('symbol', 'ancestors'), x[1]), order)
2457 return _analyze(('func', ('symbol', 'ancestors'), x[1]), order)
2458 elif op == 'dagrangepost':
2458 elif op == 'dagrangepost':
2459 return _analyze(('func', ('symbol', 'descendants'), x[1]), order)
2459 return _analyze(('func', ('symbol', 'descendants'), x[1]), order)
2460 elif op == 'rangeall':
2460 elif op == 'rangeall':
2461 return _analyze(('rangepre', ('string', 'tip')), order)
2461 return _analyze(('rangepre', ('string', 'tip')), order)
2462 elif op == 'rangepost':
2462 elif op == 'rangepost':
2463 return _analyze(('range', x[1], ('string', 'tip')), order)
2463 return _analyze(('range', x[1], ('string', 'tip')), order)
2464 elif op == 'negate':
2464 elif op == 'negate':
2465 s = getstring(x[1], _("can't negate that"))
2465 s = getstring(x[1], _("can't negate that"))
2466 return _analyze(('string', '-' + s), order)
2466 return _analyze(('string', '-' + s), order)
2467 elif op in ('string', 'symbol'):
2467 elif op in ('string', 'symbol'):
2468 return x
2468 return x
2469 elif op == 'and':
2469 elif op == 'and':
2470 ta = _analyze(x[1], order)
2470 ta = _analyze(x[1], order)
2471 tb = _analyze(x[2], _tofolloworder[order])
2471 tb = _analyze(x[2], _tofolloworder[order])
2472 return (op, ta, tb, order)
2472 return (op, ta, tb, order)
2473 elif op == 'or':
2473 elif op == 'or':
2474 return (op, _analyze(x[1], order), order)
2474 return (op, _analyze(x[1], order), order)
2475 elif op == 'not':
2475 elif op == 'not':
2476 return (op, _analyze(x[1], anyorder), order)
2476 return (op, _analyze(x[1], anyorder), order)
2477 elif op in ('rangepre', 'parentpost'):
2477 elif op in ('rangepre', 'parentpost'):
2478 return (op, _analyze(x[1], defineorder), order)
2478 return (op, _analyze(x[1], defineorder), order)
2479 elif op == 'group':
2479 elif op == 'group':
2480 return _analyze(x[1], order)
2480 return _analyze(x[1], order)
2481 elif op in ('dagrange', 'range', 'parent', 'ancestor'):
2481 elif op in ('dagrange', 'range', 'parent', 'ancestor'):
2482 ta = _analyze(x[1], defineorder)
2482 ta = _analyze(x[1], defineorder)
2483 tb = _analyze(x[2], defineorder)
2483 tb = _analyze(x[2], defineorder)
2484 return (op, ta, tb, order)
2484 return (op, ta, tb, order)
2485 elif op == 'list':
2485 elif op == 'list':
2486 return (op,) + tuple(_analyze(y, order) for y in x[1:])
2486 return (op,) + tuple(_analyze(y, order) for y in x[1:])
2487 elif op == 'keyvalue':
2487 elif op == 'keyvalue':
2488 return (op, x[1], _analyze(x[2], order))
2488 return (op, x[1], _analyze(x[2], order))
2489 elif op == 'func':
2489 elif op == 'func':
2490 f = getsymbol(x[1])
2490 f = getsymbol(x[1])
2491 d = defineorder
2491 d = defineorder
2492 if f == 'present':
2492 if f == 'present':
2493 # 'present(set)' is known to return the argument set with no
2493 # 'present(set)' is known to return the argument set with no
2494 # modification, so forward the current order to its argument
2494 # modification, so forward the current order to its argument
2495 d = order
2495 d = order
2496 return (op, x[1], _analyze(x[2], d), order)
2496 return (op, x[1], _analyze(x[2], d), order)
2497 raise ValueError('invalid operator %r' % op)
2497 raise ValueError('invalid operator %r' % op)
2498
2498
2499 def analyze(x, order=defineorder):
2499 def analyze(x, order=defineorder):
2500 """Transform raw parsed tree to evaluatable tree which can be fed to
2500 """Transform raw parsed tree to evaluatable tree which can be fed to
2501 optimize() or getset()
2501 optimize() or getset()
2502
2502
2503 All pseudo operations should be mapped to real operations or functions
2503 All pseudo operations should be mapped to real operations or functions
2504 defined in methods or symbols table respectively.
2504 defined in methods or symbols table respectively.
2505
2505
2506 'order' specifies how the current expression 'x' is ordered (see the
2506 'order' specifies how the current expression 'x' is ordered (see the
2507 constants defined above.)
2507 constants defined above.)
2508 """
2508 """
2509 return _analyze(x, order)
2509 return _analyze(x, order)
2510
2510
2511 def _optimize(x, small):
2511 def _optimize(x, small):
2512 if x is None:
2512 if x is None:
2513 return 0, x
2513 return 0, x
2514
2514
2515 smallbonus = 1
2515 smallbonus = 1
2516 if small:
2516 if small:
2517 smallbonus = .5
2517 smallbonus = .5
2518
2518
2519 op = x[0]
2519 op = x[0]
2520 if op in ('string', 'symbol'):
2520 if op in ('string', 'symbol'):
2521 return smallbonus, x # single revisions are small
2521 return smallbonus, x # single revisions are small
2522 elif op == 'and':
2522 elif op == 'and':
2523 wa, ta = _optimize(x[1], True)
2523 wa, ta = _optimize(x[1], True)
2524 wb, tb = _optimize(x[2], True)
2524 wb, tb = _optimize(x[2], True)
2525 order = x[3]
2525 order = x[3]
2526 w = min(wa, wb)
2526 w = min(wa, wb)
2527
2527
2528 # (::x and not ::y)/(not ::y and ::x) have a fast path
2528 # (::x and not ::y)/(not ::y and ::x) have a fast path
2529 tm = _matchonly(ta, tb) or _matchonly(tb, ta)
2529 tm = _matchonly(ta, tb) or _matchonly(tb, ta)
2530 if tm:
2530 if tm:
2531 return w, ('func', ('symbol', 'only'), tm, order)
2531 return w, ('func', ('symbol', 'only'), tm, order)
2532
2532
2533 if tb is not None and tb[0] == 'not':
2533 if tb is not None and tb[0] == 'not':
2534 return wa, ('difference', ta, tb[1], order)
2534 return wa, ('difference', ta, tb[1], order)
2535
2535
2536 if wa > wb:
2536 if wa > wb:
2537 return w, (op, tb, ta, order)
2537 return w, (op, tb, ta, order)
2538 return w, (op, ta, tb, order)
2538 return w, (op, ta, tb, order)
2539 elif op == 'or':
2539 elif op == 'or':
2540 # fast path for machine-generated expressions, which are likely to have
2540 # fast path for machine-generated expressions, which are likely to have
2541 # lots of trivial revisions: 'a + b + c()' to '_list(a b) + c()'
2541 # lots of trivial revisions: 'a + b + c()' to '_list(a b) + c()'
2542 order = x[2]
2542 order = x[2]
2543 ws, ts, ss = [], [], []
2543 ws, ts, ss = [], [], []
2544 def flushss():
2544 def flushss():
2545 if not ss:
2545 if not ss:
2546 return
2546 return
2547 if len(ss) == 1:
2547 if len(ss) == 1:
2548 w, t = ss[0]
2548 w, t = ss[0]
2549 else:
2549 else:
2550 s = '\0'.join(t[1] for w, t in ss)
2550 s = '\0'.join(t[1] for w, t in ss)
2551 y = ('func', ('symbol', '_list'), ('string', s), order)
2551 y = ('func', ('symbol', '_list'), ('string', s), order)
2552 w, t = _optimize(y, False)
2552 w, t = _optimize(y, False)
2553 ws.append(w)
2553 ws.append(w)
2554 ts.append(t)
2554 ts.append(t)
2555 del ss[:]
2555 del ss[:]
2556 for y in getlist(x[1]):
2556 for y in getlist(x[1]):
2557 w, t = _optimize(y, False)
2557 w, t = _optimize(y, False)
2558 if t is not None and (t[0] == 'string' or t[0] == 'symbol'):
2558 if t is not None and (t[0] == 'string' or t[0] == 'symbol'):
2559 ss.append((w, t))
2559 ss.append((w, t))
2560 continue
2560 continue
2561 flushss()
2561 flushss()
2562 ws.append(w)
2562 ws.append(w)
2563 ts.append(t)
2563 ts.append(t)
2564 flushss()
2564 flushss()
2565 if len(ts) == 1:
2565 if len(ts) == 1:
2566 return ws[0], ts[0] # 'or' operation is fully optimized out
2566 return ws[0], ts[0] # 'or' operation is fully optimized out
2567 # we can't reorder trees by weight because it would change the order.
2567 # we can't reorder trees by weight because it would change the order.
2568 # ("sort(a + b)" == "sort(b + a)", but "a + b" != "b + a")
2568 # ("sort(a + b)" == "sort(b + a)", but "a + b" != "b + a")
2569 # ts = tuple(t for w, t in sorted(zip(ws, ts), key=lambda wt: wt[0]))
2569 # ts = tuple(t for w, t in sorted(zip(ws, ts), key=lambda wt: wt[0]))
2570 return max(ws), (op, ('list',) + tuple(ts), order)
2570 return max(ws), (op, ('list',) + tuple(ts), order)
2571 elif op == 'not':
2571 elif op == 'not':
2572 # Optimize not public() to _notpublic() because we have a fast version
2572 # Optimize not public() to _notpublic() because we have a fast version
2573 if x[1][:3] == ('func', ('symbol', 'public'), None):
2573 if x[1][:3] == ('func', ('symbol', 'public'), None):
2574 order = x[1][3]
2574 order = x[1][3]
2575 newsym = ('func', ('symbol', '_notpublic'), None, order)
2575 newsym = ('func', ('symbol', '_notpublic'), None, order)
2576 o = _optimize(newsym, not small)
2576 o = _optimize(newsym, not small)
2577 return o[0], o[1]
2577 return o[0], o[1]
2578 else:
2578 else:
2579 o = _optimize(x[1], not small)
2579 o = _optimize(x[1], not small)
2580 order = x[2]
2580 order = x[2]
2581 return o[0], (op, o[1], order)
2581 return o[0], (op, o[1], order)
2582 elif op in ('rangepre', 'parentpost'):
2582 elif op in ('rangepre', 'parentpost'):
2583 o = _optimize(x[1], small)
2583 o = _optimize(x[1], small)
2584 order = x[2]
2584 order = x[2]
2585 return o[0], (op, o[1], order)
2585 return o[0], (op, o[1], order)
2586 elif op in ('dagrange', 'range', 'parent', 'ancestor'):
2586 elif op in ('dagrange', 'range', 'parent', 'ancestor'):
2587 wa, ta = _optimize(x[1], small)
2587 wa, ta = _optimize(x[1], small)
2588 wb, tb = _optimize(x[2], small)
2588 wb, tb = _optimize(x[2], small)
2589 order = x[3]
2589 order = x[3]
2590 return wa + wb, (op, ta, tb, order)
2590 return wa + wb, (op, ta, tb, order)
2591 elif op == 'list':
2591 elif op == 'list':
2592 ws, ts = zip(*(_optimize(y, small) for y in x[1:]))
2592 ws, ts = zip(*(_optimize(y, small) for y in x[1:]))
2593 return sum(ws), (op,) + ts
2593 return sum(ws), (op,) + ts
2594 elif op == 'keyvalue':
2594 elif op == 'keyvalue':
2595 w, t = _optimize(x[2], small)
2595 w, t = _optimize(x[2], small)
2596 return w, (op, x[1], t)
2596 return w, (op, x[1], t)
2597 elif op == 'func':
2597 elif op == 'func':
2598 f = getsymbol(x[1])
2598 f = getsymbol(x[1])
2599 wa, ta = _optimize(x[2], small)
2599 wa, ta = _optimize(x[2], small)
2600 if f in ('author', 'branch', 'closed', 'date', 'desc', 'file', 'grep',
2600 if f in ('author', 'branch', 'closed', 'date', 'desc', 'file', 'grep',
2601 'keyword', 'outgoing', 'user', 'destination'):
2601 'keyword', 'outgoing', 'user', 'destination'):
2602 w = 10 # slow
2602 w = 10 # slow
2603 elif f in ('modifies', 'adds', 'removes'):
2603 elif f in ('modifies', 'adds', 'removes'):
2604 w = 30 # slower
2604 w = 30 # slower
2605 elif f == "contains":
2605 elif f == "contains":
2606 w = 100 # very slow
2606 w = 100 # very slow
2607 elif f == "ancestor":
2607 elif f == "ancestor":
2608 w = 1 * smallbonus
2608 w = 1 * smallbonus
2609 elif f in ('reverse', 'limit', 'first', '_intlist'):
2609 elif f in ('reverse', 'limit', 'first', 'wdir', '_intlist'):
2610 w = 0
2610 w = 0
2611 elif f == "sort":
2611 elif f == "sort":
2612 w = 10 # assume most sorts look at changelog
2612 w = 10 # assume most sorts look at changelog
2613 else:
2613 else:
2614 w = 1
2614 w = 1
2615 order = x[3]
2615 order = x[3]
2616 return w + wa, (op, x[1], ta, order)
2616 return w + wa, (op, x[1], ta, order)
2617 raise ValueError('invalid operator %r' % op)
2617 raise ValueError('invalid operator %r' % op)
2618
2618
2619 def optimize(tree):
2619 def optimize(tree):
2620 """Optimize evaluatable tree
2620 """Optimize evaluatable tree
2621
2621
2622 All pseudo operations should be transformed beforehand.
2622 All pseudo operations should be transformed beforehand.
2623 """
2623 """
2624 _weight, newtree = _optimize(tree, small=True)
2624 _weight, newtree = _optimize(tree, small=True)
2625 return newtree
2625 return newtree
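As a hedged illustration of what the optimizer does with the weights computed above, the sketch below runs the parse/analyze/optimize pipeline by hand (analyze() and defineorder are used exactly as match() does further down; the import path assumes this revision of Mercurial). The hunk above moves 'wdir' into the weight-0 group next to 'reverse', 'limit', 'first' and '_intlist', so wdir() is now treated as essentially free when _optimize() decides which operand of an 'and' to evaluate first.

    from mercurial import revset

    # trivial 'or' operands are folded into a single _list() call, and the
    # 'or' node itself is optimized away because only one operand remains
    tree = revset.parse('10 or 11 or 12')
    tree = revset.analyze(tree, revset.defineorder)
    print(revset.prettyformat(revset.optimize(tree)))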
2626
2626
2627 # the set of valid characters for the initial letter of symbols in
2627 # the set of valid characters for the initial letter of symbols in
2628 # alias declarations and definitions
2628 # alias declarations and definitions
2629 _aliassyminitletters = _syminitletters | set(pycompat.sysstr('$'))
2629 _aliassyminitletters = _syminitletters | set(pycompat.sysstr('$'))
2630
2630
2631 def _parsewith(spec, lookup=None, syminitletters=None):
2631 def _parsewith(spec, lookup=None, syminitletters=None):
2632 """Generate a parse tree of given spec with given tokenizing options
2632 """Generate a parse tree of given spec with given tokenizing options
2633
2633
2634 >>> _parsewith('foo($1)', syminitletters=_aliassyminitletters)
2634 >>> _parsewith('foo($1)', syminitletters=_aliassyminitletters)
2635 ('func', ('symbol', 'foo'), ('symbol', '$1'))
2635 ('func', ('symbol', 'foo'), ('symbol', '$1'))
2636 >>> _parsewith('$1')
2636 >>> _parsewith('$1')
2637 Traceback (most recent call last):
2637 Traceback (most recent call last):
2638 ...
2638 ...
2639 ParseError: ("syntax error in revset '$1'", 0)
2639 ParseError: ("syntax error in revset '$1'", 0)
2640 >>> _parsewith('foo bar')
2640 >>> _parsewith('foo bar')
2641 Traceback (most recent call last):
2641 Traceback (most recent call last):
2642 ...
2642 ...
2643 ParseError: ('invalid token', 4)
2643 ParseError: ('invalid token', 4)
2644 """
2644 """
2645 p = parser.parser(elements)
2645 p = parser.parser(elements)
2646 tree, pos = p.parse(tokenize(spec, lookup=lookup,
2646 tree, pos = p.parse(tokenize(spec, lookup=lookup,
2647 syminitletters=syminitletters))
2647 syminitletters=syminitletters))
2648 if pos != len(spec):
2648 if pos != len(spec):
2649 raise error.ParseError(_('invalid token'), pos)
2649 raise error.ParseError(_('invalid token'), pos)
2650 return _fixops(parser.simplifyinfixops(tree, ('list', 'or')))
2650 return _fixops(parser.simplifyinfixops(tree, ('list', 'or')))
2651
2651
2652 class _aliasrules(parser.basealiasrules):
2652 class _aliasrules(parser.basealiasrules):
2653 """Parsing and expansion rule set of revset aliases"""
2653 """Parsing and expansion rule set of revset aliases"""
2654 _section = _('revset alias')
2654 _section = _('revset alias')
2655
2655
2656 @staticmethod
2656 @staticmethod
2657 def _parse(spec):
2657 def _parse(spec):
2658 """Parse alias declaration/definition ``spec``
2658 """Parse alias declaration/definition ``spec``
2659
2659
2660 This allows symbol names to also use ``$`` as an initial letter
2660 This allows symbol names to also use ``$`` as an initial letter
2661 (for backward compatibility), and callers of this function should
2661 (for backward compatibility), and callers of this function should
2662 examine whether ``$`` is also used for unexpected symbols.
2662 examine whether ``$`` is also used for unexpected symbols.
2663 """
2663 """
2664 return _parsewith(spec, syminitletters=_aliassyminitletters)
2664 return _parsewith(spec, syminitletters=_aliassyminitletters)
2665
2665
2666 @staticmethod
2666 @staticmethod
2667 def _trygetfunc(tree):
2667 def _trygetfunc(tree):
2668 if tree[0] == 'func' and tree[1][0] == 'symbol':
2668 if tree[0] == 'func' and tree[1][0] == 'symbol':
2669 return tree[1][1], getlist(tree[2])
2669 return tree[1][1], getlist(tree[2])
2670
2670
2671 def expandaliases(ui, tree):
2671 def expandaliases(ui, tree):
2672 aliases = _aliasrules.buildmap(ui.configitems('revsetalias'))
2672 aliases = _aliasrules.buildmap(ui.configitems('revsetalias'))
2673 tree = _aliasrules.expand(aliases, tree)
2673 tree = _aliasrules.expand(aliases, tree)
2674 # warn about problematic (but not referred) aliases
2674 # warn about problematic (but not referred) aliases
2675 for name, alias in sorted(aliases.iteritems()):
2675 for name, alias in sorted(aliases.iteritems()):
2676 if alias.error and not alias.warned:
2676 if alias.error and not alias.warned:
2677 ui.warn(_('warning: %s\n') % (alias.error))
2677 ui.warn(_('warning: %s\n') % (alias.error))
2678 alias.warned = True
2678 alias.warned = True
2679 return tree
2679 return tree
2680
2680
2681 def foldconcat(tree):
2681 def foldconcat(tree):
2682 """Fold elements to be concatenated by `##`
2682 """Fold elements to be concatenated by `##`
2683 """
2683 """
2684 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2684 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2685 return tree
2685 return tree
2686 if tree[0] == '_concat':
2686 if tree[0] == '_concat':
2687 pending = [tree]
2687 pending = [tree]
2688 l = []
2688 l = []
2689 while pending:
2689 while pending:
2690 e = pending.pop()
2690 e = pending.pop()
2691 if e[0] == '_concat':
2691 if e[0] == '_concat':
2692 pending.extend(reversed(e[1:]))
2692 pending.extend(reversed(e[1:]))
2693 elif e[0] in ('string', 'symbol'):
2693 elif e[0] in ('string', 'symbol'):
2694 l.append(e[1])
2694 l.append(e[1])
2695 else:
2695 else:
2696 msg = _("\"##\" can't concatenate \"%s\" element") % (e[0])
2696 msg = _("\"##\" can't concatenate \"%s\" element") % (e[0])
2697 raise error.ParseError(msg)
2697 raise error.ParseError(msg)
2698 return ('string', ''.join(l))
2698 return ('string', ''.join(l))
2699 else:
2699 else:
2700 return tuple(foldconcat(t) for t in tree)
2700 return tuple(foldconcat(t) for t in tree)
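A small illustration of foldconcat() on a hand-built `##` tree; the tuple shapes follow the parser output used throughout this module:

    from mercurial import revset

    # '_concat' chains of 'string'/'symbol' nodes collapse into one string;
    # any other element type raises ParseError
    tree = ('_concat', ('string', 'release_'), ('symbol', '1.9'))
    assert revset.foldconcat(tree) == ('string', 'release_1.9')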
2701
2701
2702 def parse(spec, lookup=None):
2702 def parse(spec, lookup=None):
2703 return _parsewith(spec, lookup=lookup)
2703 return _parsewith(spec, lookup=lookup)
2704
2704
2705 def posttreebuilthook(tree, repo):
2705 def posttreebuilthook(tree, repo):
2706 # hook for extensions to execute code on the optimized tree
2706 # hook for extensions to execute code on the optimized tree
2707 pass
2707 pass
2708
2708
2709 def match(ui, spec, repo=None, order=defineorder):
2709 def match(ui, spec, repo=None, order=defineorder):
2710 """Create a matcher for a single revision spec
2710 """Create a matcher for a single revision spec
2711
2711
2712 If order=followorder, a matcher takes the ordering specified by the input
2712 If order=followorder, a matcher takes the ordering specified by the input
2713 set.
2713 set.
2714 """
2714 """
2715 return matchany(ui, [spec], repo=repo, order=order)
2715 return matchany(ui, [spec], repo=repo, order=order)
2716
2716
2717 def matchany(ui, specs, repo=None, order=defineorder):
2717 def matchany(ui, specs, repo=None, order=defineorder):
2718 """Create a matcher that will include any revisions matching one of the
2718 """Create a matcher that will include any revisions matching one of the
2719 given specs
2719 given specs
2720
2720
2721 If order=followorder, a matcher takes the ordering specified by the input
2721 If order=followorder, a matcher takes the ordering specified by the input
2722 set.
2722 set.
2723 """
2723 """
2724 if not specs:
2724 if not specs:
2725 def mfunc(repo, subset=None):
2725 def mfunc(repo, subset=None):
2726 return baseset()
2726 return baseset()
2727 return mfunc
2727 return mfunc
2728 if not all(specs):
2728 if not all(specs):
2729 raise error.ParseError(_("empty query"))
2729 raise error.ParseError(_("empty query"))
2730 lookup = None
2730 lookup = None
2731 if repo:
2731 if repo:
2732 lookup = repo.__contains__
2732 lookup = repo.__contains__
2733 if len(specs) == 1:
2733 if len(specs) == 1:
2734 tree = parse(specs[0], lookup)
2734 tree = parse(specs[0], lookup)
2735 else:
2735 else:
2736 tree = ('or', ('list',) + tuple(parse(s, lookup) for s in specs))
2736 tree = ('or', ('list',) + tuple(parse(s, lookup) for s in specs))
2737
2737
2738 if ui:
2738 if ui:
2739 tree = expandaliases(ui, tree)
2739 tree = expandaliases(ui, tree)
2740 tree = foldconcat(tree)
2740 tree = foldconcat(tree)
2741 tree = analyze(tree, order)
2741 tree = analyze(tree, order)
2742 tree = optimize(tree)
2742 tree = optimize(tree)
2743 posttreebuilthook(tree, repo)
2743 posttreebuilthook(tree, repo)
2744 return makematcher(tree)
2744 return makematcher(tree)
2745
2745
2746 def makematcher(tree):
2746 def makematcher(tree):
2747 """Create a matcher from an evaluatable tree"""
2747 """Create a matcher from an evaluatable tree"""
2748 def mfunc(repo, subset=None):
2748 def mfunc(repo, subset=None):
2749 if subset is None:
2749 if subset is None:
2750 subset = fullreposet(repo)
2750 subset = fullreposet(repo)
2751 if util.safehasattr(subset, 'isascending'):
2751 if util.safehasattr(subset, 'isascending'):
2752 result = getset(repo, subset, tree)
2752 result = getset(repo, subset, tree)
2753 else:
2753 else:
2754 result = getset(repo, baseset(subset), tree)
2754 result = getset(repo, baseset(subset), tree)
2755 return result
2755 return result
2756 return mfunc
2756 return mfunc
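A hedged sketch of how a command or extension typically consumes these matchers; ui and repo are placeholders assumed to be supplied by the caller, and the two specs are ordinary revset predicates:

    from mercurial import revset

    # mfunc(repo) evaluates the optimized tree against the full repo set
    m = revset.matchany(ui, ['draft()', 'head() and not closed()'], repo=repo)
    for rev in m(repo):
        ui.write('%d\n' % rev)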
2757
2757
2758 def formatspec(expr, *args):
2758 def formatspec(expr, *args):
2759 '''
2759 '''
2760 This is a convenience function for using revsets internally, and
2760 This is a convenience function for using revsets internally, and
2761 escapes arguments appropriately. Aliases are intentionally ignored
2761 escapes arguments appropriately. Aliases are intentionally ignored
2762 so that intended expression behavior isn't accidentally subverted.
2762 so that intended expression behavior isn't accidentally subverted.
2763
2763
2764 Supported arguments:
2764 Supported arguments:
2765
2765
2766 %r = revset expression, parenthesized
2766 %r = revset expression, parenthesized
2767 %d = int(arg), no quoting
2767 %d = int(arg), no quoting
2768 %s = string(arg), escaped and single-quoted
2768 %s = string(arg), escaped and single-quoted
2769 %b = arg.branch(), escaped and single-quoted
2769 %b = arg.branch(), escaped and single-quoted
2770 %n = hex(arg), single-quoted
2770 %n = hex(arg), single-quoted
2771 %% = a literal '%'
2771 %% = a literal '%'
2772
2772
2773 Prefixing the type with 'l' specifies a parenthesized list of that type.
2773 Prefixing the type with 'l' specifies a parenthesized list of that type.
2774
2774
2775 >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
2775 >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
2776 '(10 or 11):: and ((this()) or (that()))'
2776 '(10 or 11):: and ((this()) or (that()))'
2777 >>> formatspec('%d:: and not %d::', 10, 20)
2777 >>> formatspec('%d:: and not %d::', 10, 20)
2778 '10:: and not 20::'
2778 '10:: and not 20::'
2779 >>> formatspec('%ld or %ld', [], [1])
2779 >>> formatspec('%ld or %ld', [], [1])
2780 "_list('') or 1"
2780 "_list('') or 1"
2781 >>> formatspec('keyword(%s)', 'foo\\xe9')
2781 >>> formatspec('keyword(%s)', 'foo\\xe9')
2782 "keyword('foo\\\\xe9')"
2782 "keyword('foo\\\\xe9')"
2783 >>> b = lambda: 'default'
2783 >>> b = lambda: 'default'
2784 >>> b.branch = b
2784 >>> b.branch = b
2785 >>> formatspec('branch(%b)', b)
2785 >>> formatspec('branch(%b)', b)
2786 "branch('default')"
2786 "branch('default')"
2787 >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
2787 >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
2788 "root(_list('a\\x00b\\x00c\\x00d'))"
2788 "root(_list('a\\x00b\\x00c\\x00d'))"
2789 '''
2789 '''
2790
2790
2791 def quote(s):
2791 def quote(s):
2792 return repr(str(s))
2792 return repr(str(s))
2793
2793
2794 def argtype(c, arg):
2794 def argtype(c, arg):
2795 if c == 'd':
2795 if c == 'd':
2796 return str(int(arg))
2796 return str(int(arg))
2797 elif c == 's':
2797 elif c == 's':
2798 return quote(arg)
2798 return quote(arg)
2799 elif c == 'r':
2799 elif c == 'r':
2800 parse(arg) # make sure syntax errors are confined
2800 parse(arg) # make sure syntax errors are confined
2801 return '(%s)' % arg
2801 return '(%s)' % arg
2802 elif c == 'n':
2802 elif c == 'n':
2803 return quote(node.hex(arg))
2803 return quote(node.hex(arg))
2804 elif c == 'b':
2804 elif c == 'b':
2805 return quote(arg.branch())
2805 return quote(arg.branch())
2806
2806
2807 def listexp(s, t):
2807 def listexp(s, t):
2808 l = len(s)
2808 l = len(s)
2809 if l == 0:
2809 if l == 0:
2810 return "_list('')"
2810 return "_list('')"
2811 elif l == 1:
2811 elif l == 1:
2812 return argtype(t, s[0])
2812 return argtype(t, s[0])
2813 elif t == 'd':
2813 elif t == 'd':
2814 return "_intlist('%s')" % "\0".join(str(int(a)) for a in s)
2814 return "_intlist('%s')" % "\0".join(str(int(a)) for a in s)
2815 elif t == 's':
2815 elif t == 's':
2816 return "_list('%s')" % "\0".join(s)
2816 return "_list('%s')" % "\0".join(s)
2817 elif t == 'n':
2817 elif t == 'n':
2818 return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
2818 return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
2819 elif t == 'b':
2819 elif t == 'b':
2820 return "_list('%s')" % "\0".join(a.branch() for a in s)
2820 return "_list('%s')" % "\0".join(a.branch() for a in s)
2821
2821
2822 m = l // 2
2822 m = l // 2
2823 return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))
2823 return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))
2824
2824
2825 ret = ''
2825 ret = ''
2826 pos = 0
2826 pos = 0
2827 arg = 0
2827 arg = 0
2828 while pos < len(expr):
2828 while pos < len(expr):
2829 c = expr[pos]
2829 c = expr[pos]
2830 if c == '%':
2830 if c == '%':
2831 pos += 1
2831 pos += 1
2832 d = expr[pos]
2832 d = expr[pos]
2833 if d == '%':
2833 if d == '%':
2834 ret += d
2834 ret += d
2835 elif d in 'dsnbr':
2835 elif d in 'dsnbr':
2836 ret += argtype(d, args[arg])
2836 ret += argtype(d, args[arg])
2837 arg += 1
2837 arg += 1
2838 elif d == 'l':
2838 elif d == 'l':
2839 # a list of some type
2839 # a list of some type
2840 pos += 1
2840 pos += 1
2841 d = expr[pos]
2841 d = expr[pos]
2842 ret += listexp(list(args[arg]), d)
2842 ret += listexp(list(args[arg]), d)
2843 arg += 1
2843 arg += 1
2844 else:
2844 else:
2845 raise error.Abort(_('unexpected revspec format character %s')
2845 raise error.Abort(_('unexpected revspec format character %s')
2846 % d)
2846 % d)
2847 else:
2847 else:
2848 ret += c
2848 ret += c
2849 pos += 1
2849 pos += 1
2850
2850
2851 return ret
2851 return ret
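One more usage sketch in the spirit of the doctests above, combining a quoted integer list with a quoted string (file() is a standard revset predicate):

    from mercurial import revset

    expr = revset.formatspec('%ld and file(%s)', [1, 2, 3], 'README')
    # -> "_intlist('1\x002\x003') and file('README')"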
2852
2852
2853 def prettyformat(tree):
2853 def prettyformat(tree):
2854 return parser.prettyformat(tree, ('string', 'symbol'))
2854 return parser.prettyformat(tree, ('string', 'symbol'))
2855
2855
2856 def depth(tree):
2856 def depth(tree):
2857 if isinstance(tree, tuple):
2857 if isinstance(tree, tuple):
2858 return max(map(depth, tree)) + 1
2858 return max(map(depth, tree)) + 1
2859 else:
2859 else:
2860 return 0
2860 return 0
2861
2861
2862 def funcsused(tree):
2862 def funcsused(tree):
2863 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2863 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2864 return set()
2864 return set()
2865 else:
2865 else:
2866 funcs = set()
2866 funcs = set()
2867 for s in tree[1:]:
2867 for s in tree[1:]:
2868 funcs |= funcsused(s)
2868 funcs |= funcsused(s)
2869 if tree[0] == 'func':
2869 if tree[0] == 'func':
2870 funcs.add(tree[1][1])
2870 funcs.add(tree[1][1])
2871 return funcs
2871 return funcs
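For example, funcsused() walks a parsed tree and collects only the function names (a hedged sketch; heads(), branch() and merge() are standard predicates):

    from mercurial import revset

    tree = revset.parse('heads(branch(default)) and not merge()')
    assert sorted(revset.funcsused(tree)) == ['branch', 'heads', 'merge']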
2872
2872
2873 def _formatsetrepr(r):
2873 def _formatsetrepr(r):
2874 """Format an optional printable representation of a set
2874 """Format an optional printable representation of a set
2875
2875
2876 ======== =================================
2876 ======== =================================
2877 type(r) example
2877 type(r) example
2878 ======== =================================
2878 ======== =================================
2879 tuple ('<not %r>', other)
2879 tuple ('<not %r>', other)
2880 str '<branch closed>'
2880 str '<branch closed>'
2881 callable lambda: '<branch %r>' % sorted(b)
2881 callable lambda: '<branch %r>' % sorted(b)
2882 object other
2882 object other
2883 ======== =================================
2883 ======== =================================
2884 """
2884 """
2885 if r is None:
2885 if r is None:
2886 return ''
2886 return ''
2887 elif isinstance(r, tuple):
2887 elif isinstance(r, tuple):
2888 return r[0] % r[1:]
2888 return r[0] % r[1:]
2889 elif isinstance(r, str):
2889 elif isinstance(r, str):
2890 return r
2890 return r
2891 elif callable(r):
2891 elif callable(r):
2892 return r()
2892 return r()
2893 else:
2893 else:
2894 return repr(r)
2894 return repr(r)
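A few concrete inputs matching the table above (_formatsetrepr is module-private, so calling it directly is purely illustrative):

    from mercurial.revset import _formatsetrepr

    _formatsetrepr(None)                        # ''
    _formatsetrepr(('<not %r>', [1, 2]))        # '<not [1, 2]>'
    _formatsetrepr(lambda: '<branch default>')  # '<branch default>'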
2895
2895
2896 class abstractsmartset(object):
2896 class abstractsmartset(object):
2897
2897
2898 def __nonzero__(self):
2898 def __nonzero__(self):
2899 """True if the smartset is not empty"""
2899 """True if the smartset is not empty"""
2900 raise NotImplementedError()
2900 raise NotImplementedError()
2901
2901
2902 def __contains__(self, rev):
2902 def __contains__(self, rev):
2903 """provide fast membership testing"""
2903 """provide fast membership testing"""
2904 raise NotImplementedError()
2904 raise NotImplementedError()
2905
2905
2906 def __iter__(self):
2906 def __iter__(self):
2907 """iterate the set in the order it is supposed to be iterated"""
2907 """iterate the set in the order it is supposed to be iterated"""
2908 raise NotImplementedError()
2908 raise NotImplementedError()
2909
2909
2910 # Attributes containing a function to perform a fast iteration in a given
2910 # Attributes containing a function to perform a fast iteration in a given
2911 # direction. A smartset can have none, one, or both defined.
2911 # direction. A smartset can have none, one, or both defined.
2912 #
2912 #
2913 # Default value is None instead of a function returning None to avoid
2913 # Default value is None instead of a function returning None to avoid
2914 # initializing an iterator just for testing if a fast method exists.
2914 # initializing an iterator just for testing if a fast method exists.
2915 fastasc = None
2915 fastasc = None
2916 fastdesc = None
2916 fastdesc = None
2917
2917
2918 def isascending(self):
2918 def isascending(self):
2919 """True if the set will iterate in ascending order"""
2919 """True if the set will iterate in ascending order"""
2920 raise NotImplementedError()
2920 raise NotImplementedError()
2921
2921
2922 def isdescending(self):
2922 def isdescending(self):
2923 """True if the set will iterate in descending order"""
2923 """True if the set will iterate in descending order"""
2924 raise NotImplementedError()
2924 raise NotImplementedError()
2925
2925
2926 def istopo(self):
2926 def istopo(self):
2927 """True if the set will iterate in topological order"""
2927 """True if the set will iterate in topological order"""
2928 raise NotImplementedError()
2928 raise NotImplementedError()
2929
2929
2930 def min(self):
2930 def min(self):
2931 """return the minimum element in the set"""
2931 """return the minimum element in the set"""
2932 if self.fastasc is None:
2932 if self.fastasc is None:
2933 v = min(self)
2933 v = min(self)
2934 else:
2934 else:
2935 for v in self.fastasc():
2935 for v in self.fastasc():
2936 break
2936 break
2937 else:
2937 else:
2938 raise ValueError('arg is an empty sequence')
2938 raise ValueError('arg is an empty sequence')
2939 self.min = lambda: v
2939 self.min = lambda: v
2940 return v
2940 return v
2941
2941
2942 def max(self):
2942 def max(self):
2943 """return the maximum element in the set"""
2943 """return the maximum element in the set"""
2944 if self.fastdesc is None:
2944 if self.fastdesc is None:
2945 return max(self)
2945 return max(self)
2946 else:
2946 else:
2947 for v in self.fastdesc():
2947 for v in self.fastdesc():
2948 break
2948 break
2949 else:
2949 else:
2950 raise ValueError('arg is an empty sequence')
2950 raise ValueError('arg is an empty sequence')
2951 self.max = lambda: v
2951 self.max = lambda: v
2952 return v
2952 return v
2953
2953
2954 def first(self):
2954 def first(self):
2955 """return the first element in the set (user iteration perspective)
2955 """return the first element in the set (user iteration perspective)
2956
2956
2957 Return None if the set is empty"""
2957 Return None if the set is empty"""
2958 raise NotImplementedError()
2958 raise NotImplementedError()
2959
2959
2960 def last(self):
2960 def last(self):
2961 """return the last element in the set (user iteration perspective)
2961 """return the last element in the set (user iteration perspective)
2962
2962
2963 Return None if the set is empty"""
2963 Return None if the set is empty"""
2964 raise NotImplementedError()
2964 raise NotImplementedError()
2965
2965
2966 def __len__(self):
2966 def __len__(self):
2967 """return the length of the smartset
2967 """return the length of the smartset
2968
2968
2969 This can be expensive on a smartset that could otherwise be lazy."""
2969 This can be expensive on a smartset that could otherwise be lazy."""
2970 raise NotImplementedError()
2970 raise NotImplementedError()
2971
2971
2972 def reverse(self):
2972 def reverse(self):
2973 """reverse the expected iteration order"""
2973 """reverse the expected iteration order"""
2974 raise NotImplementedError()
2974 raise NotImplementedError()
2975
2975
2976 def sort(self, reverse=True):
2976 def sort(self, reverse=True):
2977 """get the set to iterate in an ascending or descending order"""
2977 """get the set to iterate in an ascending or descending order"""
2978 raise NotImplementedError()
2978 raise NotImplementedError()
2979
2979
2980 def __and__(self, other):
2980 def __and__(self, other):
2981 """Returns a new object with the intersection of the two collections.
2981 """Returns a new object with the intersection of the two collections.
2982
2982
2983 This is part of the mandatory API for smartset."""
2983 This is part of the mandatory API for smartset."""
2984 if isinstance(other, fullreposet):
2984 if isinstance(other, fullreposet):
2985 return self
2985 return self
2986 return self.filter(other.__contains__, condrepr=other, cache=False)
2986 return self.filter(other.__contains__, condrepr=other, cache=False)
2987
2987
2988 def __add__(self, other):
2988 def __add__(self, other):
2989 """Returns a new object with the union of the two collections.
2989 """Returns a new object with the union of the two collections.
2990
2990
2991 This is part of the mandatory API for smartset."""
2991 This is part of the mandatory API for smartset."""
2992 return addset(self, other)
2992 return addset(self, other)
2993
2993
2994 def __sub__(self, other):
2994 def __sub__(self, other):
2995 """Returns a new object with the subtraction of the two collections.
2995 """Returns a new object with the subtraction of the two collections.
2996
2996
2997 This is part of the mandatory API for smartset."""
2997 This is part of the mandatory API for smartset."""
2998 c = other.__contains__
2998 c = other.__contains__
2999 return self.filter(lambda r: not c(r), condrepr=('<not %r>', other),
2999 return self.filter(lambda r: not c(r), condrepr=('<not %r>', other),
3000 cache=False)
3000 cache=False)
3001
3001
3002 def filter(self, condition, condrepr=None, cache=True):
3002 def filter(self, condition, condrepr=None, cache=True):
3003 """Returns this smartset filtered by condition as a new smartset.
3003 """Returns this smartset filtered by condition as a new smartset.
3004
3004
3005 `condition` is a callable which takes a revision number and returns a
3005 `condition` is a callable which takes a revision number and returns a
3006 boolean. Optional `condrepr` provides a printable representation of
3006 boolean. Optional `condrepr` provides a printable representation of
3007 the given `condition`.
3007 the given `condition`.
3008
3008
3009 This is part of the mandatory API for smartset."""
3009 This is part of the mandatory API for smartset."""
3010 # builtin functions cannot be cached, but they do not need to be
3010 # builtin functions cannot be cached, but they do not need to be
3011 if cache and util.safehasattr(condition, 'func_code'):
3011 if cache and util.safehasattr(condition, 'func_code'):
3012 condition = util.cachefunc(condition)
3012 condition = util.cachefunc(condition)
3013 return filteredset(self, condition, condrepr)
3013 return filteredset(self, condition, condrepr)
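A minimal sketch of the set algebra these methods provide once the concrete subclasses defined below are involved (at this revision baseset still lives in mercurial.revset):

    from mercurial.revset import baseset

    xs = baseset([0, 1, 2, 3])
    ys = baseset([2, 3, 4])
    list(xs & ys)    # filteredset -> [2, 3]
    list(xs - ys)    # filteredset -> [0, 1]
    list(xs + ys)    # addset      -> [0, 1, 2, 3, 4], common values appear once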
3014
3014
3015 class baseset(abstractsmartset):
3015 class baseset(abstractsmartset):
3016 """Basic data structure that represents a revset and contains the basic
3016 """Basic data structure that represents a revset and contains the basic
3017 operations that it should be able to perform.
3017 operations that it should be able to perform.
3018
3018
3019 Every method in this class should be implemented by any smartset class.
3019 Every method in this class should be implemented by any smartset class.
3020 """
3020 """
3021 def __init__(self, data=(), datarepr=None, istopo=False):
3021 def __init__(self, data=(), datarepr=None, istopo=False):
3022 """
3022 """
3023 datarepr: a tuple of (format, obj, ...), a function or an object that
3023 datarepr: a tuple of (format, obj, ...), a function or an object that
3024 provides a printable representation of the given data.
3024 provides a printable representation of the given data.
3025 """
3025 """
3026 self._ascending = None
3026 self._ascending = None
3027 self._istopo = istopo
3027 self._istopo = istopo
3028 if not isinstance(data, list):
3028 if not isinstance(data, list):
3029 if isinstance(data, set):
3029 if isinstance(data, set):
3030 self._set = data
3030 self._set = data
3031 # a set has no order; we pick one for stability purposes
3031 # a set has no order; we pick one for stability purposes
3032 self._ascending = True
3032 self._ascending = True
3033 data = list(data)
3033 data = list(data)
3034 self._list = data
3034 self._list = data
3035 self._datarepr = datarepr
3035 self._datarepr = datarepr
3036
3036
3037 @util.propertycache
3037 @util.propertycache
3038 def _set(self):
3038 def _set(self):
3039 return set(self._list)
3039 return set(self._list)
3040
3040
3041 @util.propertycache
3041 @util.propertycache
3042 def _asclist(self):
3042 def _asclist(self):
3043 asclist = self._list[:]
3043 asclist = self._list[:]
3044 asclist.sort()
3044 asclist.sort()
3045 return asclist
3045 return asclist
3046
3046
3047 def __iter__(self):
3047 def __iter__(self):
3048 if self._ascending is None:
3048 if self._ascending is None:
3049 return iter(self._list)
3049 return iter(self._list)
3050 elif self._ascending:
3050 elif self._ascending:
3051 return iter(self._asclist)
3051 return iter(self._asclist)
3052 else:
3052 else:
3053 return reversed(self._asclist)
3053 return reversed(self._asclist)
3054
3054
3055 def fastasc(self):
3055 def fastasc(self):
3056 return iter(self._asclist)
3056 return iter(self._asclist)
3057
3057
3058 def fastdesc(self):
3058 def fastdesc(self):
3059 return reversed(self._asclist)
3059 return reversed(self._asclist)
3060
3060
3061 @util.propertycache
3061 @util.propertycache
3062 def __contains__(self):
3062 def __contains__(self):
3063 return self._set.__contains__
3063 return self._set.__contains__
3064
3064
3065 def __nonzero__(self):
3065 def __nonzero__(self):
3066 return bool(self._list)
3066 return bool(self._list)
3067
3067
3068 def sort(self, reverse=False):
3068 def sort(self, reverse=False):
3069 self._ascending = not bool(reverse)
3069 self._ascending = not bool(reverse)
3070 self._istopo = False
3070 self._istopo = False
3071
3071
3072 def reverse(self):
3072 def reverse(self):
3073 if self._ascending is None:
3073 if self._ascending is None:
3074 self._list.reverse()
3074 self._list.reverse()
3075 else:
3075 else:
3076 self._ascending = not self._ascending
3076 self._ascending = not self._ascending
3077 self._istopo = False
3077 self._istopo = False
3078
3078
3079 def __len__(self):
3079 def __len__(self):
3080 return len(self._list)
3080 return len(self._list)
3081
3081
3082 def isascending(self):
3082 def isascending(self):
3083 """Returns True if the collection is in ascending order, False if not.
3083 """Returns True if the collection is in ascending order, False if not.
3084
3084
3085 This is part of the mandatory API for smartset."""
3085 This is part of the mandatory API for smartset."""
3086 if len(self) <= 1:
3086 if len(self) <= 1:
3087 return True
3087 return True
3088 return self._ascending is not None and self._ascending
3088 return self._ascending is not None and self._ascending
3089
3089
3090 def isdescending(self):
3090 def isdescending(self):
3091 """Returns True if the collection is in descending order, False if not.
3091 """Returns True if the collection is in descending order, False if not.
3092
3092
3093 This is part of the mandatory API for smartset."""
3093 This is part of the mandatory API for smartset."""
3094 if len(self) <= 1:
3094 if len(self) <= 1:
3095 return True
3095 return True
3096 return self._ascending is not None and not self._ascending
3096 return self._ascending is not None and not self._ascending
3097
3097
3098 def istopo(self):
3098 def istopo(self):
3099 """Returns True if the collection is in topological order, False if not.
3099 """Returns True if the collection is in topological order, False if not.
3100
3100
3101 This is part of the mandatory API for smartset."""
3101 This is part of the mandatory API for smartset."""
3102 if len(self) <= 1:
3102 if len(self) <= 1:
3103 return True
3103 return True
3104 return self._istopo
3104 return self._istopo
3105
3105
3106 def first(self):
3106 def first(self):
3107 if self:
3107 if self:
3108 if self._ascending is None:
3108 if self._ascending is None:
3109 return self._list[0]
3109 return self._list[0]
3110 elif self._ascending:
3110 elif self._ascending:
3111 return self._asclist[0]
3111 return self._asclist[0]
3112 else:
3112 else:
3113 return self._asclist[-1]
3113 return self._asclist[-1]
3114 return None
3114 return None
3115
3115
3116 def last(self):
3116 def last(self):
3117 if self:
3117 if self:
3118 if self._ascending is None:
3118 if self._ascending is None:
3119 return self._list[-1]
3119 return self._list[-1]
3120 elif self._ascending:
3120 elif self._ascending:
3121 return self._asclist[-1]
3121 return self._asclist[-1]
3122 else:
3122 else:
3123 return self._asclist[0]
3123 return self._asclist[0]
3124 return None
3124 return None
3125
3125
3126 def __repr__(self):
3126 def __repr__(self):
3127 d = {None: '', False: '-', True: '+'}[self._ascending]
3127 d = {None: '', False: '-', True: '+'}[self._ascending]
3128 s = _formatsetrepr(self._datarepr)
3128 s = _formatsetrepr(self._datarepr)
3129 if not s:
3129 if not s:
3130 l = self._list
3130 l = self._list
3131 # if _list has been built from a set, it might have a different
3131 # if _list has been built from a set, it might have a different
3132 # order from one python implementation to another.
3132 # order from one python implementation to another.
3133 # We fallback to the sorted version for a stable output.
3133 # We fallback to the sorted version for a stable output.
3134 if self._ascending is not None:
3134 if self._ascending is not None:
3135 l = self._asclist
3135 l = self._asclist
3136 s = repr(l)
3136 s = repr(l)
3137 return '<%s%s %s>' % (type(self).__name__, d, s)
3137 return '<%s%s %s>' % (type(self).__name__, d, s)
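A short usage sketch of baseset's ordering behaviour (grounded in the methods above; the import path assumes this revision):

    from mercurial.revset import baseset

    s = baseset([3, 1, 2])
    list(s)                       # [3, 1, 2], insertion order until sorted
    s.sort()                      # ascending
    list(s), s.first(), s.last()  # ([1, 2, 3], 1, 3)
    s.reverse()
    list(s)                       # [3, 2, 1]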
3138
3138
3139 class filteredset(abstractsmartset):
3139 class filteredset(abstractsmartset):
3140 """Duck type for baseset class which iterates lazily over the revisions in
3140 """Duck type for baseset class which iterates lazily over the revisions in
3141 the subset and contains a function which tests for membership in the
3141 the subset and contains a function which tests for membership in the
3142 revset
3142 revset
3143 """
3143 """
3144 def __init__(self, subset, condition=lambda x: True, condrepr=None):
3144 def __init__(self, subset, condition=lambda x: True, condrepr=None):
3145 """
3145 """
3146 condition: a function that decides whether a revision in the subset
3146 condition: a function that decides whether a revision in the subset
3147 belongs to the revset or not.
3147 belongs to the revset or not.
3148 condrepr: a tuple of (format, obj, ...), a function or an object that
3148 condrepr: a tuple of (format, obj, ...), a function or an object that
3149 provides a printable representation of the given condition.
3149 provides a printable representation of the given condition.
3150 """
3150 """
3151 self._subset = subset
3151 self._subset = subset
3152 self._condition = condition
3152 self._condition = condition
3153 self._condrepr = condrepr
3153 self._condrepr = condrepr
3154
3154
3155 def __contains__(self, x):
3155 def __contains__(self, x):
3156 return x in self._subset and self._condition(x)
3156 return x in self._subset and self._condition(x)
3157
3157
3158 def __iter__(self):
3158 def __iter__(self):
3159 return self._iterfilter(self._subset)
3159 return self._iterfilter(self._subset)
3160
3160
3161 def _iterfilter(self, it):
3161 def _iterfilter(self, it):
3162 cond = self._condition
3162 cond = self._condition
3163 for x in it:
3163 for x in it:
3164 if cond(x):
3164 if cond(x):
3165 yield x
3165 yield x
3166
3166
3167 @property
3167 @property
3168 def fastasc(self):
3168 def fastasc(self):
3169 it = self._subset.fastasc
3169 it = self._subset.fastasc
3170 if it is None:
3170 if it is None:
3171 return None
3171 return None
3172 return lambda: self._iterfilter(it())
3172 return lambda: self._iterfilter(it())
3173
3173
3174 @property
3174 @property
3175 def fastdesc(self):
3175 def fastdesc(self):
3176 it = self._subset.fastdesc
3176 it = self._subset.fastdesc
3177 if it is None:
3177 if it is None:
3178 return None
3178 return None
3179 return lambda: self._iterfilter(it())
3179 return lambda: self._iterfilter(it())
3180
3180
3181 def __nonzero__(self):
3181 def __nonzero__(self):
3182 fast = None
3182 fast = None
3183 candidates = [self.fastasc if self.isascending() else None,
3183 candidates = [self.fastasc if self.isascending() else None,
3184 self.fastdesc if self.isdescending() else None,
3184 self.fastdesc if self.isdescending() else None,
3185 self.fastasc,
3185 self.fastasc,
3186 self.fastdesc]
3186 self.fastdesc]
3187 for candidate in candidates:
3187 for candidate in candidates:
3188 if candidate is not None:
3188 if candidate is not None:
3189 fast = candidate
3189 fast = candidate
3190 break
3190 break
3191
3191
3192 if fast is not None:
3192 if fast is not None:
3193 it = fast()
3193 it = fast()
3194 else:
3194 else:
3195 it = self
3195 it = self
3196
3196
3197 for r in it:
3197 for r in it:
3198 return True
3198 return True
3199 return False
3199 return False
3200
3200
3201 def __len__(self):
3201 def __len__(self):
3202 # Basic implementation to be changed in future patches.
3202 # Basic implementation to be changed in future patches.
3203 # until this gets improved, we use a generator expression
3203 # until this gets improved, we use a generator expression
3204 # here, since list comprehensions are free to call __len__ again
3204 # here, since list comprehensions are free to call __len__ again
3205 # causing infinite recursion
3205 # causing infinite recursion
3206 l = baseset(r for r in self)
3206 l = baseset(r for r in self)
3207 return len(l)
3207 return len(l)
3208
3208
3209 def sort(self, reverse=False):
3209 def sort(self, reverse=False):
3210 self._subset.sort(reverse=reverse)
3210 self._subset.sort(reverse=reverse)
3211
3211
3212 def reverse(self):
3212 def reverse(self):
3213 self._subset.reverse()
3213 self._subset.reverse()
3214
3214
3215 def isascending(self):
3215 def isascending(self):
3216 return self._subset.isascending()
3216 return self._subset.isascending()
3217
3217
3218 def isdescending(self):
3218 def isdescending(self):
3219 return self._subset.isdescending()
3219 return self._subset.isdescending()
3220
3220
3221 def istopo(self):
3221 def istopo(self):
3222 return self._subset.istopo()
3222 return self._subset.istopo()
3223
3223
3224 def first(self):
3224 def first(self):
3225 for x in self:
3225 for x in self:
3226 return x
3226 return x
3227 return None
3227 return None
3228
3228
3229 def last(self):
3229 def last(self):
3230 it = None
3230 it = None
3231 if self.isascending():
3231 if self.isascending():
3232 it = self.fastdesc
3232 it = self.fastdesc
3233 elif self.isdescending():
3233 elif self.isdescending():
3234 it = self.fastasc
3234 it = self.fastasc
3235 if it is not None:
3235 if it is not None:
3236 for x in it():
3236 for x in it():
3237 return x
3237 return x
3238 return None #empty case
3238 return None #empty case
3239 else:
3239 else:
3240 x = None
3240 x = None
3241 for x in self:
3241 for x in self:
3242 pass
3242 pass
3243 return x
3243 return x
3244
3244
3245 def __repr__(self):
3245 def __repr__(self):
3246 xs = [repr(self._subset)]
3246 xs = [repr(self._subset)]
3247 s = _formatsetrepr(self._condrepr)
3247 s = _formatsetrepr(self._condrepr)
3248 if s:
3248 if s:
3249 xs.append(s)
3249 xs.append(s)
3250 return '<%s %s>' % (type(self).__name__, ', '.join(xs))
3250 return '<%s %s>' % (type(self).__name__, ', '.join(xs))
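And a matching sketch for filteredset, built through abstractsmartset.filter():

    from mercurial.revset import baseset

    xs = baseset([4, 1, 2, 3])
    evens = xs.filter(lambda r: r % 2 == 0, condrepr='<even>')
    list(evens)    # [4, 2], the subset order, filtered lazily
    3 in evens     # False
    len(evens)     # 2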
3251
3251
3252 def _iterordered(ascending, iter1, iter2):
3252 def _iterordered(ascending, iter1, iter2):
3253 """produce an ordered iteration from two iterators with the same order
3253 """produce an ordered iteration from two iterators with the same order
3254
3254
3255 The ascending parameter indicates the iteration direction.
3255 The ascending parameter indicates the iteration direction.
3256 """
3256 """
3257 choice = max
3257 choice = max
3258 if ascending:
3258 if ascending:
3259 choice = min
3259 choice = min
3260
3260
3261 val1 = None
3261 val1 = None
3262 val2 = None
3262 val2 = None
3263 try:
3263 try:
3264 # Consume both iterators in an ordered way until one is empty
3264 # Consume both iterators in an ordered way until one is empty
3265 while True:
3265 while True:
3266 if val1 is None:
3266 if val1 is None:
3267 val1 = next(iter1)
3267 val1 = next(iter1)
3268 if val2 is None:
3268 if val2 is None:
3269 val2 = next(iter2)
3269 val2 = next(iter2)
3270 n = choice(val1, val2)
3270 n = choice(val1, val2)
3271 yield n
3271 yield n
3272 if val1 == n:
3272 if val1 == n:
3273 val1 = None
3273 val1 = None
3274 if val2 == n:
3274 if val2 == n:
3275 val2 = None
3275 val2 = None
3276 except StopIteration:
3276 except StopIteration:
3277 # Flush any remaining values and consume the other one
3277 # Flush any remaining values and consume the other one
3278 it = iter2
3278 it = iter2
3279 if val1 is not None:
3279 if val1 is not None:
3280 yield val1
3280 yield val1
3281 it = iter1
3281 it = iter1
3282 elif val2 is not None:
3282 elif val2 is not None:
3283 # might have been equality and both are empty
3283 # might have been equality and both are empty
3284 yield val2
3284 yield val2
3285 for val in it:
3285 for val in it:
3286 yield val
3286 yield val
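For example, a hand-run merge of two ascending iterators (common values are yielded only once):

    from mercurial.revset import _iterordered

    merged = _iterordered(True, iter([1, 3, 5]), iter([2, 3, 6]))
    list(merged)   # [1, 2, 3, 5, 6]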
3287
3287
3288 class addset(abstractsmartset):
3288 class addset(abstractsmartset):
3289 """Represent the addition of two sets
3289 """Represent the addition of two sets
3290
3290
3291 Wrapper structure for lazily adding two structures without losing much
3291 Wrapper structure for lazily adding two structures without losing much
3292 performance on the __contains__ method
3292 performance on the __contains__ method
3293
3293
3294 If the ascending attribute is set, that means the two structures are
3294 If the ascending attribute is set, that means the two structures are
3295 ordered in either an ascending or descending way. Therefore, we can add
3295 ordered in either an ascending or descending way. Therefore, we can add
3296 them maintaining the order by iterating over both at the same time
3296 them maintaining the order by iterating over both at the same time
3297
3297
3298 >>> xs = baseset([0, 3, 2])
3298 >>> xs = baseset([0, 3, 2])
3299 >>> ys = baseset([5, 2, 4])
3299 >>> ys = baseset([5, 2, 4])
3300
3300
3301 >>> rs = addset(xs, ys)
3301 >>> rs = addset(xs, ys)
3302 >>> bool(rs), 0 in rs, 1 in rs, 5 in rs, rs.first(), rs.last()
3302 >>> bool(rs), 0 in rs, 1 in rs, 5 in rs, rs.first(), rs.last()
3303 (True, True, False, True, 0, 4)
3303 (True, True, False, True, 0, 4)
3304 >>> rs = addset(xs, baseset([]))
3304 >>> rs = addset(xs, baseset([]))
3305 >>> bool(rs), 0 in rs, 1 in rs, rs.first(), rs.last()
3305 >>> bool(rs), 0 in rs, 1 in rs, rs.first(), rs.last()
3306 (True, True, False, 0, 2)
3306 (True, True, False, 0, 2)
3307 >>> rs = addset(baseset([]), baseset([]))
3307 >>> rs = addset(baseset([]), baseset([]))
3308 >>> bool(rs), 0 in rs, rs.first(), rs.last()
3308 >>> bool(rs), 0 in rs, rs.first(), rs.last()
3309 (False, False, None, None)
3309 (False, False, None, None)
3310
3310
3311 iterate unsorted:
3311 iterate unsorted:
3312 >>> rs = addset(xs, ys)
3312 >>> rs = addset(xs, ys)
3313 >>> # (use generator because pypy could call len())
3313 >>> # (use generator because pypy could call len())
3314 >>> list(x for x in rs) # without _genlist
3314 >>> list(x for x in rs) # without _genlist
3315 [0, 3, 2, 5, 4]
3315 [0, 3, 2, 5, 4]
3316 >>> assert not rs._genlist
3316 >>> assert not rs._genlist
3317 >>> len(rs)
3317 >>> len(rs)
3318 5
3318 5
3319 >>> [x for x in rs] # with _genlist
3319 >>> [x for x in rs] # with _genlist
3320 [0, 3, 2, 5, 4]
3320 [0, 3, 2, 5, 4]
3321 >>> assert rs._genlist
3321 >>> assert rs._genlist
3322
3322
3323 iterate ascending:
3323 iterate ascending:
3324 >>> rs = addset(xs, ys, ascending=True)
3324 >>> rs = addset(xs, ys, ascending=True)
3325 >>> # (use generator because pypy could call len())
3325 >>> # (use generator because pypy could call len())
3326 >>> list(x for x in rs), list(x for x in rs.fastasc()) # without _asclist
3326 >>> list(x for x in rs), list(x for x in rs.fastasc()) # without _asclist
3327 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
3327 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
3328 >>> assert not rs._asclist
3328 >>> assert not rs._asclist
3329 >>> len(rs)
3329 >>> len(rs)
3330 5
3330 5
3331 >>> [x for x in rs], [x for x in rs.fastasc()]
3331 >>> [x for x in rs], [x for x in rs.fastasc()]
3332 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
3332 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
3333 >>> assert rs._asclist
3333 >>> assert rs._asclist
3334
3334
3335 iterate descending:
3335 iterate descending:
3336 >>> rs = addset(xs, ys, ascending=False)
3336 >>> rs = addset(xs, ys, ascending=False)
3337 >>> # (use generator because pypy could call len())
3337 >>> # (use generator because pypy could call len())
3338 >>> list(x for x in rs), list(x for x in rs.fastdesc()) # without _asclist
3338 >>> list(x for x in rs), list(x for x in rs.fastdesc()) # without _asclist
3339 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
3339 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
3340 >>> assert not rs._asclist
3340 >>> assert not rs._asclist
3341 >>> len(rs)
3341 >>> len(rs)
3342 5
3342 5
3343 >>> [x for x in rs], [x for x in rs.fastdesc()]
3343 >>> [x for x in rs], [x for x in rs.fastdesc()]
3344 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
3344 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
3345 >>> assert rs._asclist
3345 >>> assert rs._asclist
3346
3346
3347 iterate ascending without fastasc:
3347 iterate ascending without fastasc:
3348 >>> rs = addset(xs, generatorset(ys), ascending=True)
3348 >>> rs = addset(xs, generatorset(ys), ascending=True)
3349 >>> assert rs.fastasc is None
3349 >>> assert rs.fastasc is None
3350 >>> [x for x in rs]
3350 >>> [x for x in rs]
3351 [0, 2, 3, 4, 5]
3351 [0, 2, 3, 4, 5]
3352
3352
3353 iterate descending without fastdesc:
3353 iterate descending without fastdesc:
3354 >>> rs = addset(generatorset(xs), ys, ascending=False)
3354 >>> rs = addset(generatorset(xs), ys, ascending=False)
3355 >>> assert rs.fastdesc is None
3355 >>> assert rs.fastdesc is None
3356 >>> [x for x in rs]
3356 >>> [x for x in rs]
3357 [5, 4, 3, 2, 0]
3357 [5, 4, 3, 2, 0]
3358 """
3358 """
3359 def __init__(self, revs1, revs2, ascending=None):
3359 def __init__(self, revs1, revs2, ascending=None):
3360 self._r1 = revs1
3360 self._r1 = revs1
3361 self._r2 = revs2
3361 self._r2 = revs2
3362 self._iter = None
3362 self._iter = None
3363 self._ascending = ascending
3363 self._ascending = ascending
3364 self._genlist = None
3364 self._genlist = None
3365 self._asclist = None
3365 self._asclist = None
3366
3366
3367 def __len__(self):
3367 def __len__(self):
3368 return len(self._list)
3368 return len(self._list)
3369
3369
3370 def __nonzero__(self):
3370 def __nonzero__(self):
3371 return bool(self._r1) or bool(self._r2)
3371 return bool(self._r1) or bool(self._r2)
3372
3372
3373 @util.propertycache
3373 @util.propertycache
3374 def _list(self):
3374 def _list(self):
3375 if not self._genlist:
3375 if not self._genlist:
3376 self._genlist = baseset(iter(self))
3376 self._genlist = baseset(iter(self))
3377 return self._genlist
3377 return self._genlist
3378
3378
3379 def __iter__(self):
3379 def __iter__(self):
3380 """Iterate over both collections without repeating elements
3380 """Iterate over both collections without repeating elements
3381
3381
3382 If the ascending attribute is not set, iterate over the first one and
3382 If the ascending attribute is not set, iterate over the first one and
3383 then over the second one, checking for membership in the first one so we
3383 then over the second one, checking for membership in the first one so we
3384 don't yield any duplicates.
3384 don't yield any duplicates.
3385
3385
3386 If the ascending attribute is set, iterate over both collections at the
3386 If the ascending attribute is set, iterate over both collections at the
3387 same time, yielding only one value at a time in the given order.
3387 same time, yielding only one value at a time in the given order.
3388 """
3388 """
3389 if self._ascending is None:
3389 if self._ascending is None:
3390 if self._genlist:
3390 if self._genlist:
3391 return iter(self._genlist)
3391 return iter(self._genlist)
3392 def arbitraryordergen():
3392 def arbitraryordergen():
3393 for r in self._r1:
3393 for r in self._r1:
3394 yield r
3394 yield r
3395 inr1 = self._r1.__contains__
3395 inr1 = self._r1.__contains__
3396 for r in self._r2:
3396 for r in self._r2:
3397 if not inr1(r):
3397 if not inr1(r):
3398 yield r
3398 yield r
3399 return arbitraryordergen()
3399 return arbitraryordergen()
3400 # try to use our own fast iterator if it exists
3400 # try to use our own fast iterator if it exists
3401 self._trysetasclist()
3401 self._trysetasclist()
3402 if self._ascending:
3402 if self._ascending:
3403 attr = 'fastasc'
3403 attr = 'fastasc'
3404 else:
3404 else:
3405 attr = 'fastdesc'
3405 attr = 'fastdesc'
3406 it = getattr(self, attr)
3406 it = getattr(self, attr)
3407 if it is not None:
3407 if it is not None:
3408 return it()
3408 return it()
3409 # maybe half of the component supports fast
3409 # maybe half of the component supports fast
3410 # get iterator for _r1
3410 # get iterator for _r1
3411 iter1 = getattr(self._r1, attr)
3411 iter1 = getattr(self._r1, attr)
3412 if iter1 is None:
3412 if iter1 is None:
3413 # let's avoid side effect (not sure it matters)
3413 # let's avoid side effect (not sure it matters)
3414 iter1 = iter(sorted(self._r1, reverse=not self._ascending))
3414 iter1 = iter(sorted(self._r1, reverse=not self._ascending))
3415 else:
3415 else:
3416 iter1 = iter1()
3416 iter1 = iter1()
3417 # get iterator for _r2
3417 # get iterator for _r2
3418 iter2 = getattr(self._r2, attr)
3418 iter2 = getattr(self._r2, attr)
3419 if iter2 is None:
3419 if iter2 is None:
3420 # let's avoid side effect (not sure it matters)
3420 # let's avoid side effect (not sure it matters)
3421 iter2 = iter(sorted(self._r2, reverse=not self._ascending))
3421 iter2 = iter(sorted(self._r2, reverse=not self._ascending))
3422 else:
3422 else:
3423 iter2 = iter2()
3423 iter2 = iter2()
3424 return _iterordered(self._ascending, iter1, iter2)
3424 return _iterordered(self._ascending, iter1, iter2)
3425
3425
3426 def _trysetasclist(self):
3426 def _trysetasclist(self):
3427 """populate the _asclist attribute if possible and necessary"""
3427 """populate the _asclist attribute if possible and necessary"""
3428 if self._genlist is not None and self._asclist is None:
3428 if self._genlist is not None and self._asclist is None:
3429 self._asclist = sorted(self._genlist)
3429 self._asclist = sorted(self._genlist)
3430
3430
3431 @property
3431 @property
3432 def fastasc(self):
3432 def fastasc(self):
3433 self._trysetasclist()
3433 self._trysetasclist()
3434 if self._asclist is not None:
3434 if self._asclist is not None:
3435 return self._asclist.__iter__
3435 return self._asclist.__iter__
3436 iter1 = self._r1.fastasc
3436 iter1 = self._r1.fastasc
3437 iter2 = self._r2.fastasc
3437 iter2 = self._r2.fastasc
3438 if None in (iter1, iter2):
3438 if None in (iter1, iter2):
3439 return None
3439 return None
3440 return lambda: _iterordered(True, iter1(), iter2())
3440 return lambda: _iterordered(True, iter1(), iter2())
3441
3441
3442 @property
3442 @property
3443 def fastdesc(self):
3443 def fastdesc(self):
3444 self._trysetasclist()
3444 self._trysetasclist()
3445 if self._asclist is not None:
3445 if self._asclist is not None:
3446 return self._asclist.__reversed__
3446 return self._asclist.__reversed__
3447 iter1 = self._r1.fastdesc
3447 iter1 = self._r1.fastdesc
3448 iter2 = self._r2.fastdesc
3448 iter2 = self._r2.fastdesc
3449 if None in (iter1, iter2):
3449 if None in (iter1, iter2):
3450 return None
3450 return None
3451 return lambda: _iterordered(False, iter1(), iter2())
3451 return lambda: _iterordered(False, iter1(), iter2())
3452
3452
3453 def __contains__(self, x):
3453 def __contains__(self, x):
3454 return x in self._r1 or x in self._r2
3454 return x in self._r1 or x in self._r2
3455
3455
3456 def sort(self, reverse=False):
3456 def sort(self, reverse=False):
3457 """Sort the added set
3457 """Sort the added set
3458
3458
3459 For this we use the cached list with all the generated values and if we
3459 For this we use the cached list with all the generated values and if we
3460 know they are ascending or descending we can sort them in a smart way.
3460 know they are ascending or descending we can sort them in a smart way.
3461 """
3461 """
3462 self._ascending = not reverse
3462 self._ascending = not reverse
3463
3463
3464 def isascending(self):
3464 def isascending(self):
3465 return self._ascending is not None and self._ascending
3465 return self._ascending is not None and self._ascending
3466
3466
3467 def isdescending(self):
3467 def isdescending(self):
3468 return self._ascending is not None and not self._ascending
3468 return self._ascending is not None and not self._ascending
3469
3469
3470 def istopo(self):
3470 def istopo(self):
3471 # not worth the trouble asserting if the two sets combined are still
3471 # not worth the trouble asserting if the two sets combined are still
3472 # in topological order. Use the sort() predicate to explicitly sort
3472 # in topological order. Use the sort() predicate to explicitly sort
3473 # again instead.
3473 # again instead.
3474 return False
3474 return False
3475
3475
3476 def reverse(self):
3476 def reverse(self):
3477 if self._ascending is None:
3477 if self._ascending is None:
3478 self._list.reverse()
3478 self._list.reverse()
3479 else:
3479 else:
3480 self._ascending = not self._ascending
3480 self._ascending = not self._ascending
3481
3481
3482 def first(self):
3482 def first(self):
3483 for x in self:
3483 for x in self:
3484 return x
3484 return x
3485 return None
3485 return None
3486
3486
3487 def last(self):
3487 def last(self):
3488 self.reverse()
3488 self.reverse()
3489 val = self.first()
3489 val = self.first()
3490 self.reverse()
3490 self.reverse()
3491 return val
3491 return val
3492
3492
3493 def __repr__(self):
3493 def __repr__(self):
3494 d = {None: '', False: '-', True: '+'}[self._ascending]
3494 d = {None: '', False: '-', True: '+'}[self._ascending]
3495 return '<%s%s %r, %r>' % (type(self).__name__, d, self._r1, self._r2)
3495 return '<%s%s %r, %r>' % (type(self).__name__, d, self._r1, self._r2)
3496
3496
3497 class generatorset(abstractsmartset):
3497 class generatorset(abstractsmartset):
3498 """Wrap a generator for lazy iteration
3498 """Wrap a generator for lazy iteration
3499
3499
3500 Wrapper structure for generators that provides lazy membership and can
3500 Wrapper structure for generators that provides lazy membership and can
3501 be iterated more than once.
3501 be iterated more than once.
3502 When asked for membership it generates values until either it finds the
3502 When asked for membership it generates values until either it finds the
3503 requested one or has gone through all the elements in the generator
3503 requested one or has gone through all the elements in the generator
3504 """
3504 """
3505 def __init__(self, gen, iterasc=None):
3505 def __init__(self, gen, iterasc=None):
3506 """
3506 """
3507 gen: a generator producing the values for the generatorset.
3507 gen: a generator producing the values for the generatorset.
3508 """
3508 """
3509 self._gen = gen
3509 self._gen = gen
3510 self._asclist = None
3510 self._asclist = None
3511 self._cache = {}
3511 self._cache = {}
3512 self._genlist = []
3512 self._genlist = []
3513 self._finished = False
3513 self._finished = False
3514 self._ascending = True
3514 self._ascending = True
3515 if iterasc is not None:
3515 if iterasc is not None:
3516 if iterasc:
3516 if iterasc:
3517 self.fastasc = self._iterator
3517 self.fastasc = self._iterator
3518 self.__contains__ = self._asccontains
3518 self.__contains__ = self._asccontains
3519 else:
3519 else:
3520 self.fastdesc = self._iterator
3520 self.fastdesc = self._iterator
3521 self.__contains__ = self._desccontains
3521 self.__contains__ = self._desccontains

    def __nonzero__(self):
        # Do not use 'for r in self' because it will enforce the iteration
        # order (default ascending), possibly unrolling a whole descending
        # iterator.
        if self._genlist:
            return True
        for r in self._consumegen():
            return True
        return False

    def __contains__(self, x):
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True

        self._cache[x] = False
        return False

    def _asccontains(self, x):
        """version of contains optimised for ascending generator"""
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True
            if l > x:
                break

        self._cache[x] = False
        return False
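    # For instance, with an ascending generator yielding 2, 4, 6, ..., a
    # direct call like gs._asccontains(3) returns False as soon as 4 is
    # produced instead of exhausting the generator.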

    def _desccontains(self, x):
        """version of contains optimised for descending generator"""
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True
            if l < x:
                break

        self._cache[x] = False
        return False

    def __iter__(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        if it is not None:
            return it()
        # we need to consume the iterator
        for x in self._consumegen():
            pass
        # recall the same code
        return iter(self)

    def _iterator(self):
        if self._finished:
            return iter(self._genlist)

        # We have to use this complex iteration strategy to allow multiple
        # iterations at the same time. We need to be able to catch revisions
        # that another iterator has already pulled from _consumegen and added
        # to genlist.
        #
        # Getting rid of it would provide about a 15% speedup on this
        # iteration.
        genlist = self._genlist
        nextrev = self._consumegen().next
        _len = len # cache global lookup
        def gen():
            i = 0
            while True:
                if i < _len(genlist):
                    yield genlist[i]
                else:
                    yield nextrev()
                i += 1
        return gen()
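    # Illustration (hypothetical): two iterators obtained from the same
    # generatorset built with iterasc=True stay consistent because each one
    # replays _genlist before pulling fresh values from _consumegen():
    #
    #   it1, it2 = iter(gs), iter(gs)
    #   next(it1); next(it1)   # pulls two values from the generator
    #   next(it2)              # replays the first cached value, no new pull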

    def _consumegen(self):
        cache = self._cache
        genlist = self._genlist.append
        for item in self._gen:
            cache[item] = True
            genlist(item)
            yield item
        if not self._finished:
            self._finished = True
            asc = self._genlist[:]
            asc.sort()
            self._asclist = asc
            self.fastasc = asc.__iter__
            self.fastdesc = asc.__reversed__

    def __len__(self):
        for x in self._consumegen():
            pass
        return len(self._genlist)

    def sort(self, reverse=False):
        self._ascending = not reverse

    def reverse(self):
        self._ascending = not self._ascending

    def isascending(self):
        return self._ascending

    def isdescending(self):
        return not self._ascending

    def istopo(self):
        # not worth the trouble asserting if the two sets combined are still
        # in topological order. Use the sort() predicate to explicitly sort
        # again instead.
        return False

    def first(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        if it is None:
            # we need to consume all and try again
            for x in self._consumegen():
                pass
            return self.first()
        return next(it(), None)

    def last(self):
        if self._ascending:
            it = self.fastdesc
        else:
            it = self.fastasc
        if it is None:
            # we need to consume all and try again
            for x in self._consumegen():
                pass
            return self.last()
        return next(it(), None)

    def __repr__(self):
        d = {False: '-', True: '+'}[self._ascending]
        return '<%s%s>' % (type(self).__name__, d)

class spanset(abstractsmartset):
    """Duck type for baseset class which represents a range of revisions and
    can work lazily and without having all the range in memory

    Note that spanset(x, y) behaves almost like xrange(x, y) except for two
    notable points:
    - when x > y it will be automatically descending,
    - revisions filtered by this repoview will be skipped.

    """
    def __init__(self, repo, start=0, end=None):
        """
        start: first revision included in the set
            (defaults to 0)
        end: first revision excluded (last + 1)
            (defaults to len(repo))

        Spanset will be descending if `end` < `start`.
        """
        if end is None:
            end = len(repo)
        self._ascending = start <= end
        if not self._ascending:
            start, end = end + 1, start + 1
        self._start = start
        self._end = end
        self._hiddenrevs = repo.changelog.filteredrevs

    def sort(self, reverse=False):
        self._ascending = not reverse

    def reverse(self):
        self._ascending = not self._ascending

    def istopo(self):
        # not worth the trouble asserting if the two sets combined are still
        # in topological order. Use the sort() predicate to explicitly sort
        # again instead.
        return False

    def _iterfilter(self, iterrange):
        s = self._hiddenrevs
        for r in iterrange:
            if r not in s:
                yield r

    def __iter__(self):
        if self._ascending:
            return self.fastasc()
        else:
            return self.fastdesc()

    def fastasc(self):
        iterrange = xrange(self._start, self._end)
        if self._hiddenrevs:
            return self._iterfilter(iterrange)
        return iter(iterrange)

    def fastdesc(self):
        iterrange = xrange(self._end - 1, self._start - 1, -1)
        if self._hiddenrevs:
            return self._iterfilter(iterrange)
        return iter(iterrange)

    def __contains__(self, rev):
        hidden = self._hiddenrevs
        return ((self._start <= rev < self._end)
                and not (hidden and rev in hidden))

    def __nonzero__(self):
        for r in self:
            return True
        return False

    def __len__(self):
        if not self._hiddenrevs:
            return abs(self._end - self._start)
        else:
            count = 0
            start = self._start
            end = self._end
            for rev in self._hiddenrevs:
                if (end < rev <= start) or (start <= rev < end):
                    count += 1
            return abs(self._end - self._start) - count

    def isascending(self):
        return self._ascending

    def isdescending(self):
        return not self._ascending

    def first(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        for x in it():
            return x
        return None

    def last(self):
        if self._ascending:
            it = self.fastdesc
        else:
            it = self.fastasc
        for x in it():
            return x
        return None

    def __repr__(self):
        d = {False: '-', True: '+'}[self._ascending]
        return '<%s%s %d:%d>' % (type(self).__name__, d,
                                 self._start, self._end - 1)

class fullreposet(spanset):
    """a set containing all revisions in the repo

    This class exists to host special optimization and magic to handle virtual
    revisions such as "null".
    """

    def __init__(self, repo):
        super(fullreposet, self).__init__(repo)

    def __and__(self, other):
        """As self contains the whole repo, all of the other set should also be
        in self. Therefore `self & other = other`.

        This boldly assumes the other set contains valid revs only.
        """
        # If other is not a smartset, make it so
        if not util.safehasattr(other, 'isascending'):
            # filter out hidden revisions
            # (this boldly assumes all smartsets are pure)
            #
            # `other` was used with "&", let's assume this is a set like
            # object.
            other = baseset(other - self._hiddenrevs)

        # XXX As fullreposet is also used as bootstrap, this is wrong.
        #
        # With a giveme312() revset returning [3,1,2], this makes
        #   'hg log -r "giveme312()"' -> 1, 2, 3 (wrong)
        # We cannot just drop it because other usages still need to sort it:
        #   'hg log -r "all() and giveme312()"' -> 1, 2, 3 (right)
        #
        # There are also some faulty revset implementations that rely on it
        # (eg: children as of its state in e8075329c5fb)
        #
        # When we fix the two points above we can move this into the if clause
        other.sort(reverse=self.isdescending())
        return other

def prettyformatset(revs):
    lines = []
    rs = repr(revs)
    p = 0
    while p < len(rs):
        q = rs.find('<', p + 1)
        if q < 0:
            q = len(rs)
        l = rs.count('<', 0, p) - rs.count('>', 0, p)
        assert l >= 0
        lines.append((l, rs[p:q].rstrip()))
        p = q
    return '\n'.join(' ' * l + s for l, s in lines)
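
# For example (hypothetical repr), a nested smartset whose repr() is
# '<addset <baseset [1, 3]>, <spanset+ 0:2>>' is rendered one smartset per
# line, indented by nesting depth:
#
#   <addset
#    <baseset [1, 3]>,
#    <spanset+ 0:2>>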

def loadpredicate(ui, extname, registrarobj):
    """Load revset predicates from specified registrarobj
    """
    for name, func in registrarobj._table.iteritems():
        symbols[name] = func
        if func._safe:
            safesymbols.add(name)
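
# A sketch of the extension side (hypothetical extension code): predicates
# declared through registrar.revsetpredicate() end up in the table that
# loadpredicate() copies into `symbols` (and `safesymbols` when safe=True):
#
#   revsetpredicate = registrar.revsetpredicate()
#
#   @revsetpredicate('onlyeven()', safe=True)
#   def onlyeven(repo, subset, x):
#       """Changesets with an even revision number."""
#       getargs(x, 0, 0, _("onlyeven takes no arguments"))
#       return subset.filter(lambda r: r % 2 == 0)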

# load built-in predicates explicitly to setup safesymbols
loadpredicate(None, None, predicate)

# tell hggettext to extract docstrings from these functions:
i18nfunctions = symbols.values()