revset: infer ordering flag to teach if operation should define/follow order...
Yuya Nishihara
r29930:90455e7b default
@@ -1,3738 +1,3785
# revset.py - revision set queries for mercurial
#
# Copyright 2010 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import heapq
import re

from .i18n import _
from . import (
    destutil,
    encoding,
    error,
    hbisect,
    match as matchmod,
    node,
    obsolete as obsmod,
    parser,
    pathutil,
    phases,
    registrar,
    repoview,
    util,
)

def _revancestors(repo, revs, followfirst):
    """Like revlog.ancestors(), but supports followfirst."""
    if followfirst:
        cut = 1
    else:
        cut = None
    cl = repo.changelog

    def iterate():
        revs.sort(reverse=True)
        irevs = iter(revs)
        h = []

        inputrev = next(irevs, None)
        if inputrev is not None:
            heapq.heappush(h, -inputrev)

        seen = set()
        while h:
            current = -heapq.heappop(h)
            if current == inputrev:
                inputrev = next(irevs, None)
                if inputrev is not None:
                    heapq.heappush(h, -inputrev)
            if current not in seen:
                seen.add(current)
                yield current
                for parent in cl.parentrevs(current)[:cut]:
                    if parent != node.nullrev:
                        heapq.heappush(h, -parent)

    return generatorset(iterate(), iterasc=False)
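
# Note on the 'followfirst' handling above: parentrevs(rev) returns the pair
# (p1, p2), so parentrevs(current)[:cut] keeps only the first parent when
# cut == 1 and both parents when cut is None. _revdescendants() below uses
# the same trick.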

def _revdescendants(repo, revs, followfirst):
    """Like revlog.descendants() but supports followfirst."""
    if followfirst:
        cut = 1
    else:
        cut = None

    def iterate():
        cl = repo.changelog
        # XXX this should be 'parentset.min()' assuming 'parentset' is a
        # smartset (and if it is not, it should.)
        first = min(revs)
        nullrev = node.nullrev
        if first == nullrev:
            # Are there nodes with a null first parent and a non-null
            # second one? Maybe. Do we care? Probably not.
            for i in cl:
                yield i
        else:
            seen = set(revs)
            for i in cl.revs(first + 1):
                for x in cl.parentrevs(i)[:cut]:
                    if x != nullrev and x in seen:
                        seen.add(i)
                        yield i
                        break

    return generatorset(iterate(), iterasc=True)

def _reachablerootspure(repo, minroot, roots, heads, includepath):
    """return (heads(::<roots> and ::<heads>))

    If includepath is True, return (<roots>::<heads>)."""
    if not roots:
        return []
    parentrevs = repo.changelog.parentrevs
    roots = set(roots)
    visit = list(heads)
    reachable = set()
    seen = {}
    # prefetch all the things! (because python is slow)
    reached = reachable.add
    dovisit = visit.append
    nextvisit = visit.pop
    # open-code the post-order traversal due to the tiny size of
    # sys.getrecursionlimit()
    while visit:
        rev = nextvisit()
        if rev in roots:
            reached(rev)
            if not includepath:
                continue
        parents = parentrevs(rev)
        seen[rev] = parents
        for parent in parents:
            if parent >= minroot and parent not in seen:
                dovisit(parent)
    if not reachable:
        return baseset()
    if not includepath:
        return reachable
    for rev in sorted(seen):
        for parent in seen[rev]:
            if parent in reachable:
                reached(rev)
    return reachable

def reachableroots(repo, roots, heads, includepath=False):
    """return (heads(::<roots> and ::<heads>))

    If includepath is True, return (<roots>::<heads>)."""
    if not roots:
        return baseset()
    minroot = roots.min()
    roots = list(roots)
    heads = list(heads)
    try:
        revs = repo.changelog.reachableroots(minroot, heads, roots, includepath)
    except AttributeError:
        revs = _reachablerootspure(repo, minroot, roots, heads, includepath)
    revs = baseset(revs)
    revs.sort()
    return revs

elements = {
    # token-type: binding-strength, primary, prefix, infix, suffix
    "(": (21, None, ("group", 1, ")"), ("func", 1, ")"), None),
    "##": (20, None, None, ("_concat", 20), None),
    "~": (18, None, None, ("ancestor", 18), None),
    "^": (18, None, None, ("parent", 18), "parentpost"),
    "-": (5, None, ("negate", 19), ("minus", 5), None),
    "::": (17, None, ("dagrangepre", 17), ("dagrange", 17), "dagrangepost"),
    "..": (17, None, ("dagrangepre", 17), ("dagrange", 17), "dagrangepost"),
    ":": (15, "rangeall", ("rangepre", 15), ("range", 15), "rangepost"),
    "not": (10, None, ("not", 10), None, None),
    "!": (10, None, ("not", 10), None, None),
    "and": (5, None, None, ("and", 5), None),
    "&": (5, None, None, ("and", 5), None),
    "%": (5, None, None, ("only", 5), "onlypost"),
    "or": (4, None, None, ("or", 4), None),
    "|": (4, None, None, ("or", 4), None),
    "+": (4, None, None, ("or", 4), None),
    "=": (3, None, None, ("keyvalue", 3), None),
    ",": (2, None, None, ("list", 2), None),
    ")": (0, None, None, None, None),
    "symbol": (0, "symbol", None, None, None),
    "string": (0, "string", None, None, None),
    "end": (0, None, None, None, None),
}
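
# Illustrative reading of the binding strengths above (derived from the table,
# not an exhaustive grammar): '::' (17) binds tighter than '&' (5), which in
# turn binds tighter than '|' (4), so an expression such as
#   a | b & c::d
# groups as
#   a | (b & (c::d))
# Note also that the prefix form of '-' (negate, 19) binds much tighter than
# its infix form (minus, 5).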

keywords = set(['and', 'or', 'not'])

# default set of valid characters for the initial letter of symbols
_syminitletters = set(c for c in [chr(i) for i in xrange(256)]
                      if c.isalnum() or c in '._@' or ord(c) > 127)

# default set of valid characters for non-initial letters of symbols
_symletters = set(c for c in [chr(i) for i in xrange(256)]
                  if c.isalnum() or c in '-._/@' or ord(c) > 127)

def tokenize(program, lookup=None, syminitletters=None, symletters=None):
    '''
    Parse a revset statement into a stream of tokens

    ``syminitletters`` is the set of valid characters for the initial
    letter of symbols.

    By default, character ``c`` is recognized as valid for initial
    letter of symbols, if ``c.isalnum() or c in '._@' or ord(c) > 127``.

    ``symletters`` is the set of valid characters for non-initial
    letters of symbols.

    By default, character ``c`` is recognized as valid for non-initial
    letters of symbols, if ``c.isalnum() or c in '-._/@' or ord(c) > 127``.

    Check that @ is a valid unquoted token character (issue3686):
    >>> list(tokenize("@::"))
    [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]

    '''
    if syminitletters is None:
        syminitletters = _syminitletters
    if symletters is None:
        symletters = _symletters

    if program and lookup:
        # attempt to parse old-style ranges first to deal with
        # things like old-tag which contain query metacharacters
        parts = program.split(':', 1)
        if all(lookup(sym) for sym in parts if sym):
            if parts[0]:
                yield ('symbol', parts[0], 0)
            if len(parts) > 1:
                s = len(parts[0])
                yield (':', None, s)
                if parts[1]:
                    yield ('symbol', parts[1], s + 1)
            yield ('end', None, len(program))
            return

    pos, l = 0, len(program)
    while pos < l:
        c = program[pos]
        if c.isspace(): # skip inter-token whitespace
            pass
        elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
            yield ('::', None, pos)
            pos += 1 # skip ahead
        elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
            yield ('..', None, pos)
            pos += 1 # skip ahead
        elif c == '#' and program[pos:pos + 2] == '##': # look ahead carefully
            yield ('##', None, pos)
            pos += 1 # skip ahead
        elif c in "():=,-|&+!~^%": # handle simple operators
            yield (c, None, pos)
        elif (c in '"\'' or c == 'r' and
              program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
            if c == 'r':
                pos += 1
                c = program[pos]
                decode = lambda x: x
            else:
                decode = parser.unescapestr
            pos += 1
            s = pos
            while pos < l: # find closing quote
                d = program[pos]
                if d == '\\': # skip over escaped characters
                    pos += 2
                    continue
                if d == c:
                    yield ('string', decode(program[s:pos]), s)
                    break
                pos += 1
            else:
                raise error.ParseError(_("unterminated string"), s)
        # gather up a symbol/keyword
        elif c in syminitletters:
            s = pos
            pos += 1
            while pos < l: # find end of symbol
                d = program[pos]
                if d not in symletters:
                    break
                if d == '.' and program[pos - 1] == '.': # special case for ..
                    pos -= 1
                    break
                pos += 1
            sym = program[s:pos]
            if sym in keywords: # operator keywords
                yield (sym, None, s)
            elif '-' in sym:
                # some jerk gave us foo-bar-baz, try to check if it's a symbol
                if lookup and lookup(sym):
                    # looks like a real symbol
                    yield ('symbol', sym, s)
                else:
                    # looks like an expression
                    parts = sym.split('-')
                    for p in parts[:-1]:
                        if p: # possible consecutive -
                            yield ('symbol', p, s)
                        s += len(p)
                        yield ('-', None, pos)
                        s += 1
                    if parts[-1]: # possible trailing -
                        yield ('symbol', parts[-1], s)
            else:
                yield ('symbol', sym, s)
            pos -= 1
        else:
            raise error.ParseError(_("syntax error in revset '%s'") %
                                   program, pos)
        pos += 1
    yield ('end', None, pos)
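
# Illustrative token stream (hand-worked from the rules above, not a doctest):
# tokenize("release_1.0::tip") yields
#   ('symbol', 'release_1.0', 0), ('::', None, 11),
#   ('symbol', 'tip', 13), ('end', None, 16)
# since '.', '_' and digits are all valid symbol letters.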

# helpers

def getsymbol(x):
    if x and x[0] == 'symbol':
        return x[1]
    raise error.ParseError(_('not a symbol'))

def getstring(x, err):
    if x and (x[0] == 'string' or x[0] == 'symbol'):
        return x[1]
    raise error.ParseError(err)

def getlist(x):
    if not x:
        return []
    if x[0] == 'list':
        return list(x[1:])
    return [x]

def getargs(x, min, max, err):
    l = getlist(x)
    if len(l) < min or (max >= 0 and len(l) > max):
        raise error.ParseError(err)
    return l

def getargsdict(x, funcname, keys):
    return parser.buildargsdict(getlist(x), funcname, keys.split(),
                                keyvaluenode='keyvalue', keynode='symbol')

def getset(repo, subset, x):
    if not x:
        raise error.ParseError(_("missing argument"))
    s = methods[x[0]](repo, subset, *x[1:])
    if util.safehasattr(s, 'isascending'):
        return s
    # else case should not happen, because all non-func are internal,
    # ignoring for now.
    if x[0] == 'func' and x[1][0] == 'symbol' and x[1][1] in symbols:
        repo.ui.deprecwarn('revset "%s" uses list instead of smartset'
                           % x[1][1],
                           '3.9')
    return baseset(s)
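
# Illustrative example (a sketch of the dispatch, assuming the usual parse
# tree shape): for '2:5 and user(alice)' the parser produces roughly
#   ('and', ('range', ('symbol', '2'), ('symbol', '5')),
#           ('func', ('symbol', 'user'), ('symbol', 'alice')))
# and getset() evaluates it recursively by looking up each node type in the
# 'methods' table (andset, rangeset, func, ...) defined later in this module.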

def _getrevsource(repo, r):
    extra = repo[r].extra()
    for label in ('source', 'transplant_source', 'rebase_source'):
        if label in extra:
            try:
                return repo[extra[label]].rev()
            except error.RepoLookupError:
                pass
    return None

# operator methods

def stringset(repo, subset, x):
    x = repo[x].rev()
    if (x in subset
        or x == node.nullrev and isinstance(subset, fullreposet)):
        return baseset([x])
    return baseset()

def rangeset(repo, subset, x, y):
    m = getset(repo, fullreposet(repo), x)
    n = getset(repo, fullreposet(repo), y)

    if not m or not n:
        return baseset()
    m, n = m.first(), n.last()

    if m == n:
        r = baseset([m])
    elif n == node.wdirrev:
        r = spanset(repo, m, len(repo)) + baseset([n])
    elif m == node.wdirrev:
        r = baseset([m]) + spanset(repo, len(repo) - 1, n - 1)
    elif m < n:
        r = spanset(repo, m, n + 1)
    else:
        r = spanset(repo, m, n - 1)
    # XXX We should combine with subset first: 'subset & baseset(...)'. This is
    # necessary to ensure we preserve the order in subset.
    #
    # This has performance implication, carrying the sorting over when possible
    # would be more efficient.
    return r & subset

def dagrange(repo, subset, x, y):
    r = fullreposet(repo)
    xs = reachableroots(repo, getset(repo, r, x), getset(repo, r, y),
                        includepath=True)
    return subset & xs

def andset(repo, subset, x, y):
    return getset(repo, getset(repo, subset, x), y)

def differenceset(repo, subset, x, y):
    return getset(repo, subset, x) - getset(repo, subset, y)

def _orsetlist(repo, subset, xs):
    assert xs
    if len(xs) == 1:
        return getset(repo, subset, xs[0])
    p = len(xs) // 2
    a = _orsetlist(repo, subset, xs[:p])
    b = _orsetlist(repo, subset, xs[p:])
    return a + b

def orset(repo, subset, x):
    return _orsetlist(repo, subset, getlist(x))

def notset(repo, subset, x):
    return subset - getset(repo, subset, x)

def listset(repo, subset, *xs):
    raise error.ParseError(_("can't use a list in this context"),
                           hint=_('see hg help "revsets.x or y"'))

def keyvaluepair(repo, subset, k, v):
    raise error.ParseError(_("can't use a key-value pair in this context"))

def func(repo, subset, a, b):
    f = getsymbol(a)
    if f in symbols:
        return symbols[f](repo, subset, b)

    keep = lambda fn: getattr(fn, '__doc__', None) is not None

    syms = [s for (s, fn) in symbols.items() if keep(fn)]
    raise error.UnknownIdentifier(f, syms)

# functions

# symbols are callables like:
#   fn(repo, subset, x)
# with:
#   repo - current repository instance
#   subset - of revisions to be examined
#   x - argument in tree form
symbols = {}

# symbols which can't be used for a DoS attack for any given input
# (e.g. those which accept regexes as plain strings shouldn't be included)
# functions that just return a lot of changesets (like all) don't count here
safesymbols = set()

predicate = registrar.revsetpredicate()

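# Illustrative sketch (an assumption about typical usage, not part of this
# change): a third-party extension can register extra predicates with its own
# registrar table, which Mercurial picks up when the extension is loaded. The
# predicate name below is hypothetical, and helpers such as getargs() would be
# reached through the revset module from an extension:
#
#   revsetpredicate = registrar.revsetpredicate()
#
#   @revsetpredicate('mergesonly()')
#   def mergesonly(repo, subset, x):
#       """Changesets that are merges (two parents)."""
#       getargs(x, 0, 0, _("mergesonly takes no arguments"))
#       return subset.filter(lambda r: len(repo[r].parents()) == 2)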
@predicate('_destupdate')
def _destupdate(repo, subset, x):
    # experimental revset for update destination
    args = getargsdict(x, 'limit', 'clean check')
    return subset & baseset([destutil.destupdate(repo, **args)[0]])

@predicate('_destmerge')
def _destmerge(repo, subset, x):
    # experimental revset for merge destination
    sourceset = None
    if x is not None:
        sourceset = getset(repo, fullreposet(repo), x)
    return subset & baseset([destutil.destmerge(repo, sourceset=sourceset)])

@predicate('adds(pattern)', safe=True)
def adds(repo, subset, x):
    """Changesets that add a file matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "adds" is a keyword
    pat = getstring(x, _("adds requires a pattern"))
    return checkstatus(repo, subset, pat, 1)

@predicate('ancestor(*changeset)', safe=True)
def ancestor(repo, subset, x):
    """A greatest common ancestor of the changesets.

    Accepts 0 or more changesets.
    Will return empty list when passed no args.
    Greatest common ancestor of a single changeset is that changeset.
    """
    # i18n: "ancestor" is a keyword
    l = getlist(x)
    rl = fullreposet(repo)
    anc = None

    # (getset(repo, rl, i) for i in l) generates a list of lists
    for revs in (getset(repo, rl, i) for i in l):
        for r in revs:
            if anc is None:
                anc = repo[r]
            else:
                anc = anc.ancestor(repo[r])

    if anc is not None and anc.rev() in subset:
        return baseset([anc.rev()])
    return baseset()

def _ancestors(repo, subset, x, followfirst=False):
    heads = getset(repo, fullreposet(repo), x)
    if not heads:
        return baseset()
    s = _revancestors(repo, heads, followfirst)
    return subset & s

@predicate('ancestors(set)', safe=True)
def ancestors(repo, subset, x):
    """Changesets that are ancestors of a changeset in set.
    """
    return _ancestors(repo, subset, x)

@predicate('_firstancestors', safe=True)
def _firstancestors(repo, subset, x):
    # ``_firstancestors(set)``
    # Like ``ancestors(set)`` but follows only the first parents.
    return _ancestors(repo, subset, x, followfirst=True)

def ancestorspec(repo, subset, x, n):
    """``set~n``
    Changesets that are the Nth ancestor (first parents only) of a changeset
    in set.
    """
    try:
        n = int(n[1])
    except (TypeError, ValueError):
        raise error.ParseError(_("~ expects a number"))
    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        for i in range(n):
            r = cl.parentrevs(r)[0]
        ps.add(r)
    return subset & ps
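
# For example, 'tip~2' walks two steps of first parents from tip, and 'x~0'
# is simply 'x' itself (range(0) performs no steps above).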

@predicate('author(string)', safe=True)
def author(repo, subset, x):
    """Alias for ``user(string)``.
    """
    # i18n: "author" is a keyword
    n = encoding.lower(getstring(x, _("author requires a string")))
    kind, pattern, matcher = _substringmatcher(n)
    return subset.filter(lambda x: matcher(encoding.lower(repo[x].user())),
                         condrepr=('<user %r>', n))

@predicate('bisect(string)', safe=True)
def bisect(repo, subset, x):
    """Changesets marked in the specified bisect status:

    - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
    - ``goods``, ``bads`` : csets topologically good/bad
    - ``range`` : csets taking part in the bisection
    - ``pruned`` : csets that are goods, bads or skipped
    - ``untested`` : csets whose fate is yet unknown
    - ``ignored`` : csets ignored due to DAG topology
    - ``current`` : the cset currently being bisected
    """
    # i18n: "bisect" is a keyword
    status = getstring(x, _("bisect requires a string")).lower()
    state = set(hbisect.get(repo, status))
    return subset & state
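
# Typical usage (illustrative): 'hg log -r "bisect(pruned)"' lists every
# changeset already marked good, bad or skipped, while 'bisect(untested)'
# shows what is still left to test.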

# Backward-compatibility
# - no help entry so that we do not advertise it any more
@predicate('bisected', safe=True)
def bisected(repo, subset, x):
    return bisect(repo, subset, x)

@predicate('bookmark([name])', safe=True)
def bookmark(repo, subset, x):
    """The named bookmark or all bookmarks.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a bookmark that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "bookmark" is a keyword
    args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
    if args:
        bm = getstring(args[0],
                       # i18n: "bookmark" is a keyword
                       _('the argument to bookmark must be a string'))
        kind, pattern, matcher = util.stringmatcher(bm)
        bms = set()
        if kind == 'literal':
            bmrev = repo._bookmarks.get(pattern, None)
            if not bmrev:
                raise error.RepoLookupError(_("bookmark '%s' does not exist")
                                            % pattern)
            bms.add(repo[bmrev].rev())
        else:
            matchrevs = set()
            for name, bmrev in repo._bookmarks.iteritems():
                if matcher(name):
                    matchrevs.add(bmrev)
            if not matchrevs:
                raise error.RepoLookupError(_("no bookmarks exist"
                                              " that match '%s'") % pattern)
            for bmrev in matchrevs:
                bms.add(repo[bmrev].rev())
    else:
        bms = set([repo[r].rev()
                   for r in repo._bookmarks.values()])
    bms -= set([node.nullrev])
    return subset & bms
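
# Typical usage (illustrative): 'hg log -r "bookmark()"' shows all bookmarked
# changesets, while 'bookmark("re:release/.*")' matches bookmarks by regular
# expression as described in the docstring above.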

@predicate('branch(string or set)', safe=True)
def branch(repo, subset, x):
    """
    All changesets belonging to the given branch or the branches of the given
    changesets.

    If `string` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a branch that actually starts with `re:`,
    use the prefix `literal:`.
    """
    getbi = repo.revbranchcache().branchinfo

    try:
        b = getstring(x, '')
    except error.ParseError:
        # not a string, but another revspec, e.g. tip()
        pass
    else:
        kind, pattern, matcher = util.stringmatcher(b)
        if kind == 'literal':
            # note: falls through to the revspec case if no branch with
            # this name exists and pattern kind is not specified explicitly
            if pattern in repo.branchmap():
                return subset.filter(lambda r: matcher(getbi(r)[0]),
                                     condrepr=('<branch %r>', b))
            if b.startswith('literal:'):
                raise error.RepoLookupError(_("branch '%s' does not exist")
                                            % pattern)
        else:
            return subset.filter(lambda r: matcher(getbi(r)[0]),
                                 condrepr=('<branch %r>', b))

    s = getset(repo, fullreposet(repo), x)
    b = set()
    for r in s:
        b.add(getbi(r)[0])
    c = s.__contains__
    return subset.filter(lambda r: c(r) or getbi(r)[0] in b,
                         condrepr=lambda: '<branch %r>' % sorted(b))

@predicate('bumped()', safe=True)
def bumped(repo, subset, x):
    """Mutable changesets marked as successors of public changesets.

    Only non-public and non-obsolete changesets can be `bumped`.
    """
    # i18n: "bumped" is a keyword
    getargs(x, 0, 0, _("bumped takes no arguments"))
    bumped = obsmod.getrevs(repo, 'bumped')
    return subset & bumped

@predicate('bundle()', safe=True)
def bundle(repo, subset, x):
    """Changesets in the bundle.

    Bundle must be specified by the -R option."""

    try:
        bundlerevs = repo.changelog.bundlerevs
    except AttributeError:
        raise error.Abort(_("no bundle provided - specify with -R"))
    return subset & bundlerevs

def checkstatus(repo, subset, pat, field):
    hasset = matchmod.patkind(pat) == 'set'

    mcache = [None]
    def matches(x):
        c = repo[x]
        if not mcache[0] or hasset:
            mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
        m = mcache[0]
        fname = None
        if not m.anypats() and len(m.files()) == 1:
            fname = m.files()[0]
        if fname is not None:
            if fname not in c.files():
                return False
        else:
            for f in c.files():
                if m(f):
                    break
            else:
                return False
        files = repo.status(c.p1().node(), c.node())[field]
        if fname is not None:
            if fname in files:
                return True
        else:
            for f in files:
                if m(f):
                    return True

    return subset.filter(matches, condrepr=('<status[%r] %r>', field, pat))
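
# checkstatus() filters by one field of repo.status(ctx.p1(), ctx); field 1 is
# the list of added files, which is why adds() above calls it with field=1
# (field 0 is the modified list, field 2 the removed list).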
701
701
702 def _children(repo, subset, parentset):
702 def _children(repo, subset, parentset):
703 if not parentset:
703 if not parentset:
704 return baseset()
704 return baseset()
705 cs = set()
705 cs = set()
706 pr = repo.changelog.parentrevs
706 pr = repo.changelog.parentrevs
707 minrev = parentset.min()
707 minrev = parentset.min()
708 for r in subset:
708 for r in subset:
709 if r <= minrev:
709 if r <= minrev:
710 continue
710 continue
711 for p in pr(r):
711 for p in pr(r):
712 if p in parentset:
712 if p in parentset:
713 cs.add(r)
713 cs.add(r)
714 return baseset(cs)
714 return baseset(cs)
715
715
716 @predicate('children(set)', safe=True)
716 @predicate('children(set)', safe=True)
717 def children(repo, subset, x):
717 def children(repo, subset, x):
718 """Child changesets of changesets in set.
718 """Child changesets of changesets in set.
719 """
719 """
720 s = getset(repo, fullreposet(repo), x)
720 s = getset(repo, fullreposet(repo), x)
721 cs = _children(repo, subset, s)
721 cs = _children(repo, subset, s)
722 return subset & cs
722 return subset & cs
723
723
724 @predicate('closed()', safe=True)
724 @predicate('closed()', safe=True)
725 def closed(repo, subset, x):
725 def closed(repo, subset, x):
726 """Changeset is closed.
726 """Changeset is closed.
727 """
727 """
728 # i18n: "closed" is a keyword
728 # i18n: "closed" is a keyword
729 getargs(x, 0, 0, _("closed takes no arguments"))
729 getargs(x, 0, 0, _("closed takes no arguments"))
730 return subset.filter(lambda r: repo[r].closesbranch(),
730 return subset.filter(lambda r: repo[r].closesbranch(),
731 condrepr='<branch closed>')
731 condrepr='<branch closed>')
732
732
733 @predicate('contains(pattern)')
733 @predicate('contains(pattern)')
734 def contains(repo, subset, x):
734 def contains(repo, subset, x):
735 """The revision's manifest contains a file matching pattern (but might not
735 """The revision's manifest contains a file matching pattern (but might not
736 modify it). See :hg:`help patterns` for information about file patterns.
736 modify it). See :hg:`help patterns` for information about file patterns.
737
737
738 The pattern without explicit kind like ``glob:`` is expected to be
738 The pattern without explicit kind like ``glob:`` is expected to be
739 relative to the current directory and match against a file exactly
739 relative to the current directory and match against a file exactly
740 for efficiency.
740 for efficiency.
741 """
741 """
742 # i18n: "contains" is a keyword
742 # i18n: "contains" is a keyword
743 pat = getstring(x, _("contains requires a pattern"))
743 pat = getstring(x, _("contains requires a pattern"))
744
744
745 def matches(x):
745 def matches(x):
746 if not matchmod.patkind(pat):
746 if not matchmod.patkind(pat):
747 pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
747 pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
748 if pats in repo[x]:
748 if pats in repo[x]:
749 return True
749 return True
750 else:
750 else:
751 c = repo[x]
751 c = repo[x]
752 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
752 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
753 for f in c.manifest():
753 for f in c.manifest():
754 if m(f):
754 if m(f):
755 return True
755 return True
756 return False
756 return False
757
757
758 return subset.filter(matches, condrepr=('<contains %r>', pat))
758 return subset.filter(matches, condrepr=('<contains %r>', pat))
759
759
760 @predicate('converted([id])', safe=True)
760 @predicate('converted([id])', safe=True)
761 def converted(repo, subset, x):
761 def converted(repo, subset, x):
762 """Changesets converted from the given identifier in the old repository if
762 """Changesets converted from the given identifier in the old repository if
763 present, or all converted changesets if no identifier is specified.
763 present, or all converted changesets if no identifier is specified.
764 """
764 """
765
765
766 # There is exactly no chance of resolving the revision, so do a simple
766 # There is exactly no chance of resolving the revision, so do a simple
767 # string compare and hope for the best
767 # string compare and hope for the best
768
768
769 rev = None
769 rev = None
770 # i18n: "converted" is a keyword
770 # i18n: "converted" is a keyword
771 l = getargs(x, 0, 1, _('converted takes one or no arguments'))
771 l = getargs(x, 0, 1, _('converted takes one or no arguments'))
772 if l:
772 if l:
773 # i18n: "converted" is a keyword
773 # i18n: "converted" is a keyword
774 rev = getstring(l[0], _('converted requires a revision'))
774 rev = getstring(l[0], _('converted requires a revision'))
775
775
776 def _matchvalue(r):
776 def _matchvalue(r):
777 source = repo[r].extra().get('convert_revision', None)
777 source = repo[r].extra().get('convert_revision', None)
778 return source is not None and (rev is None or source.startswith(rev))
778 return source is not None and (rev is None or source.startswith(rev))
779
779
780 return subset.filter(lambda r: _matchvalue(r),
780 return subset.filter(lambda r: _matchvalue(r),
781 condrepr=('<converted %r>', rev))
781 condrepr=('<converted %r>', rev))
782
782
783 @predicate('date(interval)', safe=True)
783 @predicate('date(interval)', safe=True)
784 def date(repo, subset, x):
784 def date(repo, subset, x):
785 """Changesets within the interval, see :hg:`help dates`.
785 """Changesets within the interval, see :hg:`help dates`.
786 """
786 """
787 # i18n: "date" is a keyword
787 # i18n: "date" is a keyword
788 ds = getstring(x, _("date requires a string"))
788 ds = getstring(x, _("date requires a string"))
789 dm = util.matchdate(ds)
789 dm = util.matchdate(ds)
790 return subset.filter(lambda x: dm(repo[x].date()[0]),
790 return subset.filter(lambda x: dm(repo[x].date()[0]),
791 condrepr=('<date %r>', ds))
791 condrepr=('<date %r>', ds))
792
792
793 @predicate('desc(string)', safe=True)
793 @predicate('desc(string)', safe=True)
794 def desc(repo, subset, x):
794 def desc(repo, subset, x):
795 """Search commit message for string. The match is case-insensitive.
795 """Search commit message for string. The match is case-insensitive.
796 """
796 """
797 # i18n: "desc" is a keyword
797 # i18n: "desc" is a keyword
798 ds = encoding.lower(getstring(x, _("desc requires a string")))
798 ds = encoding.lower(getstring(x, _("desc requires a string")))
799
799
800 def matches(x):
800 def matches(x):
801 c = repo[x]
801 c = repo[x]
802 return ds in encoding.lower(c.description())
802 return ds in encoding.lower(c.description())
803
803
804 return subset.filter(matches, condrepr=('<desc %r>', ds))
804 return subset.filter(matches, condrepr=('<desc %r>', ds))
805
805
806 def _descendants(repo, subset, x, followfirst=False):
806 def _descendants(repo, subset, x, followfirst=False):
807 roots = getset(repo, fullreposet(repo), x)
807 roots = getset(repo, fullreposet(repo), x)
808 if not roots:
808 if not roots:
809 return baseset()
809 return baseset()
810 s = _revdescendants(repo, roots, followfirst)
810 s = _revdescendants(repo, roots, followfirst)
811
811
812 # Both sets need to be ascending in order to lazily return the union
812 # Both sets need to be ascending in order to lazily return the union
813 # in the correct order.
813 # in the correct order.
814 base = subset & roots
814 base = subset & roots
815 desc = subset & s
815 desc = subset & s
816 result = base + desc
816 result = base + desc
817 if subset.isascending():
817 if subset.isascending():
818 result.sort()
818 result.sort()
819 elif subset.isdescending():
819 elif subset.isdescending():
820 result.sort(reverse=True)
820 result.sort(reverse=True)
821 else:
821 else:
822 result = subset & result
822 result = subset & result
823 return result
823 return result
824
824
825 @predicate('descendants(set)', safe=True)
825 @predicate('descendants(set)', safe=True)
826 def descendants(repo, subset, x):
826 def descendants(repo, subset, x):
827 """Changesets which are descendants of changesets in set.
827 """Changesets which are descendants of changesets in set.
828 """
828 """
829 return _descendants(repo, subset, x)
829 return _descendants(repo, subset, x)
830
830
831 @predicate('_firstdescendants', safe=True)
831 @predicate('_firstdescendants', safe=True)
832 def _firstdescendants(repo, subset, x):
832 def _firstdescendants(repo, subset, x):
833 # ``_firstdescendants(set)``
833 # ``_firstdescendants(set)``
834 # Like ``descendants(set)`` but follows only the first parents.
834 # Like ``descendants(set)`` but follows only the first parents.
835 return _descendants(repo, subset, x, followfirst=True)
835 return _descendants(repo, subset, x, followfirst=True)
836
836
837 @predicate('destination([set])', safe=True)
837 @predicate('destination([set])', safe=True)
838 def destination(repo, subset, x):
838 def destination(repo, subset, x):
839 """Changesets that were created by a graft, transplant or rebase operation,
839 """Changesets that were created by a graft, transplant or rebase operation,
840 with the given revisions specified as the source. Omitting the optional set
840 with the given revisions specified as the source. Omitting the optional set
841 is the same as passing all().
841 is the same as passing all().
842 """
842 """
843 if x is not None:
843 if x is not None:
844 sources = getset(repo, fullreposet(repo), x)
844 sources = getset(repo, fullreposet(repo), x)
845 else:
845 else:
846 sources = fullreposet(repo)
846 sources = fullreposet(repo)
847
847
848 dests = set()
848 dests = set()
849
849
850 # subset contains all of the possible destinations that can be returned, so
850 # subset contains all of the possible destinations that can be returned, so
851 # iterate over them and see if their source(s) were provided in the arg set.
851 # iterate over them and see if their source(s) were provided in the arg set.
852 # Even if the immediate src of r is not in the arg set, src's source (or
852 # Even if the immediate src of r is not in the arg set, src's source (or
853 # further back) may be. Scanning back further than the immediate src allows
853 # further back) may be. Scanning back further than the immediate src allows
854 # transitive transplants and rebases to yield the same results as transitive
854 # transitive transplants and rebases to yield the same results as transitive
855 # grafts.
855 # grafts.
856 for r in subset:
856 for r in subset:
        src = _getrevsource(repo, r)
        lineage = None

        while src is not None:
            if lineage is None:
                lineage = list()

            lineage.append(r)

            # The visited lineage is a match if the current source is in the arg
            # set. Since every candidate dest is visited by way of iterating
            # subset, any dests further back in the lineage will be tested by a
            # different iteration over subset. Likewise, if the src was already
            # selected, the current lineage can be selected without going back
            # further.
            if src in sources or src in dests:
                dests.update(lineage)
                break

            r = src
            src = _getrevsource(repo, r)

    return subset.filter(dests.__contains__,
                         condrepr=lambda: '<destination %r>' % sorted(dests))

@predicate('divergent()', safe=True)
def divergent(repo, subset, x):
    """
    Final successors of changesets with an alternative set of final successors.
    """
    # i18n: "divergent" is a keyword
    getargs(x, 0, 0, _("divergent takes no arguments"))
    divergent = obsmod.getrevs(repo, 'divergent')
    return subset & divergent

@predicate('extinct()', safe=True)
def extinct(repo, subset, x):
    """Obsolete changesets with obsolete descendants only.
    """
    # i18n: "extinct" is a keyword
    getargs(x, 0, 0, _("extinct takes no arguments"))
    extincts = obsmod.getrevs(repo, 'extinct')
    return subset & extincts

@predicate('extra(label, [value])', safe=True)
def extra(repo, subset, x):
    """Changesets with the given label in the extra metadata, with the given
    optional value.

    If `value` starts with `re:`, the remainder of the value is treated as
    a regular expression. To match a value that actually starts with `re:`,
    use the prefix `literal:`.
    """
    args = getargsdict(x, 'extra', 'label value')
    if 'label' not in args:
        # i18n: "extra" is a keyword
        raise error.ParseError(_('extra takes at least 1 argument'))
    # i18n: "extra" is a keyword
    label = getstring(args['label'], _('first argument to extra must be '
                                       'a string'))
    value = None

    if 'value' in args:
        # i18n: "extra" is a keyword
        value = getstring(args['value'], _('second argument to extra must be '
                                           'a string'))
        kind, value, matcher = util.stringmatcher(value)

    def _matchvalue(r):
        extra = repo[r].extra()
        return label in extra and (value is None or matcher(extra[label]))

    return subset.filter(lambda r: _matchvalue(r),
                         condrepr=('<extra[%r] %r>', label, value))
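
# A minimal usage sketch for extra(), with hypothetical label/value arguments;
# graft, for example, records a 'source' key in the extra map:
#
#   hg log -r "extra('source')"                   # changesets carrying the key
#   hg log -r "extra('source', 're:^[0-9a-f]+$')" # value matched as a regex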

@predicate('filelog(pattern)', safe=True)
def filelog(repo, subset, x):
    """Changesets connected to the specified filelog.

    For performance reasons, visits only revisions mentioned in the file-level
    filelog, rather than filtering through all changesets (much faster, but
    doesn't include deletes or duplicate changes). For a slower, more accurate
    result, use ``file()``.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.

    If some linkrev points to revisions filtered by the current repoview, we'll
    work around it to return a non-filtered value.
    """

    # i18n: "filelog" is a keyword
    pat = getstring(x, _("filelog requires a pattern"))
    s = set()
    cl = repo.changelog

    if not matchmod.patkind(pat):
        f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
        files = [f]
    else:
        m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
        files = (f for f in repo[None] if m(f))

    for f in files:
        fl = repo.file(f)
        known = {}
        scanpos = 0
        for fr in list(fl):
            fn = fl.node(fr)
            if fn in known:
                s.add(known[fn])
                continue

            lr = fl.linkrev(fr)
            if lr in cl:
                s.add(lr)
            elif scanpos is not None:
                # lowest matching changeset is filtered, scan further
                # ahead in changelog
                start = max(lr, scanpos) + 1
                scanpos = None
                for r in cl.revs(start):
                    # minimize parsing of non-matching entries
                    if f in cl.revision(r) and f in cl.readfiles(r):
                        try:
                            # try to use manifest delta fastpath
                            n = repo[r].filenode(f)
                            if n not in known:
                                if n == fn:
                                    s.add(r)
                                    scanpos = r
                                    break
                                else:
                                    known[n] = r
                        except error.ManifestLookupError:
                            # deletion in changelog
                            continue

    return subset & s
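
# An illustrative comparison (hypothetical file name): filelog() is the fast,
# linkrev-based variant, file() the slower but exhaustive one:
#
#   hg log -r "filelog('mercurial/revset.py')"  # revisions found in the filelog
#   hg log -r "file('mercurial/revset.py')"     # also catches deletes/duplicates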

@predicate('first(set, [n])', safe=True)
def first(repo, subset, x):
    """An alias for limit().
    """
    return limit(repo, subset, x)

def _follow(repo, subset, x, name, followfirst=False):
    l = getargs(x, 0, 2, _("%s takes no arguments or a pattern "
                           "and an optional revset") % name)
    c = repo['.']
    if l:
        x = getstring(l[0], _("%s expected a pattern") % name)
        rev = None
        if len(l) >= 2:
            rev = getset(repo, fullreposet(repo), l[1]).last()
            if rev is None:
                raise error.RepoLookupError(
                    _("%s: starting revision set cannot be empty") % name)
            c = repo[rev]
        matcher = matchmod.match(repo.root, repo.getcwd(), [x],
                                 ctx=repo[rev], default='path')

        files = c.manifest().walk(matcher)

        s = set()
        for fname in files:
            fctx = c[fname]
            s = s.union(set(c.rev() for c in fctx.ancestors(followfirst)))
            # include the revision responsible for the most recent version
            s.add(fctx.introrev())
    else:
        s = _revancestors(repo, baseset([c.rev()]), followfirst)

    return subset & s

@predicate('follow([pattern[, startrev]])', safe=True)
def follow(repo, subset, x):
    """
    An alias for ``::.`` (ancestors of the working directory's first parent).
    If pattern is specified, the histories of files matching the given
    pattern in the revision given by startrev are followed, including copies.
    """
    return _follow(repo, subset, x, 'follow')

@predicate('_followfirst', safe=True)
def _followfirst(repo, subset, x):
    # ``followfirst([pattern[, startrev]])``
    # Like ``follow([pattern[, startrev]])`` but follows only the first parent
    # of every revision or file revision.
    return _follow(repo, subset, x, '_followfirst', followfirst=True)

@predicate('all()', safe=True)
def getall(repo, subset, x):
    """All changesets, the same as ``0:tip``.
    """
    # i18n: "all" is a keyword
    getargs(x, 0, 0, _("all takes no arguments"))
    return subset & spanset(repo) # drop "null" if any

@predicate('grep(regex)')
def grep(repo, subset, x):
    """Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
    to ensure special escape characters are handled correctly. Unlike
    ``keyword(string)``, the match is case-sensitive.
    """
    try:
        # i18n: "grep" is a keyword
        gr = re.compile(getstring(x, _("grep requires a string")))
    except re.error as e:
        raise error.ParseError(_('invalid match pattern: %s') % e)

    def matches(x):
        c = repo[x]
        for e in c.files() + [c.user(), c.description()]:
            if gr.search(e):
                return True
        return False

    return subset.filter(matches, condrepr=('<grep %r>', gr.pattern))
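
# A usage sketch contrasting grep() with keyword() (patterns are hypothetical):
#
#   hg log -r "grep(r'fix(es|ed)? issue\d+')"  # case-sensitive regex match
#   hg log -r "keyword('issue')"               # plain, case-insensitive match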

@predicate('_matchfiles', safe=True)
def _matchfiles(repo, subset, x):
    # _matchfiles takes a revset list of prefixed arguments:
    #
    #   [p:foo, i:bar, x:baz]
    #
    # builds a match object from them and filters subset. Allowed
    # prefixes are 'p:' for regular patterns, 'i:' for include
    # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
    # a revision identifier, or the empty string to reference the
    # working directory, from which the match object is
    # initialized. Use 'd:' to set the default matching mode, default
    # to 'glob'. At most one 'r:' and 'd:' argument can be passed.

    l = getargs(x, 1, -1, "_matchfiles requires at least one argument")
    pats, inc, exc = [], [], []
    rev, default = None, None
    for arg in l:
        s = getstring(arg, "_matchfiles requires string arguments")
        prefix, value = s[:2], s[2:]
        if prefix == 'p:':
            pats.append(value)
        elif prefix == 'i:':
            inc.append(value)
        elif prefix == 'x:':
            exc.append(value)
        elif prefix == 'r:':
            if rev is not None:
                raise error.ParseError('_matchfiles expected at most one '
                                       'revision')
            if value != '': # empty means working directory; leave rev as None
                rev = value
        elif prefix == 'd:':
            if default is not None:
                raise error.ParseError('_matchfiles expected at most one '
                                       'default mode')
            default = value
        else:
            raise error.ParseError('invalid _matchfiles prefix: %s' % prefix)
    if not default:
        default = 'glob'

    m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
                       exclude=exc, ctx=repo[rev], default=default)

    # This directly reads the changelog data, as creating a changectx for all
    # revisions is quite expensive.
    getfiles = repo.changelog.readfiles
    wdirrev = node.wdirrev
    def matches(x):
        if x == wdirrev:
            files = repo[x].files()
        else:
            files = getfiles(x)
        for f in files:
            if m(f):
                return True
        return False

    return subset.filter(matches,
                         condrepr=('<matchfiles patterns=%r, include=%r '
                                   'exclude=%r, default=%r, rev=%r>',
                                   pats, inc, exc, default, rev))

@predicate('file(pattern)', safe=True)
def hasfile(repo, subset, x):
    """Changesets affecting files matched by pattern.

    For a faster but less accurate result, consider using ``filelog()``
    instead.

    This predicate uses ``glob:`` as the default kind of pattern.
    """
    # i18n: "file" is a keyword
    pat = getstring(x, _("file requires a pattern"))
    return _matchfiles(repo, subset, ('string', 'p:' + pat))
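
# An illustrative invocation of file() with a hypothetical glob pattern; since
# 'glob:' is the default kind here, the two forms below are equivalent:
#
#   hg log -r "file('tests/test-revset*')"
#   hg log -r "file('glob:tests/test-revset*')"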

@predicate('head()', safe=True)
def head(repo, subset, x):
    """Changeset is a named branch head.
    """
    # i18n: "head" is a keyword
    getargs(x, 0, 0, _("head takes no arguments"))
    hs = set()
    cl = repo.changelog
    for ls in repo.branchmap().itervalues():
        hs.update(cl.rev(h) for h in ls)
    return subset & baseset(hs)

@predicate('heads(set)', safe=True)
def heads(repo, subset, x):
    """Members of set with no children in set.
    """
    s = getset(repo, subset, x)
    ps = parents(repo, subset, x)
    return s - ps

@predicate('hidden()', safe=True)
def hidden(repo, subset, x):
    """Hidden changesets.
    """
    # i18n: "hidden" is a keyword
    getargs(x, 0, 0, _("hidden takes no arguments"))
    hiddenrevs = repoview.filterrevs(repo, 'visible')
    return subset & hiddenrevs

@predicate('keyword(string)', safe=True)
def keyword(repo, subset, x):
    """Search commit message, user name, and names of changed files for
    string. The match is case-insensitive.
    """
    # i18n: "keyword" is a keyword
    kw = encoding.lower(getstring(x, _("keyword requires a string")))

    def matches(r):
        c = repo[r]
        return any(kw in encoding.lower(t)
                   for t in c.files() + [c.user(), c.description()])

    return subset.filter(matches, condrepr=('<keyword %r>', kw))

@predicate('limit(set[, n[, offset]])', safe=True)
def limit(repo, subset, x):
    """First n members of set, defaulting to 1, starting from offset.
    """
    args = getargsdict(x, 'limit', 'set n offset')
    if 'set' not in args:
        # i18n: "limit" is a keyword
        raise error.ParseError(_("limit requires one to three arguments"))
    try:
        lim, ofs = 1, 0
        if 'n' in args:
            # i18n: "limit" is a keyword
            lim = int(getstring(args['n'], _("limit requires a number")))
        if 'offset' in args:
            # i18n: "limit" is a keyword
            ofs = int(getstring(args['offset'], _("limit requires a number")))
        if ofs < 0:
            raise error.ParseError(_("negative offset"))
    except (TypeError, ValueError):
        # i18n: "limit" is a keyword
        raise error.ParseError(_("limit expects a number"))
    os = getset(repo, fullreposet(repo), args['set'])
    result = []
    it = iter(os)
    for x in xrange(ofs):
        y = next(it, None)
        if y is None:
            break
    for x in xrange(lim):
        y = next(it, None)
        if y is None:
            break
        elif y in subset:
            result.append(y)
    return baseset(result, datarepr=('<limit n=%d, offset=%d, %r, %r>',
                                     lim, ofs, subset, os))
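
# A usage sketch for limit(); the offset argument skips that many members of
# the input set before counting (the set chosen here is arbitrary):
#
#   hg log -r "limit(branch(default), 5)"      # first five on branch 'default'
#   hg log -r "limit(branch(default), 5, 10)"  # five more, skipping the first 10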

@predicate('last(set, [n])', safe=True)
def last(repo, subset, x):
    """Last n members of set, defaulting to 1.
    """
    # i18n: "last" is a keyword
    l = getargs(x, 1, 2, _("last requires one or two arguments"))
    try:
        lim = 1
        if len(l) == 2:
            # i18n: "last" is a keyword
            lim = int(getstring(l[1], _("last requires a number")))
    except (TypeError, ValueError):
        # i18n: "last" is a keyword
        raise error.ParseError(_("last expects a number"))
    os = getset(repo, fullreposet(repo), l[0])
    os.reverse()
    result = []
    it = iter(os)
    for x in xrange(lim):
        y = next(it, None)
        if y is None:
            break
        elif y in subset:
            result.append(y)
    return baseset(result, datarepr=('<last n=%d, %r, %r>', lim, subset, os))
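
# An illustrative call to last(), taking the tail of an ordered set (the set
# passed in is arbitrary):
#
#   hg log -r "last(public(), 3)"   # the three highest-numbered public revisions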

@predicate('max(set)', safe=True)
def maxrev(repo, subset, x):
    """Changeset with highest revision number in set.
    """
    os = getset(repo, fullreposet(repo), x)
    try:
        m = os.max()
        if m in subset:
            return baseset([m], datarepr=('<max %r, %r>', subset, os))
    except ValueError:
        # os.max() throws a ValueError when the collection is empty.
        # Same as python's max().
        pass
    return baseset(datarepr=('<max %r, %r>', subset, os))

@predicate('merge()', safe=True)
def merge(repo, subset, x):
    """Changeset is a merge changeset.
    """
    # i18n: "merge" is a keyword
    getargs(x, 0, 0, _("merge takes no arguments"))
    cl = repo.changelog
    return subset.filter(lambda r: cl.parentrevs(r)[1] != -1,
                         condrepr='<merge>')

@predicate('branchpoint()', safe=True)
def branchpoint(repo, subset, x):
    """Changesets with more than one child.
    """
    # i18n: "branchpoint" is a keyword
    getargs(x, 0, 0, _("branchpoint takes no arguments"))
    cl = repo.changelog
    if not subset:
        return baseset()
    # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
    # (and if it is not, it should.)
    baserev = min(subset)
    parentscount = [0]*(len(repo) - baserev)
    for r in cl.revs(start=baserev + 1):
        for p in cl.parentrevs(r):
            if p >= baserev:
                parentscount[p - baserev] += 1
    return subset.filter(lambda r: parentscount[r - baserev] > 1,
                         condrepr='<branchpoint>')

@predicate('min(set)', safe=True)
def minrev(repo, subset, x):
    """Changeset with lowest revision number in set.
    """
    os = getset(repo, fullreposet(repo), x)
    try:
        m = os.min()
        if m in subset:
            return baseset([m], datarepr=('<min %r, %r>', subset, os))
    except ValueError:
        # os.min() throws a ValueError when the collection is empty.
        # Same as python's min().
        pass
    return baseset(datarepr=('<min %r, %r>', subset, os))

@predicate('modifies(pattern)', safe=True)
def modifies(repo, subset, x):
    """Changesets modifying files matched by pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "modifies" is a keyword
    pat = getstring(x, _("modifies requires a pattern"))
    return checkstatus(repo, subset, pat, 0)

@predicate('named(namespace)')
def named(repo, subset, x):
    """The changesets in a given namespace.

    If `namespace` starts with `re:`, the remainder of the string is treated as
    a regular expression. To match a namespace that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "named" is a keyword
    args = getargs(x, 1, 1, _('named requires a namespace argument'))

    ns = getstring(args[0],
                   # i18n: "named" is a keyword
                   _('the argument to named must be a string'))
    kind, pattern, matcher = util.stringmatcher(ns)
    namespaces = set()
    if kind == 'literal':
        if pattern not in repo.names:
            raise error.RepoLookupError(_("namespace '%s' does not exist")
                                        % ns)
        namespaces.add(repo.names[pattern])
    else:
        for name, ns in repo.names.iteritems():
            if matcher(name):
                namespaces.add(ns)
        if not namespaces:
            raise error.RepoLookupError(_("no namespace exists"
                                          " that match '%s'") % pattern)

    names = set()
    for ns in namespaces:
        for name in ns.listnames(repo):
            if name not in ns.deprecated:
                names.update(repo[n].rev() for n in ns.nodes(repo, name))

    names -= set([node.nullrev])
    return subset & names

@predicate('id(string)', safe=True)
def node_(repo, subset, x):
    """Revision non-ambiguously specified by the given hex string prefix.
    """
    # i18n: "id" is a keyword
    l = getargs(x, 1, 1, _("id requires one argument"))
    # i18n: "id" is a keyword
    n = getstring(l[0], _("id requires a string"))
    if len(n) == 40:
        try:
            rn = repo.changelog.rev(node.bin(n))
        except (LookupError, TypeError):
            rn = None
    else:
        rn = None
        pm = repo.changelog._partialmatch(n)
        if pm is not None:
            rn = repo.changelog.rev(pm)

    if rn is None:
        return baseset()
    result = baseset([rn])
    return result & subset

@predicate('obsolete()', safe=True)
def obsolete(repo, subset, x):
    """Mutable changeset with a newer version."""
    # i18n: "obsolete" is a keyword
    getargs(x, 0, 0, _("obsolete takes no arguments"))
    obsoletes = obsmod.getrevs(repo, 'obsolete')
    return subset & obsoletes

@predicate('only(set, [set])', safe=True)
def only(repo, subset, x):
    """Changesets that are ancestors of the first set that are not ancestors
    of any other head in the repo. If a second set is specified, the result
    is ancestors of the first set that are not ancestors of the second set
    (i.e. ::<set1> - ::<set2>).
    """
    cl = repo.changelog
    # i18n: "only" is a keyword
    args = getargs(x, 1, 2, _('only takes one or two arguments'))
    include = getset(repo, fullreposet(repo), args[0])
    if len(args) == 1:
        if not include:
            return baseset()

        descendants = set(_revdescendants(repo, include, False))
        exclude = [rev for rev in cl.headrevs()
            if not rev in descendants and not rev in include]
    else:
        exclude = getset(repo, fullreposet(repo), args[1])

    results = set(cl.findmissingrevs(common=exclude, heads=include))
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & results
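
# A sketch of only() with hypothetical branch names, mirroring the
# ::<set1> - ::<set2> equivalence described in the docstring:
#
#   hg log -r "only(stable)"           # ancestors of 'stable' reachable from no
#                                      # other head
#   hg log -r "only(stable, default)"  # same as ::stable - ::default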

@predicate('origin([set])', safe=True)
def origin(repo, subset, x):
    """
    Changesets that were specified as a source for the grafts, transplants or
    rebases that created the given revisions. Omitting the optional set is the
    same as passing all(). If a changeset created by these operations is itself
    specified as a source for one of these operations, only the source changeset
    for the first operation is selected.
    """
    if x is not None:
        dests = getset(repo, fullreposet(repo), x)
    else:
        dests = fullreposet(repo)

    def _firstsrc(rev):
        src = _getrevsource(repo, rev)
        if src is None:
            return None

        while True:
            prev = _getrevsource(repo, src)

            if prev is None:
                return src
            src = prev

    o = set([_firstsrc(r) for r in dests])
    o -= set([None])
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & o
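
# An illustrative use of origin(), assuming some revisions in the argument set
# were produced by graft or rebase (the selector passed in is hypothetical):
#
#   hg log -r "origin(branch(default))"  # the changesets they were copied from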

@predicate('outgoing([path])', safe=True)
def outgoing(repo, subset, x):
    """Changesets not found in the specified destination repository, or the
    default push location.
    """
    # Avoid cycles.
    from . import (
        discovery,
        hg,
    )
    # i18n: "outgoing" is a keyword
    l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
    # i18n: "outgoing" is a keyword
    dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
    dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.peer(repo, {}, dest)
    repo.ui.pushbuffer()
    outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
    repo.ui.popbuffer()
    cl = repo.changelog
    o = set([cl.rev(r) for r in outgoing.missing])
    return subset & o

@predicate('p1([set])', safe=True)
def p1(repo, subset, x):
    """First parent of changesets in set, or the working directory.
    """
    if x is None:
        p = repo[x].p1().rev()
        if p >= 0:
            return subset & baseset([p])
        return baseset()

    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        ps.add(cl.parentrevs(r)[0])
    ps -= set([node.nullrev])
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & ps

@predicate('p2([set])', safe=True)
def p2(repo, subset, x):
    """Second parent of changesets in set, or the working directory.
    """
    if x is None:
        ps = repo[x].parents()
        try:
            p = ps[1].rev()
            if p >= 0:
                return subset & baseset([p])
            return baseset()
        except IndexError:
            return baseset()

    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        ps.add(cl.parentrevs(r)[1])
    ps -= set([node.nullrev])
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & ps

@predicate('parents([set])', safe=True)
def parents(repo, subset, x):
    """
    The set of all parents for all changesets in set, or the working directory.
    """
    if x is None:
        ps = set(p.rev() for p in repo[x].parents())
    else:
        ps = set()
        cl = repo.changelog
        up = ps.update
        parentrevs = cl.parentrevs
        for r in getset(repo, fullreposet(repo), x):
            if r == node.wdirrev:
                up(p.rev() for p in repo[r].parents())
            else:
                up(parentrevs(r))
    ps -= set([node.nullrev])
    return subset & ps
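
# A usage sketch relating parents() to p1()/p2() (the input set is arbitrary):
#
#   hg log -r "parents(merge())"  # union of both parents of every merge
#   hg log -r "p1(merge())"       # first parents only
#   hg log -r "p2(merge())"       # second parents only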

def _phase(repo, subset, target):
    """helper to select all revs in phase <target>"""
    repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
    if repo._phasecache._phasesets:
        s = repo._phasecache._phasesets[target] - repo.changelog.filteredrevs
        s = baseset(s)
        s.sort() # sets are unordered, so we enforce ascending order
        return subset & s
    else:
        phase = repo._phasecache.phase
        condition = lambda r: phase(repo, r) == target
        return subset.filter(condition, condrepr=('<phase %r>', target),
                             cache=False)

@predicate('draft()', safe=True)
def draft(repo, subset, x):
    """Changeset in draft phase."""
    # i18n: "draft" is a keyword
    getargs(x, 0, 0, _("draft takes no arguments"))
    target = phases.draft
    return _phase(repo, subset, target)

@predicate('secret()', safe=True)
def secret(repo, subset, x):
    """Changeset in secret phase."""
    # i18n: "secret" is a keyword
    getargs(x, 0, 0, _("secret takes no arguments"))
    target = phases.secret
    return _phase(repo, subset, target)

def parentspec(repo, subset, x, n):
    """``set^0``
    The set.
    ``set^1`` (or ``set^``), ``set^2``
    First or second parent, respectively, of all changesets in set.
    """
    try:
        n = int(n[1])
        if n not in (0, 1, 2):
            raise ValueError
    except (TypeError, ValueError):
        raise error.ParseError(_("^ expects a number 0, 1, or 2"))
    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        if n == 0:
            ps.add(r)
        elif n == 1:
            ps.add(cl.parentrevs(r)[0])
        elif n == 2:
            parents = cl.parentrevs(r)
            if len(parents) > 1:
                ps.add(parents[1])
    return subset & ps
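
# parentspec() backs the ^ operator; an equivalent pair of queries, using a
# hypothetical revision number:
#
#   hg log -r "1234^2"      # second parent of revision 1234
#   hg log -r "p2(1234)"    # same result via the p2() predicate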

@predicate('present(set)', safe=True)
def present(repo, subset, x):
    """An empty set, if any revision in set isn't found; otherwise,
    all revisions in set.

    If any of the specified revisions is not present in the local repository,
    the query is normally aborted. But this predicate allows the query
    to continue even in such cases.
    """
    try:
        return getset(repo, subset, x)
    except error.RepoLookupError:
        return baseset()
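
# An illustrative guard with present(), assuming a tag name that may or may
# not exist locally; without the wrapper, the whole query would abort:
#
#   hg log -r "present(tag('v9.9.9')) or tip"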

# for internal use
@predicate('_notpublic', safe=True)
def _notpublic(repo, subset, x):
    getargs(x, 0, 0, "_notpublic takes no arguments")
    repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
    if repo._phasecache._phasesets:
        s = set()
        for u in repo._phasecache._phasesets[1:]:
            s.update(u)
        s = baseset(s - repo.changelog.filteredrevs)
        s.sort()
        return subset & s
    else:
        phase = repo._phasecache.phase
        target = phases.public
        condition = lambda r: phase(repo, r) != target
        return subset.filter(condition, condrepr=('<phase %r>', target),
                             cache=False)

@predicate('public()', safe=True)
def public(repo, subset, x):
    """Changeset in public phase."""
    # i18n: "public" is a keyword
    getargs(x, 0, 0, _("public takes no arguments"))
    phase = repo._phasecache.phase
    target = phases.public
    condition = lambda r: phase(repo, r) == target
    return subset.filter(condition, condrepr=('<phase %r>', target),
                         cache=False)

@predicate('remote([id [,path]])', safe=True)
def remote(repo, subset, x):
    """Local revision that corresponds to the given identifier in a
    remote repository, if present. Here, the '.' identifier is a
    synonym for the current local branch.
    """

    from . import hg # avoid start-up nasties
    # i18n: "remote" is a keyword
    l = getargs(x, 0, 2, _("remote takes zero, one, or two arguments"))

    q = '.'
    if len(l) > 0:
        # i18n: "remote" is a keyword
        q = getstring(l[0], _("remote requires a string id"))
    if q == '.':
        q = repo['.'].branch()

    dest = ''
    if len(l) > 1:
        # i18n: "remote" is a keyword
        dest = getstring(l[1], _("remote requires a repository path"))
    dest = repo.ui.expandpath(dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.peer(repo, {}, dest)
    n = other.lookup(q)
    if n in repo:
        r = repo[n].rev()
        if r in subset:
            return baseset([r])
    return baseset()
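
# A sketch of remote() with hypothetical identifiers and path aliases:
#
#   hg log -r "remote()"                      # the current branch's head as known
#                                             # to the 'default' path, if it
#                                             # exists locally
#   hg log -r "remote('stable', 'upstream')"  # 'stable' as known to 'upstream'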

@predicate('removes(pattern)', safe=True)
def removes(repo, subset, x):
    """Changesets which remove files matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "removes" is a keyword
    pat = getstring(x, _("removes requires a pattern"))
    return checkstatus(repo, subset, pat, 2)

@predicate('rev(number)', safe=True)
def rev(repo, subset, x):
    """Revision with the given numeric identifier.
    """
    # i18n: "rev" is a keyword
    l = getargs(x, 1, 1, _("rev requires one argument"))
    try:
        # i18n: "rev" is a keyword
        l = int(getstring(l[0], _("rev requires a number")))
    except (TypeError, ValueError):
        # i18n: "rev" is a keyword
        raise error.ParseError(_("rev expects a number"))
    if l not in repo.changelog and l != node.nullrev:
        return baseset()
    return subset & baseset([l])

@predicate('matching(revision [, field])', safe=True)
def matching(repo, subset, x):
    """Changesets in which a given set of fields match the set of fields in the
    selected revision or set.

    To match more than one field pass the list of fields to match separated
    by spaces (e.g. ``author description``).

    Valid fields are most regular revision fields and some special fields.

    Regular revision fields are ``description``, ``author``, ``branch``,
    ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
    and ``diff``.
    Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
    contents of the revision. Two revisions matching their ``diff`` will
    also match their ``files``.

    Special fields are ``summary`` and ``metadata``:
    ``summary`` matches the first line of the description.
    ``metadata`` is equivalent to matching ``description user date``
    (i.e. it matches the main metadata fields).

    ``metadata`` is the default field which is used when no fields are
    specified. You can match more than one field at a time.
    """
    # i18n: "matching" is a keyword
    l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))

    revs = getset(repo, fullreposet(repo), l[0])

    fieldlist = ['metadata']
    if len(l) > 1:
        fieldlist = getstring(l[1],
            # i18n: "matching" is a keyword
            _("matching requires a string "
              "as its second argument")).split()

    # Make sure that there are no repeated fields,
    # expand the 'special' 'metadata' field type
    # and check the 'files' whenever we check the 'diff'
    fields = []
    for field in fieldlist:
        if field == 'metadata':
            fields += ['user', 'description', 'date']
        elif field == 'diff':
            # a revision matching the diff must also match the files
            # since matching the diff is very costly, make sure to
            # also match the files first
            fields += ['files', 'diff']
        else:
            if field == 'author':
                field = 'user'
            fields.append(field)
    fields = set(fields)
    if 'summary' in fields and 'description' in fields:
        # If a revision matches its description it also matches its summary
        fields.discard('summary')

    # We may want to match more than one field
    # Not all fields take the same amount of time to be matched
    # Sort the selected fields in order of increasing matching cost
    fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
        'files', 'description', 'substate', 'diff']
    def fieldkeyfunc(f):
        try:
            return fieldorder.index(f)
        except ValueError:
            # assume an unknown field is very costly
            return len(fieldorder)
    fields = list(fields)
    fields.sort(key=fieldkeyfunc)

    # Each field will be matched with its own "getfield" function
    # which will be added to the getfieldfuncs array of functions
    getfieldfuncs = []
    _funcs = {
        'user': lambda r: repo[r].user(),
        'branch': lambda r: repo[r].branch(),
        'date': lambda r: repo[r].date(),
        'description': lambda r: repo[r].description(),
        'files': lambda r: repo[r].files(),
        'parents': lambda r: repo[r].parents(),
        'phase': lambda r: repo[r].phase(),
        'substate': lambda r: repo[r].substate,
        'summary': lambda r: repo[r].description().splitlines()[0],
1798 'diff': lambda r: list(repo[r].diff(git=True)),
1798 'diff': lambda r: list(repo[r].diff(git=True)),
1799 }
1799 }
1800 for info in fields:
1800 for info in fields:
1801 getfield = _funcs.get(info, None)
1801 getfield = _funcs.get(info, None)
1802 if getfield is None:
1802 if getfield is None:
1803 raise error.ParseError(
1803 raise error.ParseError(
1804 # i18n: "matching" is a keyword
1804 # i18n: "matching" is a keyword
1805 _("unexpected field name passed to matching: %s") % info)
1805 _("unexpected field name passed to matching: %s") % info)
1806 getfieldfuncs.append(getfield)
1806 getfieldfuncs.append(getfield)
1807 # convert the getfield array of functions into a "getinfo" function
1807 # convert the getfield array of functions into a "getinfo" function
1808 # which returns an array of field values (or a single value if there
1808 # which returns an array of field values (or a single value if there
1809 # is only one field to match)
1809 # is only one field to match)
1810 getinfo = lambda r: [f(r) for f in getfieldfuncs]
1810 getinfo = lambda r: [f(r) for f in getfieldfuncs]
1811
1811
1812 def matches(x):
1812 def matches(x):
1813 for rev in revs:
1813 for rev in revs:
1814 target = getinfo(rev)
1814 target = getinfo(rev)
1815 match = True
1815 match = True
1816 for n, f in enumerate(getfieldfuncs):
1816 for n, f in enumerate(getfieldfuncs):
1817 if target[n] != f(x):
1817 if target[n] != f(x):
1818 match = False
1818 match = False
1819 if match:
1819 if match:
1820 return True
1820 return True
1821 return False
1821 return False
1822
1822
1823 return subset.filter(matches, condrepr=('<matching%r %r>', fields, revs))
1823 return subset.filter(matches, condrepr=('<matching%r %r>', fields, revs))
1824
1824
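A minimal standalone sketch (plain Python, not part of revset.py) of the field normalization performed above: expand the special ``metadata`` field, fold ``author`` into ``user``, make ``diff`` imply ``files``, and sort by estimated matching cost, treating unknown fields as the most expensive.

    _FIELDORDER = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
                   'files', 'description', 'substate', 'diff']

    def normalizefields(fieldlist):
        fields = set()
        for field in fieldlist:
            if field == 'metadata':
                fields.update(['user', 'description', 'date'])
            elif field == 'diff':
                # matching the diff implies matching the files first
                fields.update(['files', 'diff'])
            else:
                fields.add('user' if field == 'author' else field)
        if 'summary' in fields and 'description' in fields:
            fields.discard('summary')
        # cheap fields first; unknown fields are assumed to be very costly
        costof = lambda f: (_FIELDORDER.index(f) if f in _FIELDORDER
                            else len(_FIELDORDER))
        return sorted(fields, key=costof)

    assert normalizefields(['metadata']) == ['user', 'date', 'description']
    assert normalizefields(['author', 'diff']) == ['user', 'files', 'diff']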
1825 @predicate('reverse(set)', safe=True)
1825 @predicate('reverse(set)', safe=True)
1826 def reverse(repo, subset, x):
1826 def reverse(repo, subset, x):
1827 """Reverse order of set.
1827 """Reverse order of set.
1828 """
1828 """
1829 l = getset(repo, subset, x)
1829 l = getset(repo, subset, x)
1830 l.reverse()
1830 l.reverse()
1831 return l
1831 return l
1832
1832
1833 @predicate('roots(set)', safe=True)
1833 @predicate('roots(set)', safe=True)
1834 def roots(repo, subset, x):
1834 def roots(repo, subset, x):
1835 """Changesets in set with no parent changeset in set.
1835 """Changesets in set with no parent changeset in set.
1836 """
1836 """
1837 s = getset(repo, fullreposet(repo), x)
1837 s = getset(repo, fullreposet(repo), x)
1838 parents = repo.changelog.parentrevs
1838 parents = repo.changelog.parentrevs
1839 def filter(r):
1839 def filter(r):
1840 for p in parents(r):
1840 for p in parents(r):
1841 if 0 <= p and p in s:
1841 if 0 <= p and p in s:
1842 return False
1842 return False
1843 return True
1843 return True
1844 return subset & s.filter(filter, condrepr='<roots>')
1844 return subset & s.filter(filter, condrepr='<roots>')
1845
1845
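The same membership test can be illustrated on a plain dict-based DAG (hypothetical data, not Mercurial objects): a rev is a root of the set when none of its parents are also in the set.

    parentrevs = {0: (-1, -1), 1: (0, -1), 2: (0, -1), 3: (2, -1), 4: (1, -1)}

    def rootsof(revs):
        revs = set(revs)
        return {r for r in revs
                if not any(0 <= p and p in revs for p in parentrevs[r])}

    assert rootsof({1, 2, 3, 4}) == {1, 2}
    assert rootsof({0, 1, 2}) == {0}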
1846 _sortkeyfuncs = {
1846 _sortkeyfuncs = {
1847 'rev': lambda c: c.rev(),
1847 'rev': lambda c: c.rev(),
1848 'branch': lambda c: c.branch(),
1848 'branch': lambda c: c.branch(),
1849 'desc': lambda c: c.description(),
1849 'desc': lambda c: c.description(),
1850 'user': lambda c: c.user(),
1850 'user': lambda c: c.user(),
1851 'author': lambda c: c.user(),
1851 'author': lambda c: c.user(),
1852 'date': lambda c: c.date()[0],
1852 'date': lambda c: c.date()[0],
1853 }
1853 }
1854
1854
1855 def _getsortargs(x):
1855 def _getsortargs(x):
1856 """Parse sort options into (set, [(key, reverse)], opts)"""
1856 """Parse sort options into (set, [(key, reverse)], opts)"""
1857 args = getargsdict(x, 'sort', 'set keys topo.firstbranch')
1857 args = getargsdict(x, 'sort', 'set keys topo.firstbranch')
1858 if 'set' not in args:
1858 if 'set' not in args:
1859 # i18n: "sort" is a keyword
1859 # i18n: "sort" is a keyword
1860 raise error.ParseError(_('sort requires one or two arguments'))
1860 raise error.ParseError(_('sort requires one or two arguments'))
1861 keys = "rev"
1861 keys = "rev"
1862 if 'keys' in args:
1862 if 'keys' in args:
1863 # i18n: "sort" is a keyword
1863 # i18n: "sort" is a keyword
1864 keys = getstring(args['keys'], _("sort spec must be a string"))
1864 keys = getstring(args['keys'], _("sort spec must be a string"))
1865
1865
1866 keyflags = []
1866 keyflags = []
1867 for k in keys.split():
1867 for k in keys.split():
1868 fk = k
1868 fk = k
1869 reverse = (k[0] == '-')
1869 reverse = (k[0] == '-')
1870 if reverse:
1870 if reverse:
1871 k = k[1:]
1871 k = k[1:]
1872 if k not in _sortkeyfuncs and k != 'topo':
1872 if k not in _sortkeyfuncs and k != 'topo':
1873 raise error.ParseError(_("unknown sort key %r") % fk)
1873 raise error.ParseError(_("unknown sort key %r") % fk)
1874 keyflags.append((k, reverse))
1874 keyflags.append((k, reverse))
1875
1875
1876 if len(keyflags) > 1 and any(k == 'topo' for k, reverse in keyflags):
1876 if len(keyflags) > 1 and any(k == 'topo' for k, reverse in keyflags):
1877 # i18n: "topo" is a keyword
1877 # i18n: "topo" is a keyword
1878 raise error.ParseError(_('topo sort order cannot be combined '
1878 raise error.ParseError(_('topo sort order cannot be combined '
1879 'with other sort keys'))
1879 'with other sort keys'))
1880
1880
1881 opts = {}
1881 opts = {}
1882 if 'topo.firstbranch' in args:
1882 if 'topo.firstbranch' in args:
1883 if any(k == 'topo' for k, reverse in keyflags):
1883 if any(k == 'topo' for k, reverse in keyflags):
1884 opts['topo.firstbranch'] = args['topo.firstbranch']
1884 opts['topo.firstbranch'] = args['topo.firstbranch']
1885 else:
1885 else:
1886 # i18n: "topo" and "topo.firstbranch" are keywords
1886 # i18n: "topo" and "topo.firstbranch" are keywords
1887 raise error.ParseError(_('topo.firstbranch can only be used '
1887 raise error.ParseError(_('topo.firstbranch can only be used '
1888 'when using the topo sort key'))
1888 'when using the topo sort key'))
1889
1889
1890 return args['set'], keyflags, opts
1890 return args['set'], keyflags, opts
1891
1891
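A standalone sketch (not the revset parser) of the key handling above: a spec such as ``-date branch`` becomes a list of (key, reverse) pairs, and unknown keys are rejected.

    VALIDKEYS = {'rev', 'branch', 'desc', 'user', 'author', 'date', 'topo'}

    def parsekeys(keys="rev"):
        keyflags = []
        for k in keys.split():
            reverse = k.startswith('-')
            name = k[1:] if reverse else k
            if name not in VALIDKEYS:
                raise ValueError("unknown sort key %r" % k)
            keyflags.append((name, reverse))
        return keyflags

    assert parsekeys("-date branch") == [('date', True), ('branch', False)]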
1892 @predicate('sort(set[, [-]key... [, ...]])', safe=True)
1892 @predicate('sort(set[, [-]key... [, ...]])', safe=True)
1893 def sort(repo, subset, x):
1893 def sort(repo, subset, x):
1894 """Sort set by keys. The default sort order is ascending, specify a key
1894 """Sort set by keys. The default sort order is ascending, specify a key
1895 as ``-key`` to sort in descending order.
1895 as ``-key`` to sort in descending order.
1896
1896
1897 The keys can be:
1897 The keys can be:
1898
1898
1899 - ``rev`` for the revision number,
1899 - ``rev`` for the revision number,
1900 - ``branch`` for the branch name,
1900 - ``branch`` for the branch name,
1901 - ``desc`` for the commit message (description),
1901 - ``desc`` for the commit message (description),
1902 - ``user`` for user name (``author`` can be used as an alias),
1902 - ``user`` for user name (``author`` can be used as an alias),
1903 - ``date`` for the commit date
1903 - ``date`` for the commit date
1904 - ``topo`` for a reverse topographical sort
1904 - ``topo`` for a reverse topographical sort
1905
1905
1906 The ``topo`` sort order cannot be combined with other sort keys. This sort
1906 The ``topo`` sort order cannot be combined with other sort keys. This sort
1907 takes one optional argument, ``topo.firstbranch``, which takes a revset that
1907 takes one optional argument, ``topo.firstbranch``, which takes a revset that
1908 specifies what topographical branches to prioritize in the sort.
1908 specifies what topographical branches to prioritize in the sort.
1909
1909
1910 """
1910 """
1911 s, keyflags, opts = _getsortargs(x)
1911 s, keyflags, opts = _getsortargs(x)
1912 revs = getset(repo, subset, s)
1912 revs = getset(repo, subset, s)
1913
1913
1914 if not keyflags:
1914 if not keyflags:
1915 return revs
1915 return revs
1916 if len(keyflags) == 1 and keyflags[0][0] == "rev":
1916 if len(keyflags) == 1 and keyflags[0][0] == "rev":
1917 revs.sort(reverse=keyflags[0][1])
1917 revs.sort(reverse=keyflags[0][1])
1918 return revs
1918 return revs
1919 elif keyflags[0][0] == "topo":
1919 elif keyflags[0][0] == "topo":
1920 firstbranch = ()
1920 firstbranch = ()
1921 if 'topo.firstbranch' in opts:
1921 if 'topo.firstbranch' in opts:
1922 firstbranch = getset(repo, subset, opts['topo.firstbranch'])
1922 firstbranch = getset(repo, subset, opts['topo.firstbranch'])
1923 revs = baseset(_toposort(revs, repo.changelog.parentrevs, firstbranch),
1923 revs = baseset(_toposort(revs, repo.changelog.parentrevs, firstbranch),
1924 istopo=True)
1924 istopo=True)
1925 if keyflags[0][1]:
1925 if keyflags[0][1]:
1926 revs.reverse()
1926 revs.reverse()
1927 return revs
1927 return revs
1928
1928
1929 # sort() is guaranteed to be stable
1929 # sort() is guaranteed to be stable
1930 ctxs = [repo[r] for r in revs]
1930 ctxs = [repo[r] for r in revs]
1931 for k, reverse in reversed(keyflags):
1931 for k, reverse in reversed(keyflags):
1932 ctxs.sort(key=_sortkeyfuncs[k], reverse=reverse)
1932 ctxs.sort(key=_sortkeyfuncs[k], reverse=reverse)
1933 return baseset([c.rev() for c in ctxs])
1933 return baseset([c.rev() for c in ctxs])
1934
1934
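The multi-key case above relies on Python's sort being stable: applying the keys from last to first is equivalent to one compound sort. A minimal sketch on plain tuples (hypothetical data):

    changesets = [(2, 'stable'), (0, 'default'), (1, 'default')]
    keyfuncs = {'rev': lambda c: c[0], 'branch': lambda c: c[1]}

    def multisort(items, keyflags):
        items = list(items)
        for key, reverse in reversed(keyflags):
            items.sort(key=keyfuncs[key], reverse=reverse)
        return items

    # branch ascending, then rev descending within each branch
    assert multisort(changesets, [('branch', False), ('rev', True)]) == \
        [(1, 'default'), (0, 'default'), (2, 'stable')]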
1935 def _toposort(revs, parentsfunc, firstbranch=()):
1935 def _toposort(revs, parentsfunc, firstbranch=()):
1936 """Yield revisions from heads to roots one (topo) branch at a time.
1936 """Yield revisions from heads to roots one (topo) branch at a time.
1937
1937
1938 This function is meant to be used by a graph generator that wishes to minimize
1938 This function is meant to be used by a graph generator that wishes to minimize
1939 the number of parallel branches and their interleaving.
1939 the number of parallel branches and their interleaving.
1940
1940
1941 Example iteration order (numbers show the "true" order in a changelog):
1941 Example iteration order (numbers show the "true" order in a changelog):
1942
1942
1943 o 4
1943 o 4
1944 |
1944 |
1945 o 1
1945 o 1
1946 |
1946 |
1947 | o 3
1947 | o 3
1948 | |
1948 | |
1949 | o 2
1949 | o 2
1950 |/
1950 |/
1951 o 0
1951 o 0
1952
1952
1953 Note that the ancestors of merges are understood by the current
1953 Note that the ancestors of merges are understood by the current
1954 algorithm to be on the same branch. This means no reordering will
1954 algorithm to be on the same branch. This means no reordering will
1955 occur behind a merge.
1955 occur behind a merge.
1956 """
1956 """
1957
1957
1958 ### Quick summary of the algorithm
1958 ### Quick summary of the algorithm
1959 #
1959 #
1960 # This function is based around a "retention" principle. We keep revisions
1960 # This function is based around a "retention" principle. We keep revisions
1961 # in memory until we are ready to emit a whole branch that immediately
1961 # in memory until we are ready to emit a whole branch that immediately
1962 # "merges" into an existing one. This reduces the number of parallel
1962 # "merges" into an existing one. This reduces the number of parallel
1963 # branches with interleaved revisions.
1963 # branches with interleaved revisions.
1964 #
1964 #
1965 # During iteration, revs are split into two groups:
1965 # During iteration, revs are split into two groups:
1966 # A) revisions already emitted
1966 # A) revisions already emitted
1967 # B) revisions in "retention". They are stored as different subgroups.
1967 # B) revisions in "retention". They are stored as different subgroups.
1968 #
1968 #
1969 # for each REV, we do the following logic:
1969 # for each REV, we do the following logic:
1970 #
1970 #
1971 # 1) if REV is a parent of (A), we will emit it. If there is a
1971 # 1) if REV is a parent of (A), we will emit it. If there is a
1972 # retention group ((B) above) that is blocked on REV being
1972 # retention group ((B) above) that is blocked on REV being
1973 # available, we emit all the revisions out of that retention
1973 # available, we emit all the revisions out of that retention
1974 # group first.
1974 # group first.
1975 #
1975 #
1976 # 2) else, we search for a subgroup in (B) waiting for REV to become
1976 # 2) else, we search for a subgroup in (B) waiting for REV to become
1977 # available. If such a subgroup exists, we add REV to it and the subgroup
1977 # available. If such a subgroup exists, we add REV to it and the subgroup
1978 # is now waiting for REV.parents() to become available.
1978 # is now waiting for REV.parents() to become available.
1979 #
1979 #
1980 # 3) finally if no such group existed in (B), we create a new subgroup.
1980 # 3) finally if no such group existed in (B), we create a new subgroup.
1981 #
1981 #
1982 #
1982 #
1983 # To bootstrap the algorithm, we emit the tipmost revision (which
1983 # To bootstrap the algorithm, we emit the tipmost revision (which
1984 # puts it in group (A) from above).
1984 # puts it in group (A) from above).
1985
1985
1986 revs.sort(reverse=True)
1986 revs.sort(reverse=True)
1987
1987
1988 # Set of parents of revisions that have been emitted. They can be considered
1988 # Set of parents of revisions that have been emitted. They can be considered
1989 # unblocked as the graph generator is already aware of them so there is no
1989 # unblocked as the graph generator is already aware of them so there is no
1990 # need to delay the revisions that reference them.
1990 # need to delay the revisions that reference them.
1991 #
1991 #
1992 # If someone wants to prioritize a branch over the others, pre-filling this
1992 # If someone wants to prioritize a branch over the others, pre-filling this
1993 # set will force all other branches to wait until this branch is ready to be
1993 # set will force all other branches to wait until this branch is ready to be
1994 # emitted.
1994 # emitted.
1995 unblocked = set(firstbranch)
1995 unblocked = set(firstbranch)
1996
1996
1997 # list of groups waiting to be displayed, each group is defined by:
1997 # list of groups waiting to be displayed, each group is defined by:
1998 #
1998 #
1999 # (revs: lists of revs waiting to be displayed,
1999 # (revs: lists of revs waiting to be displayed,
2000 # blocked: set of revs that cannot be displayed before those in 'revs')
2000 # blocked: set of revs that cannot be displayed before those in 'revs')
2001 #
2001 #
2002 # The second value ('blocked') corresponds to parents of any revision in the
2002 # The second value ('blocked') corresponds to parents of any revision in the
2003 # group ('revs') that is not itself contained in the group. The main idea
2003 # group ('revs') that is not itself contained in the group. The main idea
2004 # of this algorithm is to delay as much as possible the emission of any
2004 # of this algorithm is to delay as much as possible the emission of any
2005 # revision. This means waiting for the moment we are about to display
2005 # revision. This means waiting for the moment we are about to display
2006 # these parents to display the revs in a group.
2006 # these parents to display the revs in a group.
2007 #
2007 #
2008 # This first implementation is smart until it encounters a merge: it will
2008 # This first implementation is smart until it encounters a merge: it will
2009 # emit revs as soon as any parent is about to be emitted and can grow an
2009 # emit revs as soon as any parent is about to be emitted and can grow an
2010 # arbitrary number of revs in 'blocked'. In practice this means we properly
2010 # arbitrary number of revs in 'blocked'. In practice this means we properly
2011 # retain new branches but give up on any special ordering for ancestors
2011 # retain new branches but give up on any special ordering for ancestors
2012 # of merges. The implementation can be improved to handle this better.
2012 # of merges. The implementation can be improved to handle this better.
2013 #
2013 #
2014 # The first subgroup is special. It corresponds to all the revisions that
2014 # The first subgroup is special. It corresponds to all the revisions that
2015 # were already emitted. The 'revs' list is expected to be empty and the
2015 # were already emitted. The 'revs' list is expected to be empty and the
2016 # 'blocked' set contains the parent revisions of already emitted revisions.
2016 # 'blocked' set contains the parent revisions of already emitted revisions.
2017 #
2017 #
2018 # You could pre-seed the <parents> set of groups[0] with specific
2018 # You could pre-seed the <parents> set of groups[0] with specific
2019 # changesets to select what the first emitted branch should be.
2019 # changesets to select what the first emitted branch should be.
2020 groups = [([], unblocked)]
2020 groups = [([], unblocked)]
2021 pendingheap = []
2021 pendingheap = []
2022 pendingset = set()
2022 pendingset = set()
2023
2023
2024 heapq.heapify(pendingheap)
2024 heapq.heapify(pendingheap)
2025 heappop = heapq.heappop
2025 heappop = heapq.heappop
2026 heappush = heapq.heappush
2026 heappush = heapq.heappush
2027 for currentrev in revs:
2027 for currentrev in revs:
2028 # The heap yields its smallest element; we want the highest, so we invert
2028 # The heap yields its smallest element; we want the highest, so we invert
2029 if currentrev not in pendingset:
2029 if currentrev not in pendingset:
2030 heappush(pendingheap, -currentrev)
2030 heappush(pendingheap, -currentrev)
2031 pendingset.add(currentrev)
2031 pendingset.add(currentrev)
2032 # iterate on pending revs until the current rev has been
2032 # iterate on pending revs until the current rev has been
2033 # processed.
2033 # processed.
2034 rev = None
2034 rev = None
2035 while rev != currentrev:
2035 while rev != currentrev:
2036 rev = -heappop(pendingheap)
2036 rev = -heappop(pendingheap)
2037 pendingset.remove(rev)
2037 pendingset.remove(rev)
2038
2038
2039 # Look for a subgroup that is blocked, waiting for the current revision.
2039 # Look for a subgroup that is blocked, waiting for the current revision.
2040 matching = [i for i, g in enumerate(groups) if rev in g[1]]
2040 matching = [i for i, g in enumerate(groups) if rev in g[1]]
2041
2041
2042 if matching:
2042 if matching:
2043 # The main idea is to gather together all sets that are blocked
2043 # The main idea is to gather together all sets that are blocked
2044 # on the same revision.
2044 # on the same revision.
2045 #
2045 #
2046 # Groups are merged when a common blocking ancestor is
2046 # Groups are merged when a common blocking ancestor is
2047 # observed. For example, given two groups:
2047 # observed. For example, given two groups:
2048 #
2048 #
2049 # revs [5, 4] waiting for 1
2049 # revs [5, 4] waiting for 1
2050 # revs [3, 2] waiting for 1
2050 # revs [3, 2] waiting for 1
2051 #
2051 #
2052 # These two groups will be merged when we process
2052 # These two groups will be merged when we process
2053 # 1. In theory, we could have merged the groups when
2053 # 1. In theory, we could have merged the groups when
2054 # we added 2 to the group it is now in (we could have
2054 # we added 2 to the group it is now in (we could have
2055 # noticed the groups were both blocked on 1 then), but
2055 # noticed the groups were both blocked on 1 then), but
2056 # the way it works now makes the algorithm simpler.
2056 # the way it works now makes the algorithm simpler.
2057 #
2057 #
2058 # We also always keep the oldest subgroup first. We can
2058 # We also always keep the oldest subgroup first. We can
2059 # probably improve the behavior by having the longest set
2059 # probably improve the behavior by having the longest set
2060 # first. That way, graph algorithms could minimise the length
2060 # first. That way, graph algorithms could minimise the length
2061 # of parallel lines in their drawing. This is currently not done.
2061 # of parallel lines in their drawing. This is currently not done.
2062 targetidx = matching.pop(0)
2062 targetidx = matching.pop(0)
2063 trevs, tparents = groups[targetidx]
2063 trevs, tparents = groups[targetidx]
2064 for i in matching:
2064 for i in matching:
2065 gr = groups[i]
2065 gr = groups[i]
2066 trevs.extend(gr[0])
2066 trevs.extend(gr[0])
2067 tparents |= gr[1]
2067 tparents |= gr[1]
2068 # delete all merged subgroups (except the one we kept)
2068 # delete all merged subgroups (except the one we kept)
2069 # (starting from the last subgroup for performance and
2069 # (starting from the last subgroup for performance and
2070 # sanity reasons)
2070 # sanity reasons)
2071 for i in reversed(matching):
2071 for i in reversed(matching):
2072 del groups[i]
2072 del groups[i]
2073 else:
2073 else:
2074 # This is a new head. We create a new subgroup for it.
2074 # This is a new head. We create a new subgroup for it.
2075 targetidx = len(groups)
2075 targetidx = len(groups)
2076 groups.append(([], set([rev])))
2076 groups.append(([], set([rev])))
2077
2077
2078 gr = groups[targetidx]
2078 gr = groups[targetidx]
2079
2079
2080 # We now add the current nodes to this subgroup. This is done
2080 # We now add the current nodes to this subgroup. This is done
2081 # after the subgroup merging because all elements from a subgroup
2081 # after the subgroup merging because all elements from a subgroup
2082 # that relied on this rev must precede it.
2082 # that relied on this rev must precede it.
2083 #
2083 #
2084 # we also update the <parents> set to include the parents of the
2084 # we also update the <parents> set to include the parents of the
2085 # new nodes.
2085 # new nodes.
2086 if rev == currentrev: # only display stuff in rev
2086 if rev == currentrev: # only display stuff in rev
2087 gr[0].append(rev)
2087 gr[0].append(rev)
2088 gr[1].remove(rev)
2088 gr[1].remove(rev)
2089 parents = [p for p in parentsfunc(rev) if p > node.nullrev]
2089 parents = [p for p in parentsfunc(rev) if p > node.nullrev]
2090 gr[1].update(parents)
2090 gr[1].update(parents)
2091 for p in parents:
2091 for p in parents:
2092 if p not in pendingset:
2092 if p not in pendingset:
2093 pendingset.add(p)
2093 pendingset.add(p)
2094 heappush(pendingheap, -p)
2094 heappush(pendingheap, -p)
2095
2095
2096 # Look for a subgroup to display
2096 # Look for a subgroup to display
2097 #
2097 #
2098 # When unblocked is empty (if clause), we were not waiting for any
2098 # When unblocked is empty (if clause), we were not waiting for any
2099 # revisions during the first iteration (if no priority was given) or
2099 # revisions during the first iteration (if no priority was given) or
2100 # if we emitted a whole disconnected set of the graph (reached a
2100 # if we emitted a whole disconnected set of the graph (reached a
2101 # root). In that case we arbitrarily take the oldest known
2101 # root). In that case we arbitrarily take the oldest known
2102 # subgroup. The heuristic could probably be better.
2102 # subgroup. The heuristic could probably be better.
2103 #
2103 #
2104 # Otherwise (elif clause) if the subgroup is blocked on
2104 # Otherwise (elif clause) if the subgroup is blocked on
2105 # a revision we just emitted, we can safely emit it as
2105 # a revision we just emitted, we can safely emit it as
2106 # well.
2106 # well.
2107 if not unblocked:
2107 if not unblocked:
2108 if len(groups) > 1: # display other subset
2108 if len(groups) > 1: # display other subset
2109 targetidx = 1
2109 targetidx = 1
2110 gr = groups[1]
2110 gr = groups[1]
2111 elif not gr[1] & unblocked:
2111 elif not gr[1] & unblocked:
2112 gr = None
2112 gr = None
2113
2113
2114 if gr is not None:
2114 if gr is not None:
2115 # update the set of awaited revisions with the one from the
2115 # update the set of awaited revisions with the one from the
2116 # subgroup
2116 # subgroup
2117 unblocked |= gr[1]
2117 unblocked |= gr[1]
2118 # output all revisions in the subgroup
2118 # output all revisions in the subgroup
2119 for r in gr[0]:
2119 for r in gr[0]:
2120 yield r
2120 yield r
2121 # delete the subgroup that you just output
2121 # delete the subgroup that you just output
2122 # unless it is groups[0] in which case you just empty it.
2122 # unless it is groups[0] in which case you just empty it.
2123 if targetidx:
2123 if targetidx:
2124 del groups[targetidx]
2124 del groups[targetidx]
2125 else:
2125 else:
2126 gr[0][:] = []
2126 gr[0][:] = []
2127 # Check if we have some subgroup waiting for revisions we are not going to
2127 # Check if we have some subgroup waiting for revisions we are not going to
2128 # iterate over
2128 # iterate over
2129 for g in groups:
2129 for g in groups:
2130 for r in g[0]:
2130 for r in g[0]:
2131 yield r
2131 yield r
2132
2132
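The generator above repeatedly needs the highest pending revision, but ``heapq`` only provides a min-heap; pushing negated values and negating again on pop gives highest-first order. A standalone illustration of that trick:

    import heapq

    pending = []
    for rev in [3, 10, 7]:
        heapq.heappush(pending, -rev)

    order = []
    while pending:
        order.append(-heapq.heappop(pending))

    assert order == [10, 7, 3]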
2133 @predicate('subrepo([pattern])')
2133 @predicate('subrepo([pattern])')
2134 def subrepo(repo, subset, x):
2134 def subrepo(repo, subset, x):
2135 """Changesets that add, modify or remove the given subrepo. If no subrepo
2135 """Changesets that add, modify or remove the given subrepo. If no subrepo
2136 pattern is named, any subrepo changes are returned.
2136 pattern is named, any subrepo changes are returned.
2137 """
2137 """
2138 # i18n: "subrepo" is a keyword
2138 # i18n: "subrepo" is a keyword
2139 args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
2139 args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
2140 pat = None
2140 pat = None
2141 if len(args) != 0:
2141 if len(args) != 0:
2142 pat = getstring(args[0], _("subrepo requires a pattern"))
2142 pat = getstring(args[0], _("subrepo requires a pattern"))
2143
2143
2144 m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])
2144 m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])
2145
2145
2146 def submatches(names):
2146 def submatches(names):
2147 k, p, m = util.stringmatcher(pat)
2147 k, p, m = util.stringmatcher(pat)
2148 for name in names:
2148 for name in names:
2149 if m(name):
2149 if m(name):
2150 yield name
2150 yield name
2151
2151
2152 def matches(x):
2152 def matches(x):
2153 c = repo[x]
2153 c = repo[x]
2154 s = repo.status(c.p1().node(), c.node(), match=m)
2154 s = repo.status(c.p1().node(), c.node(), match=m)
2155
2155
2156 if pat is None:
2156 if pat is None:
2157 return s.added or s.modified or s.removed
2157 return s.added or s.modified or s.removed
2158
2158
2159 if s.added:
2159 if s.added:
2160 return any(submatches(c.substate.keys()))
2160 return any(submatches(c.substate.keys()))
2161
2161
2162 if s.modified:
2162 if s.modified:
2163 subs = set(c.p1().substate.keys())
2163 subs = set(c.p1().substate.keys())
2164 subs.update(c.substate.keys())
2164 subs.update(c.substate.keys())
2165
2165
2166 for path in submatches(subs):
2166 for path in submatches(subs):
2167 if c.p1().substate.get(path) != c.substate.get(path):
2167 if c.p1().substate.get(path) != c.substate.get(path):
2168 return True
2168 return True
2169
2169
2170 if s.removed:
2170 if s.removed:
2171 return any(submatches(c.p1().substate.keys()))
2171 return any(submatches(c.p1().substate.keys()))
2172
2172
2173 return False
2173 return False
2174
2174
2175 return subset.filter(matches, condrepr=('<subrepo %r>', pat))
2175 return subset.filter(matches, condrepr=('<subrepo %r>', pat))
2176
2176
2177 def _substringmatcher(pattern):
2177 def _substringmatcher(pattern):
2178 kind, pattern, matcher = util.stringmatcher(pattern)
2178 kind, pattern, matcher = util.stringmatcher(pattern)
2179 if kind == 'literal':
2179 if kind == 'literal':
2180 matcher = lambda s: pattern in s
2180 matcher = lambda s: pattern in s
2181 return kind, pattern, matcher
2181 return kind, pattern, matcher
2182
2182
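A standalone sketch (hypothetical helper, not util.stringmatcher) of the behaviour this wraps: ``re:`` patterns are matched as regular expressions, a ``literal:`` prefix is stripped, and plain patterns are tested by substring containment.

    import re

    def substringmatcher(pattern):
        if pattern.startswith('re:'):
            regex = re.compile(pattern[3:])
            return lambda s: bool(regex.search(s))
        if pattern.startswith('literal:'):
            pattern = pattern[len('literal:'):]
        return lambda s: pattern in s

    assert substringmatcher('bob')('bob@example.com')
    assert substringmatcher('re:^alice')('alice@example.com')
    assert not substringmatcher('literal:re:')('alice@example.com')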
2183 @predicate('tag([name])', safe=True)
2183 @predicate('tag([name])', safe=True)
2184 def tag(repo, subset, x):
2184 def tag(repo, subset, x):
2185 """The specified tag by name, or all tagged revisions if no name is given.
2185 """The specified tag by name, or all tagged revisions if no name is given.
2186
2186
2187 If `name` starts with `re:`, the remainder of the name is treated as
2187 If `name` starts with `re:`, the remainder of the name is treated as
2188 a regular expression. To match a tag that actually starts with `re:`,
2188 a regular expression. To match a tag that actually starts with `re:`,
2189 use the prefix `literal:`.
2189 use the prefix `literal:`.
2190 """
2190 """
2191 # i18n: "tag" is a keyword
2191 # i18n: "tag" is a keyword
2192 args = getargs(x, 0, 1, _("tag takes one or no arguments"))
2192 args = getargs(x, 0, 1, _("tag takes one or no arguments"))
2193 cl = repo.changelog
2193 cl = repo.changelog
2194 if args:
2194 if args:
2195 pattern = getstring(args[0],
2195 pattern = getstring(args[0],
2196 # i18n: "tag" is a keyword
2196 # i18n: "tag" is a keyword
2197 _('the argument to tag must be a string'))
2197 _('the argument to tag must be a string'))
2198 kind, pattern, matcher = util.stringmatcher(pattern)
2198 kind, pattern, matcher = util.stringmatcher(pattern)
2199 if kind == 'literal':
2199 if kind == 'literal':
2200 # avoid resolving all tags
2200 # avoid resolving all tags
2201 tn = repo._tagscache.tags.get(pattern, None)
2201 tn = repo._tagscache.tags.get(pattern, None)
2202 if tn is None:
2202 if tn is None:
2203 raise error.RepoLookupError(_("tag '%s' does not exist")
2203 raise error.RepoLookupError(_("tag '%s' does not exist")
2204 % pattern)
2204 % pattern)
2205 s = set([repo[tn].rev()])
2205 s = set([repo[tn].rev()])
2206 else:
2206 else:
2207 s = set([cl.rev(n) for t, n in repo.tagslist() if matcher(t)])
2207 s = set([cl.rev(n) for t, n in repo.tagslist() if matcher(t)])
2208 else:
2208 else:
2209 s = set([cl.rev(n) for t, n in repo.tagslist() if t != 'tip'])
2209 s = set([cl.rev(n) for t, n in repo.tagslist() if t != 'tip'])
2210 return subset & s
2210 return subset & s
2211
2211
2212 @predicate('tagged', safe=True)
2212 @predicate('tagged', safe=True)
2213 def tagged(repo, subset, x):
2213 def tagged(repo, subset, x):
2214 return tag(repo, subset, x)
2214 return tag(repo, subset, x)
2215
2215
2216 @predicate('unstable()', safe=True)
2216 @predicate('unstable()', safe=True)
2217 def unstable(repo, subset, x):
2217 def unstable(repo, subset, x):
2218 """Non-obsolete changesets with obsolete ancestors.
2218 """Non-obsolete changesets with obsolete ancestors.
2219 """
2219 """
2220 # i18n: "unstable" is a keyword
2220 # i18n: "unstable" is a keyword
2221 getargs(x, 0, 0, _("unstable takes no arguments"))
2221 getargs(x, 0, 0, _("unstable takes no arguments"))
2222 unstables = obsmod.getrevs(repo, 'unstable')
2222 unstables = obsmod.getrevs(repo, 'unstable')
2223 return subset & unstables
2223 return subset & unstables
2224
2224
2225
2225
2226 @predicate('user(string)', safe=True)
2226 @predicate('user(string)', safe=True)
2227 def user(repo, subset, x):
2227 def user(repo, subset, x):
2228 """User name contains string. The match is case-insensitive.
2228 """User name contains string. The match is case-insensitive.
2229
2229
2230 If `string` starts with `re:`, the remainder of the string is treated as
2230 If `string` starts with `re:`, the remainder of the string is treated as
2231 a regular expression. To match a user that actually contains `re:`, use
2231 a regular expression. To match a user that actually contains `re:`, use
2232 the prefix `literal:`.
2232 the prefix `literal:`.
2233 """
2233 """
2234 return author(repo, subset, x)
2234 return author(repo, subset, x)
2235
2235
2236 # experimental
2236 # experimental
2237 @predicate('wdir', safe=True)
2237 @predicate('wdir', safe=True)
2238 def wdir(repo, subset, x):
2238 def wdir(repo, subset, x):
2239 # i18n: "wdir" is a keyword
2239 # i18n: "wdir" is a keyword
2240 getargs(x, 0, 0, _("wdir takes no arguments"))
2240 getargs(x, 0, 0, _("wdir takes no arguments"))
2241 if node.wdirrev in subset or isinstance(subset, fullreposet):
2241 if node.wdirrev in subset or isinstance(subset, fullreposet):
2242 return baseset([node.wdirrev])
2242 return baseset([node.wdirrev])
2243 return baseset()
2243 return baseset()
2244
2244
2245 # for internal use
2245 # for internal use
2246 @predicate('_list', safe=True)
2246 @predicate('_list', safe=True)
2247 def _list(repo, subset, x):
2247 def _list(repo, subset, x):
2248 s = getstring(x, "internal error")
2248 s = getstring(x, "internal error")
2249 if not s:
2249 if not s:
2250 return baseset()
2250 return baseset()
2251 # remove duplicates here. It's difficult for the caller to deduplicate sets
2251 # remove duplicates here. It's difficult for the caller to deduplicate sets
2252 # because different symbols can point to the same rev.
2252 # because different symbols can point to the same rev.
2253 cl = repo.changelog
2253 cl = repo.changelog
2254 ls = []
2254 ls = []
2255 seen = set()
2255 seen = set()
2256 for t in s.split('\0'):
2256 for t in s.split('\0'):
2257 try:
2257 try:
2258 # fast path for integer revision
2258 # fast path for integer revision
2259 r = int(t)
2259 r = int(t)
2260 if str(r) != t or r not in cl:
2260 if str(r) != t or r not in cl:
2261 raise ValueError
2261 raise ValueError
2262 revs = [r]
2262 revs = [r]
2263 except ValueError:
2263 except ValueError:
2264 revs = stringset(repo, subset, t)
2264 revs = stringset(repo, subset, t)
2265
2265
2266 for r in revs:
2266 for r in revs:
2267 if r in seen:
2267 if r in seen:
2268 continue
2268 continue
2269 if (r in subset
2269 if (r in subset
2270 or r == node.nullrev and isinstance(subset, fullreposet)):
2270 or r == node.nullrev and isinstance(subset, fullreposet)):
2271 ls.append(r)
2271 ls.append(r)
2272 seen.add(r)
2272 seen.add(r)
2273 return baseset(ls)
2273 return baseset(ls)
2274
2274
2275 # for internal use
2275 # for internal use
2276 @predicate('_intlist', safe=True)
2276 @predicate('_intlist', safe=True)
2277 def _intlist(repo, subset, x):
2277 def _intlist(repo, subset, x):
2278 s = getstring(x, "internal error")
2278 s = getstring(x, "internal error")
2279 if not s:
2279 if not s:
2280 return baseset()
2280 return baseset()
2281 ls = [int(r) for r in s.split('\0')]
2281 ls = [int(r) for r in s.split('\0')]
2282 s = subset
2282 s = subset
2283 return baseset([r for r in ls if r in s])
2283 return baseset([r for r in ls if r in s])
2284
2284
2285 # for internal use
2285 # for internal use
2286 @predicate('_hexlist', safe=True)
2286 @predicate('_hexlist', safe=True)
2287 def _hexlist(repo, subset, x):
2287 def _hexlist(repo, subset, x):
2288 s = getstring(x, "internal error")
2288 s = getstring(x, "internal error")
2289 if not s:
2289 if not s:
2290 return baseset()
2290 return baseset()
2291 cl = repo.changelog
2291 cl = repo.changelog
2292 ls = [cl.rev(node.bin(r)) for r in s.split('\0')]
2292 ls = [cl.rev(node.bin(r)) for r in s.split('\0')]
2293 s = subset
2293 s = subset
2294 return baseset([r for r in ls if r in s])
2294 return baseset([r for r in ls if r in s])
2295
2295
2296 methods = {
2296 methods = {
2297 "range": rangeset,
2297 "range": rangeset,
2298 "dagrange": dagrange,
2298 "dagrange": dagrange,
2299 "string": stringset,
2299 "string": stringset,
2300 "symbol": stringset,
2300 "symbol": stringset,
2301 "and": andset,
2301 "and": andset,
2302 "or": orset,
2302 "or": orset,
2303 "not": notset,
2303 "not": notset,
2304 "difference": differenceset,
2304 "difference": differenceset,
2305 "list": listset,
2305 "list": listset,
2306 "keyvalue": keyvaluepair,
2306 "keyvalue": keyvaluepair,
2307 "func": func,
2307 "func": func,
2308 "ancestor": ancestorspec,
2308 "ancestor": ancestorspec,
2309 "parent": parentspec,
2309 "parent": parentspec,
2310 "parentpost": p1,
2310 "parentpost": p1,
2311 }
2311 }
2312
2312
2313 # Constants for ordering requirement, used in _analyze():
2314 #
2315 # If 'define', any nested functions and operations can change the ordering of
2316 # the entries in the set. If 'follow', any nested functions and operations
2317 # should take the ordering specified by the first operand to the '&' operator.
2318 #
2319 # For instance,
2320 #
2321 # X & (Y | Z)
2322 # ^ ^^^^^^^
2323 # | follow
2324 # define
2325 #
2326 # will be evaluated as 'or(y(x()), z(x()))', where 'x()' can change the order
2327 # of the entries in the set, but 'y()', 'z()' and 'or()' shouldn't.
2328 #
2329 # 'any' means the order doesn't matter. For instance,
2330 #
2331 # X & !Y
2332 # ^
2333 # any
2334 #
2335 # 'y()' can either enforce its ordering requirement or take the ordering
2336 # specified by 'x()' because 'not()' doesn't care about the order.
2337 #
2338 # Transition of ordering requirement:
2339 #
2340 # 1. starts with 'define'
2341 # 2. shifts to 'follow' by 'x & y'
2342 # 3. changes back to 'define' on function call 'f(x)' or function-like
2343 # operation 'x (f) y' because 'f' may have its own ordering requirement
2344 # for 'x' and 'y' (e.g. 'first(x)')
2345 #
2346 anyorder = 'any' # don't care about the order
2347 defineorder = 'define' # should define the order
2348 followorder = 'follow' # must follow the current order
2349
2350 # transition table for 'x & y', from the current expression 'x' to 'y'
2351 _tofolloworder = {
2352 anyorder: anyorder,
2353 defineorder: followorder,
2354 followorder: followorder,
2355 }
2356
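A small sketch (not part of the module) of how the table above is consumed when analyzing 'x & y': the left operand keeps the current requirement while the right operand is demoted to 'follow', unless ordering already does not matter.

    _tofollow = {'any': 'any', 'define': 'follow', 'follow': 'follow'}

    def andorders(order):
        """Return (order for x, order for y) when analyzing 'x & y'."""
        return order, _tofollow[order]

    assert andorders('define') == ('define', 'follow')
    assert andorders('any') == ('any', 'any')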
2313 def _matchonly(revs, bases):
2357 def _matchonly(revs, bases):
2314 """
2358 """
2315 >>> f = lambda *args: _matchonly(*map(parse, args))
2359 >>> f = lambda *args: _matchonly(*map(parse, args))
2316 >>> f('ancestors(A)', 'not ancestors(B)')
2360 >>> f('ancestors(A)', 'not ancestors(B)')
2317 ('list', ('symbol', 'A'), ('symbol', 'B'))
2361 ('list', ('symbol', 'A'), ('symbol', 'B'))
2318 """
2362 """
2319 if (revs is not None
2363 if (revs is not None
2320 and revs[0] == 'func'
2364 and revs[0] == 'func'
2321 and getsymbol(revs[1]) == 'ancestors'
2365 and getsymbol(revs[1]) == 'ancestors'
2322 and bases is not None
2366 and bases is not None
2323 and bases[0] == 'not'
2367 and bases[0] == 'not'
2324 and bases[1][0] == 'func'
2368 and bases[1][0] == 'func'
2325 and getsymbol(bases[1][1]) == 'ancestors'):
2369 and getsymbol(bases[1][1]) == 'ancestors'):
2326 return ('list', revs[2], bases[1][2])
2370 return ('list', revs[2], bases[1][2])
2327
2371
2328 def _fixops(x):
2372 def _fixops(x):
2329 """Rewrite raw parsed tree to resolve ambiguous syntax which cannot be
2373 """Rewrite raw parsed tree to resolve ambiguous syntax which cannot be
2330 handled well by our simple top-down parser"""
2374 handled well by our simple top-down parser"""
2331 if not isinstance(x, tuple):
2375 if not isinstance(x, tuple):
2332 return x
2376 return x
2333
2377
2334 op = x[0]
2378 op = x[0]
2335 if op == 'parent':
2379 if op == 'parent':
2336 # x^:y means (x^) : y, not x ^ (:y)
2380 # x^:y means (x^) : y, not x ^ (:y)
2337 # x^: means (x^) :, not x ^ (:)
2381 # x^: means (x^) :, not x ^ (:)
2338 post = ('parentpost', x[1])
2382 post = ('parentpost', x[1])
2339 if x[2][0] == 'dagrangepre':
2383 if x[2][0] == 'dagrangepre':
2340 return _fixops(('dagrange', post, x[2][1]))
2384 return _fixops(('dagrange', post, x[2][1]))
2341 elif x[2][0] == 'rangepre':
2385 elif x[2][0] == 'rangepre':
2342 return _fixops(('range', post, x[2][1]))
2386 return _fixops(('range', post, x[2][1]))
2343 elif x[2][0] == 'rangeall':
2387 elif x[2][0] == 'rangeall':
2344 return _fixops(('rangepost', post))
2388 return _fixops(('rangepost', post))
2345 elif op == 'or':
2389 elif op == 'or':
2346 # make number of arguments deterministic:
2390 # make number of arguments deterministic:
2347 # x + y + z -> (or x y z) -> (or (list x y z))
2391 # x + y + z -> (or x y z) -> (or (list x y z))
2348 return (op, _fixops(('list',) + x[1:]))
2392 return (op, _fixops(('list',) + x[1:]))
2349
2393
2350 return (op,) + tuple(_fixops(y) for y in x[1:])
2394 return (op,) + tuple(_fixops(y) for y in x[1:])
2351
2395
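A standalone sketch of the 'or' normalization performed here: a variadic (or x y z) node is rewritten to (or (list x y z)) so later passes always see a single 'list' operand (tree tuples written by hand, not produced by parse()).

    def flattenor(x):
        if isinstance(x, tuple) and x[0] == 'or':
            return ('or', ('list',) + tuple(flattenor(y) for y in x[1:]))
        return x

    tree = ('or', ('symbol', 'a'), ('symbol', 'b'), ('symbol', 'c'))
    assert flattenor(tree) == ('or', ('list', ('symbol', 'a'),
                                      ('symbol', 'b'), ('symbol', 'c')))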
2352 def _analyze(x):
2396 def _analyze(x, order):
2353 if x is None:
2397 if x is None:
2354 return x
2398 return x
2355
2399
2356 op = x[0]
2400 op = x[0]
2357 if op == 'minus':
2401 if op == 'minus':
2358 return _analyze(('and', x[1], ('not', x[2])))
2402 return _analyze(('and', x[1], ('not', x[2])), order)
2359 elif op == 'only':
2403 elif op == 'only':
2360 t = ('func', ('symbol', 'only'), ('list', x[1], x[2]))
2404 t = ('func', ('symbol', 'only'), ('list', x[1], x[2]))
2361 return _analyze(t)
2405 return _analyze(t, order)
2362 elif op == 'onlypost':
2406 elif op == 'onlypost':
2363 return _analyze(('func', ('symbol', 'only'), x[1]))
2407 return _analyze(('func', ('symbol', 'only'), x[1]), order)
2364 elif op == 'dagrangepre':
2408 elif op == 'dagrangepre':
2365 return _analyze(('func', ('symbol', 'ancestors'), x[1]))
2409 return _analyze(('func', ('symbol', 'ancestors'), x[1]), order)
2366 elif op == 'dagrangepost':
2410 elif op == 'dagrangepost':
2367 return _analyze(('func', ('symbol', 'descendants'), x[1]))
2411 return _analyze(('func', ('symbol', 'descendants'), x[1]), order)
2368 elif op == 'rangeall':
2412 elif op == 'rangeall':
2369 return _analyze(('range', ('string', '0'), ('string', 'tip')))
2413 return _analyze(('range', ('string', '0'), ('string', 'tip')), order)
2370 elif op == 'rangepre':
2414 elif op == 'rangepre':
2371 return _analyze(('range', ('string', '0'), x[1]))
2415 return _analyze(('range', ('string', '0'), x[1]), order)
2372 elif op == 'rangepost':
2416 elif op == 'rangepost':
2373 return _analyze(('range', x[1], ('string', 'tip')))
2417 return _analyze(('range', x[1], ('string', 'tip')), order)
2374 elif op == 'negate':
2418 elif op == 'negate':
2375 s = getstring(x[1], _("can't negate that"))
2419 s = getstring(x[1], _("can't negate that"))
2376 return _analyze(('string', '-' + s))
2420 return _analyze(('string', '-' + s), order)
2377 elif op in ('string', 'symbol'):
2421 elif op in ('string', 'symbol'):
2378 return x
2422 return x
2379 elif op == 'and':
2423 elif op == 'and':
2380 ta = _analyze(x[1])
2424 ta = _analyze(x[1], order)
2381 tb = _analyze(x[2])
2425 tb = _analyze(x[2], _tofolloworder[order])
2382 return (op, ta, tb)
2426 return (op, ta, tb)
2383 elif op == 'or':
2427 elif op == 'or':
2384 return (op, _analyze(x[1]))
2428 return (op, _analyze(x[1], order))
2385 elif op == 'not':
2429 elif op == 'not':
2386 return (op, _analyze(x[1]))
2430 return (op, _analyze(x[1], anyorder))
2387 elif op == 'parentpost':
2431 elif op == 'parentpost':
2388 return (op, _analyze(x[1]))
2432 return (op, _analyze(x[1], defineorder))
2389 elif op == 'group':
2433 elif op == 'group':
2390 return _analyze(x[1])
2434 return _analyze(x[1], order)
2391 elif op in ('dagrange', 'range', 'parent', 'ancestor'):
2435 elif op in ('dagrange', 'range', 'parent', 'ancestor'):
2392 ta = _analyze(x[1])
2436 ta = _analyze(x[1], defineorder)
2393 tb = _analyze(x[2])
2437 tb = _analyze(x[2], defineorder)
2394 return (op, ta, tb)
2438 return (op, ta, tb)
2395 elif op == 'list':
2439 elif op == 'list':
2396 return (op,) + tuple(_analyze(y) for y in x[1:])
2440 return (op,) + tuple(_analyze(y, order) for y in x[1:])
2397 elif op == 'keyvalue':
2441 elif op == 'keyvalue':
2398 return (op, x[1], _analyze(x[2]))
2442 return (op, x[1], _analyze(x[2], order))
2399 elif op == 'func':
2443 elif op == 'func':
2400 return (op, x[1], _analyze(x[2]))
2444 return (op, x[1], _analyze(x[2], defineorder))
2401 raise ValueError('invalid operator %r' % op)
2445 raise ValueError('invalid operator %r' % op)
2402
2446
2403 def analyze(x):
2447 def analyze(x, order=defineorder):
2404 """Transform raw parsed tree to evaluatable tree which can be fed to
2448 """Transform raw parsed tree to evaluatable tree which can be fed to
2405 optimize() or getset()
2449 optimize() or getset()
2406
2450
2407 All pseudo operations should be mapped to real operations or functions
2451 All pseudo operations should be mapped to real operations or functions
2408 defined in methods or symbols table respectively.
2452 defined in methods or symbols table respectively.
2453
2454 'order' specifies how the current expression 'x' is ordered (see the
2455 constants defined above.)
2409 """
2456 """
2410 return _analyze(x)
2457 return _analyze(x, order)
2411
2458
2412 def _optimize(x, small):
2459 def _optimize(x, small):
2413 if x is None:
2460 if x is None:
2414 return 0, x
2461 return 0, x
2415
2462
2416 smallbonus = 1
2463 smallbonus = 1
2417 if small:
2464 if small:
2418 smallbonus = .5
2465 smallbonus = .5
2419
2466
2420 op = x[0]
2467 op = x[0]
2421 if op in ('string', 'symbol'):
2468 if op in ('string', 'symbol'):
2422 return smallbonus, x # single revisions are small
2469 return smallbonus, x # single revisions are small
2423 elif op == 'and':
2470 elif op == 'and':
2424 wa, ta = _optimize(x[1], True)
2471 wa, ta = _optimize(x[1], True)
2425 wb, tb = _optimize(x[2], True)
2472 wb, tb = _optimize(x[2], True)
2426 w = min(wa, wb)
2473 w = min(wa, wb)
2427
2474
2428 # (::x and not ::y)/(not ::y and ::x) have a fast path
2475 # (::x and not ::y)/(not ::y and ::x) have a fast path
2429 tm = _matchonly(ta, tb) or _matchonly(tb, ta)
2476 tm = _matchonly(ta, tb) or _matchonly(tb, ta)
2430 if tm:
2477 if tm:
2431 return w, ('func', ('symbol', 'only'), tm)
2478 return w, ('func', ('symbol', 'only'), tm)
2432
2479
2433 if tb is not None and tb[0] == 'not':
2480 if tb is not None and tb[0] == 'not':
2434 return wa, ('difference', ta, tb[1])
2481 return wa, ('difference', ta, tb[1])
2435
2482
2436 if wa > wb:
2483 if wa > wb:
2437 return w, (op, tb, ta)
2484 return w, (op, tb, ta)
2438 return w, (op, ta, tb)
2485 return w, (op, ta, tb)
2439 elif op == 'or':
2486 elif op == 'or':
2440 # fast path for machine-generated expressions that are likely to have
2487 # fast path for machine-generated expressions that are likely to have
2441 # lots of trivial revisions: 'a + b + c()' to '_list(a b) + c()'
2488 # lots of trivial revisions: 'a + b + c()' to '_list(a b) + c()'
2442 ws, ts, ss = [], [], []
2489 ws, ts, ss = [], [], []
2443 def flushss():
2490 def flushss():
2444 if not ss:
2491 if not ss:
2445 return
2492 return
2446 if len(ss) == 1:
2493 if len(ss) == 1:
2447 w, t = ss[0]
2494 w, t = ss[0]
2448 else:
2495 else:
2449 s = '\0'.join(t[1] for w, t in ss)
2496 s = '\0'.join(t[1] for w, t in ss)
2450 y = ('func', ('symbol', '_list'), ('string', s))
2497 y = ('func', ('symbol', '_list'), ('string', s))
2451 w, t = _optimize(y, False)
2498 w, t = _optimize(y, False)
2452 ws.append(w)
2499 ws.append(w)
2453 ts.append(t)
2500 ts.append(t)
2454 del ss[:]
2501 del ss[:]
2455 for y in getlist(x[1]):
2502 for y in getlist(x[1]):
2456 w, t = _optimize(y, False)
2503 w, t = _optimize(y, False)
2457 if t is not None and (t[0] == 'string' or t[0] == 'symbol'):
2504 if t is not None and (t[0] == 'string' or t[0] == 'symbol'):
2458 ss.append((w, t))
2505 ss.append((w, t))
2459 continue
2506 continue
2460 flushss()
2507 flushss()
2461 ws.append(w)
2508 ws.append(w)
2462 ts.append(t)
2509 ts.append(t)
2463 flushss()
2510 flushss()
2464 if len(ts) == 1:
2511 if len(ts) == 1:
2465 return ws[0], ts[0] # 'or' operation is fully optimized out
2512 return ws[0], ts[0] # 'or' operation is fully optimized out
2466 # we can't reorder trees by weight because it would change the order.
2513 # we can't reorder trees by weight because it would change the order.
2467 # ("sort(a + b)" == "sort(b + a)", but "a + b" != "b + a")
2514 # ("sort(a + b)" == "sort(b + a)", but "a + b" != "b + a")
2468 # ts = tuple(t for w, t in sorted(zip(ws, ts), key=lambda wt: wt[0]))
2515 # ts = tuple(t for w, t in sorted(zip(ws, ts), key=lambda wt: wt[0]))
2469 return max(ws), (op, ('list',) + tuple(ts))
2516 return max(ws), (op, ('list',) + tuple(ts))
2470 elif op == 'not':
2517 elif op == 'not':
2471 # Optimize not public() to _notpublic() because we have a fast version
2518 # Optimize not public() to _notpublic() because we have a fast version
2472 if x[1] == ('func', ('symbol', 'public'), None):
2519 if x[1] == ('func', ('symbol', 'public'), None):
2473 newsym = ('func', ('symbol', '_notpublic'), None)
2520 newsym = ('func', ('symbol', '_notpublic'), None)
2474 o = _optimize(newsym, not small)
2521 o = _optimize(newsym, not small)
2475 return o[0], o[1]
2522 return o[0], o[1]
2476 else:
2523 else:
2477 o = _optimize(x[1], not small)
2524 o = _optimize(x[1], not small)
2478 return o[0], (op, o[1])
2525 return o[0], (op, o[1])
2479 elif op == 'parentpost':
2526 elif op == 'parentpost':
2480 o = _optimize(x[1], small)
2527 o = _optimize(x[1], small)
2481 return o[0], (op, o[1])
2528 return o[0], (op, o[1])
2482 elif op in ('dagrange', 'range', 'parent', 'ancestor'):
2529 elif op in ('dagrange', 'range', 'parent', 'ancestor'):
2483 wa, ta = _optimize(x[1], small)
2530 wa, ta = _optimize(x[1], small)
2484 wb, tb = _optimize(x[2], small)
2531 wb, tb = _optimize(x[2], small)
2485 return wa + wb, (op, ta, tb)
2532 return wa + wb, (op, ta, tb)
2486 elif op == 'list':
2533 elif op == 'list':
2487 ws, ts = zip(*(_optimize(y, small) for y in x[1:]))
2534 ws, ts = zip(*(_optimize(y, small) for y in x[1:]))
2488 return sum(ws), (op,) + ts
2535 return sum(ws), (op,) + ts
2489 elif op == 'keyvalue':
2536 elif op == 'keyvalue':
2490 w, t = _optimize(x[2], small)
2537 w, t = _optimize(x[2], small)
2491 return w, (op, x[1], t)
2538 return w, (op, x[1], t)
2492 elif op == 'func':
2539 elif op == 'func':
2493 f = getsymbol(x[1])
2540 f = getsymbol(x[1])
2494 wa, ta = _optimize(x[2], small)
2541 wa, ta = _optimize(x[2], small)
2495 if f in ('author', 'branch', 'closed', 'date', 'desc', 'file', 'grep',
2542 if f in ('author', 'branch', 'closed', 'date', 'desc', 'file', 'grep',
2496 'keyword', 'outgoing', 'user'):
2543 'keyword', 'outgoing', 'user'):
2497 w = 10 # slow
2544 w = 10 # slow
2498 elif f in ('modifies', 'adds', 'removes'):
2545 elif f in ('modifies', 'adds', 'removes'):
2499 w = 30 # slower
2546 w = 30 # slower
2500 elif f == "contains":
2547 elif f == "contains":
2501 w = 100 # very slow
2548 w = 100 # very slow
2502 elif f == "ancestor":
2549 elif f == "ancestor":
2503 w = 1 * smallbonus
2550 w = 1 * smallbonus
2504 elif f in ('reverse', 'limit', 'first', '_intlist'):
2551 elif f in ('reverse', 'limit', 'first', '_intlist'):
2505 w = 0
2552 w = 0
2506 elif f == "sort":
2553 elif f == "sort":
2507 w = 10 # assume most sorts look at changelog
2554 w = 10 # assume most sorts look at changelog
2508 else:
2555 else:
2509 w = 1
2556 w = 1
2510 return w + wa, (op, x[1], ta)
2557 return w + wa, (op, x[1], ta)
2511 raise ValueError('invalid operator %r' % op)
2558 raise ValueError('invalid operator %r' % op)
2512
2559
2513 def optimize(tree):
2560 def optimize(tree):
2514 """Optimize evaluatable tree
2561 """Optimize evaluatable tree
2515
2562
2516 All pseudo operations should be transformed beforehand.
2563 All pseudo operations should be transformed beforehand.
2517 """
2564 """
2518 _weight, newtree = _optimize(tree, small=True)
2565 _weight, newtree = _optimize(tree, small=True)
2519 return newtree
2566 return newtree
2520
2567
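A standalone sketch (weights copied from the table above, but not the real optimizer) of why the 'and' branch swaps its operands: the cheaper predicate runs first and narrows the subset handed to the expensive one.

    WEIGHTS = {'ancestor': 1, 'author': 10, 'modifies': 30, 'contains': 100}

    def orderand(funca, funcb):
        wa, wb = WEIGHTS[funca], WEIGHTS[funcb]
        # mirror _optimize(): if the left operand is heavier, swap
        return (funcb, funca) if wa > wb else (funca, funcb)

    assert orderand('contains', 'author') == ('author', 'contains')
    assert orderand('author', 'modifies') == ('author', 'modifies')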
2521 # the set of valid characters for the initial letter of symbols in
2568 # the set of valid characters for the initial letter of symbols in
2522 # alias declarations and definitions
2569 # alias declarations and definitions
2523 _aliassyminitletters = set(c for c in [chr(i) for i in xrange(256)]
2570 _aliassyminitletters = set(c for c in [chr(i) for i in xrange(256)]
2524 if c.isalnum() or c in '._@$' or ord(c) > 127)
2571 if c.isalnum() or c in '._@$' or ord(c) > 127)
2525
2572
2526 def _parsewith(spec, lookup=None, syminitletters=None):
2573 def _parsewith(spec, lookup=None, syminitletters=None):
2527 """Generate a parse tree of given spec with given tokenizing options
2574 """Generate a parse tree of given spec with given tokenizing options
2528
2575
2529 >>> _parsewith('foo($1)', syminitletters=_aliassyminitletters)
2576 >>> _parsewith('foo($1)', syminitletters=_aliassyminitletters)
2530 ('func', ('symbol', 'foo'), ('symbol', '$1'))
2577 ('func', ('symbol', 'foo'), ('symbol', '$1'))
2531 >>> _parsewith('$1')
2578 >>> _parsewith('$1')
2532 Traceback (most recent call last):
2579 Traceback (most recent call last):
2533 ...
2580 ...
2534 ParseError: ("syntax error in revset '$1'", 0)
2581 ParseError: ("syntax error in revset '$1'", 0)
2535 >>> _parsewith('foo bar')
2582 >>> _parsewith('foo bar')
2536 Traceback (most recent call last):
2583 Traceback (most recent call last):
2537 ...
2584 ...
2538 ParseError: ('invalid token', 4)
2585 ParseError: ('invalid token', 4)
2539 """
2586 """
2540 p = parser.parser(elements)
2587 p = parser.parser(elements)
2541 tree, pos = p.parse(tokenize(spec, lookup=lookup,
2588 tree, pos = p.parse(tokenize(spec, lookup=lookup,
2542 syminitletters=syminitletters))
2589 syminitletters=syminitletters))
2543 if pos != len(spec):
2590 if pos != len(spec):
2544 raise error.ParseError(_('invalid token'), pos)
2591 raise error.ParseError(_('invalid token'), pos)
2545 return _fixops(parser.simplifyinfixops(tree, ('list', 'or')))
2592 return _fixops(parser.simplifyinfixops(tree, ('list', 'or')))
2546
2593
2547 class _aliasrules(parser.basealiasrules):
2594 class _aliasrules(parser.basealiasrules):
2548 """Parsing and expansion rule set of revset aliases"""
2595 """Parsing and expansion rule set of revset aliases"""
2549 _section = _('revset alias')
2596 _section = _('revset alias')
2550
2597
2551 @staticmethod
2598 @staticmethod
2552 def _parse(spec):
2599 def _parse(spec):
2553 """Parse alias declaration/definition ``spec``
2600 """Parse alias declaration/definition ``spec``
2554
2601
2555 This allows symbol names to also use ``$`` as an initial letter
2602 This allows symbol names to also use ``$`` as an initial letter
2556 (for backward compatibility), and callers of this function should
2603 (for backward compatibility), and callers of this function should
2557 examine whether ``$`` is also used for unexpected symbols.
2604 examine whether ``$`` is also used for unexpected symbols.
2558 """
2605 """
2559 return _parsewith(spec, syminitletters=_aliassyminitletters)
2606 return _parsewith(spec, syminitletters=_aliassyminitletters)
2560
2607
2561 @staticmethod
2608 @staticmethod
2562 def _trygetfunc(tree):
2609 def _trygetfunc(tree):
2563 if tree[0] == 'func' and tree[1][0] == 'symbol':
2610 if tree[0] == 'func' and tree[1][0] == 'symbol':
2564 return tree[1][1], getlist(tree[2])
2611 return tree[1][1], getlist(tree[2])
2565
2612
2566 def expandaliases(ui, tree):
2613 def expandaliases(ui, tree):
2567 aliases = _aliasrules.buildmap(ui.configitems('revsetalias'))
2614 aliases = _aliasrules.buildmap(ui.configitems('revsetalias'))
2568 tree = _aliasrules.expand(aliases, tree)
2615 tree = _aliasrules.expand(aliases, tree)
2569 # warn about problematic (but not referenced) aliases
2616 # warn about problematic (but not referenced) aliases
2570 for name, alias in sorted(aliases.iteritems()):
2617 for name, alias in sorted(aliases.iteritems()):
2571 if alias.error and not alias.warned:
2618 if alias.error and not alias.warned:
2572 ui.warn(_('warning: %s\n') % (alias.error))
2619 ui.warn(_('warning: %s\n') % (alias.error))
2573 alias.warned = True
2620 alias.warned = True
2574 return tree
2621 return tree
2575
2622
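# For instance, with a configuration such as (alias name and body made up):
#
#     [revsetalias]
#     mine = author(alice) and not public()
#
# expandaliases(ui, parse('mine and head()')) replaces the ('symbol', 'mine')
# node with the parsed alias body before analysis and optimization.
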
2576 def foldconcat(tree):
2623 def foldconcat(tree):
2577 """Fold elements to be concatenated by `##`
2624 """Fold elements to be concatenated by `##`
2578 """
2625 """
2579 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2626 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2580 return tree
2627 return tree
2581 if tree[0] == '_concat':
2628 if tree[0] == '_concat':
2582 pending = [tree]
2629 pending = [tree]
2583 l = []
2630 l = []
2584 while pending:
2631 while pending:
2585 e = pending.pop()
2632 e = pending.pop()
2586 if e[0] == '_concat':
2633 if e[0] == '_concat':
2587 pending.extend(reversed(e[1:]))
2634 pending.extend(reversed(e[1:]))
2588 elif e[0] in ('string', 'symbol'):
2635 elif e[0] in ('string', 'symbol'):
2589 l.append(e[1])
2636 l.append(e[1])
2590 else:
2637 else:
2591 msg = _("\"##\" can't concatenate \"%s\" element") % (e[0])
2638 msg = _("\"##\" can't concatenate \"%s\" element") % (e[0])
2592 raise error.ParseError(msg)
2639 raise error.ParseError(msg)
2593 return ('string', ''.join(l))
2640 return ('string', ''.join(l))
2594 else:
2641 else:
2595 return tuple(foldconcat(t) for t in tree)
2642 return tuple(foldconcat(t) for t in tree)
2596
2643
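# A small sketch of the folding above, with hand-written tree tuples (real
# trees come from the parser):
#
#     >>> foldconcat(('_concat', ('string', 'rel'), ('symbol', 'ease_1.0')))
#     ('string', 'release_1.0')
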
2597 def parse(spec, lookup=None):
2644 def parse(spec, lookup=None):
2598 return _parsewith(spec, lookup=lookup)
2645 return _parsewith(spec, lookup=lookup)
2599
2646
2600 def posttreebuilthook(tree, repo):
2647 def posttreebuilthook(tree, repo):
2601 # hook for extensions to execute code on the optimized tree
2648 # hook for extensions to execute code on the optimized tree
2602 pass
2649 pass
2603
2650
2604 def match(ui, spec, repo=None):
2651 def match(ui, spec, repo=None):
2605 """Create a matcher for a single revision spec."""
2652 """Create a matcher for a single revision spec."""
2606 return matchany(ui, [spec], repo=repo)
2653 return matchany(ui, [spec], repo=repo)
2607
2654
2608 def matchany(ui, specs, repo=None):
2655 def matchany(ui, specs, repo=None):
2609 """Create a matcher that will include any revisions matching one of the
2656 """Create a matcher that will include any revisions matching one of the
2610 given specs"""
2657 given specs"""
2611 if not specs:
2658 if not specs:
2612 def mfunc(repo, subset=None):
2659 def mfunc(repo, subset=None):
2613 return baseset()
2660 return baseset()
2614 return mfunc
2661 return mfunc
2615 if not all(specs):
2662 if not all(specs):
2616 raise error.ParseError(_("empty query"))
2663 raise error.ParseError(_("empty query"))
2617 lookup = None
2664 lookup = None
2618 if repo:
2665 if repo:
2619 lookup = repo.__contains__
2666 lookup = repo.__contains__
2620 if len(specs) == 1:
2667 if len(specs) == 1:
2621 tree = parse(specs[0], lookup)
2668 tree = parse(specs[0], lookup)
2622 else:
2669 else:
2623 tree = ('or', ('list',) + tuple(parse(s, lookup) for s in specs))
2670 tree = ('or', ('list',) + tuple(parse(s, lookup) for s in specs))
2624
2671
2625 if ui:
2672 if ui:
2626 tree = expandaliases(ui, tree)
2673 tree = expandaliases(ui, tree)
2627 tree = foldconcat(tree)
2674 tree = foldconcat(tree)
2628 tree = analyze(tree)
2675 tree = analyze(tree)
2629 tree = optimize(tree)
2676 tree = optimize(tree)
2630 posttreebuilthook(tree, repo)
2677 posttreebuilthook(tree, repo)
2631 return makematcher(tree)
2678 return makematcher(tree)
2632
2679
2633 def makematcher(tree):
2680 def makematcher(tree):
2634 """Create a matcher from an evaluatable tree"""
2681 """Create a matcher from an evaluatable tree"""
2635 def mfunc(repo, subset=None):
2682 def mfunc(repo, subset=None):
2636 if subset is None:
2683 if subset is None:
2637 subset = fullreposet(repo)
2684 subset = fullreposet(repo)
2638 if util.safehasattr(subset, 'isascending'):
2685 if util.safehasattr(subset, 'isascending'):
2639 result = getset(repo, subset, tree)
2686 result = getset(repo, subset, tree)
2640 else:
2687 else:
2641 result = getset(repo, baseset(subset), tree)
2688 result = getset(repo, baseset(subset), tree)
2642 return result
2689 return result
2643 return mfunc
2690 return mfunc
2644
2691
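# A minimal usage sketch, assuming a loaded `repo` object and its `ui` (the
# revset strings are arbitrary examples):
#
#     m = matchany(repo.ui, ['head()', 'draft()'], repo=repo)
#     revs = m(repo)                       # smartset over the full repo
#     revs = m(repo, subset=baseset([0]))  # restricted to a given subset
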
2645 def formatspec(expr, *args):
2692 def formatspec(expr, *args):
2646 '''
2693 '''
2647 This is a convenience function for using revsets internally, and
2694 This is a convenience function for using revsets internally, and
2648 escapes arguments appropriately. Aliases are intentionally ignored
2695 escapes arguments appropriately. Aliases are intentionally ignored
2649 so that intended expression behavior isn't accidentally subverted.
2696 so that intended expression behavior isn't accidentally subverted.
2650
2697
2651 Supported arguments:
2698 Supported arguments:
2652
2699
2653 %r = revset expression, parenthesized
2700 %r = revset expression, parenthesized
2654 %d = int(arg), no quoting
2701 %d = int(arg), no quoting
2655 %s = string(arg), escaped and single-quoted
2702 %s = string(arg), escaped and single-quoted
2656 %b = arg.branch(), escaped and single-quoted
2703 %b = arg.branch(), escaped and single-quoted
2657 %n = hex(arg), single-quoted
2704 %n = hex(arg), single-quoted
2658 %% = a literal '%'
2705 %% = a literal '%'
2659
2706
2660 Prefixing the type with 'l' specifies a parenthesized list of that type.
2707 Prefixing the type with 'l' specifies a parenthesized list of that type.
2661
2708
2662 >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
2709 >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
2663 '(10 or 11):: and ((this()) or (that()))'
2710 '(10 or 11):: and ((this()) or (that()))'
2664 >>> formatspec('%d:: and not %d::', 10, 20)
2711 >>> formatspec('%d:: and not %d::', 10, 20)
2665 '10:: and not 20::'
2712 '10:: and not 20::'
2666 >>> formatspec('%ld or %ld', [], [1])
2713 >>> formatspec('%ld or %ld', [], [1])
2667 "_list('') or 1"
2714 "_list('') or 1"
2668 >>> formatspec('keyword(%s)', 'foo\\xe9')
2715 >>> formatspec('keyword(%s)', 'foo\\xe9')
2669 "keyword('foo\\\\xe9')"
2716 "keyword('foo\\\\xe9')"
2670 >>> b = lambda: 'default'
2717 >>> b = lambda: 'default'
2671 >>> b.branch = b
2718 >>> b.branch = b
2672 >>> formatspec('branch(%b)', b)
2719 >>> formatspec('branch(%b)', b)
2673 "branch('default')"
2720 "branch('default')"
2674 >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
2721 >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
2675 "root(_list('a\\x00b\\x00c\\x00d'))"
2722 "root(_list('a\\x00b\\x00c\\x00d'))"
2676 '''
2723 '''
2677
2724
2678 def quote(s):
2725 def quote(s):
2679 return repr(str(s))
2726 return repr(str(s))
2680
2727
2681 def argtype(c, arg):
2728 def argtype(c, arg):
2682 if c == 'd':
2729 if c == 'd':
2683 return str(int(arg))
2730 return str(int(arg))
2684 elif c == 's':
2731 elif c == 's':
2685 return quote(arg)
2732 return quote(arg)
2686 elif c == 'r':
2733 elif c == 'r':
2687 parse(arg) # make sure syntax errors are confined
2734 parse(arg) # make sure syntax errors are confined
2688 return '(%s)' % arg
2735 return '(%s)' % arg
2689 elif c == 'n':
2736 elif c == 'n':
2690 return quote(node.hex(arg))
2737 return quote(node.hex(arg))
2691 elif c == 'b':
2738 elif c == 'b':
2692 return quote(arg.branch())
2739 return quote(arg.branch())
2693
2740
2694 def listexp(s, t):
2741 def listexp(s, t):
2695 l = len(s)
2742 l = len(s)
2696 if l == 0:
2743 if l == 0:
2697 return "_list('')"
2744 return "_list('')"
2698 elif l == 1:
2745 elif l == 1:
2699 return argtype(t, s[0])
2746 return argtype(t, s[0])
2700 elif t == 'd':
2747 elif t == 'd':
2701 return "_intlist('%s')" % "\0".join(str(int(a)) for a in s)
2748 return "_intlist('%s')" % "\0".join(str(int(a)) for a in s)
2702 elif t == 's':
2749 elif t == 's':
2703 return "_list('%s')" % "\0".join(s)
2750 return "_list('%s')" % "\0".join(s)
2704 elif t == 'n':
2751 elif t == 'n':
2705 return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
2752 return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
2706 elif t == 'b':
2753 elif t == 'b':
2707 return "_list('%s')" % "\0".join(a.branch() for a in s)
2754 return "_list('%s')" % "\0".join(a.branch() for a in s)
2708
2755
2709 m = l // 2
2756 m = l // 2
2710 return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))
2757 return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))
2711
2758
2712 ret = ''
2759 ret = ''
2713 pos = 0
2760 pos = 0
2714 arg = 0
2761 arg = 0
2715 while pos < len(expr):
2762 while pos < len(expr):
2716 c = expr[pos]
2763 c = expr[pos]
2717 if c == '%':
2764 if c == '%':
2718 pos += 1
2765 pos += 1
2719 d = expr[pos]
2766 d = expr[pos]
2720 if d == '%':
2767 if d == '%':
2721 ret += d
2768 ret += d
2722 elif d in 'dsnbr':
2769 elif d in 'dsnbr':
2723 ret += argtype(d, args[arg])
2770 ret += argtype(d, args[arg])
2724 arg += 1
2771 arg += 1
2725 elif d == 'l':
2772 elif d == 'l':
2726 # a list of some type
2773 # a list of some type
2727 pos += 1
2774 pos += 1
2728 d = expr[pos]
2775 d = expr[pos]
2729 ret += listexp(list(args[arg]), d)
2776 ret += listexp(list(args[arg]), d)
2730 arg += 1
2777 arg += 1
2731 else:
2778 else:
2732 raise error.Abort(_('unexpected revspec format character %s')
2779 raise error.Abort(_('unexpected revspec format character %s')
2733 % d)
2780 % d)
2734 else:
2781 else:
2735 ret += c
2782 ret += c
2736 pos += 1
2783 pos += 1
2737
2784
2738 return ret
2785 return ret
2739
2786
2740 def prettyformat(tree):
2787 def prettyformat(tree):
2741 return parser.prettyformat(tree, ('string', 'symbol'))
2788 return parser.prettyformat(tree, ('string', 'symbol'))
2742
2789
2743 def depth(tree):
2790 def depth(tree):
2744 if isinstance(tree, tuple):
2791 if isinstance(tree, tuple):
2745 return max(map(depth, tree)) + 1
2792 return max(map(depth, tree)) + 1
2746 else:
2793 else:
2747 return 0
2794 return 0
2748
2795
2749 def funcsused(tree):
2796 def funcsused(tree):
2750 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2797 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2751 return set()
2798 return set()
2752 else:
2799 else:
2753 funcs = set()
2800 funcs = set()
2754 for s in tree[1:]:
2801 for s in tree[1:]:
2755 funcs |= funcsused(s)
2802 funcs |= funcsused(s)
2756 if tree[0] == 'func':
2803 if tree[0] == 'func':
2757 funcs.add(tree[1][1])
2804 funcs.add(tree[1][1])
2758 return funcs
2805 return funcs
2759
2806
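# Both helpers walk plain parse trees; a hedged example (the revset string is
# made up, and relies on parse() defined above):
#
#     >>> sorted(funcsused(parse('head() and grep("foo")')))
#     ['grep', 'head']
#     >>> depth(('func', ('symbol', 'head'), None))
#     2
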
2760 def _formatsetrepr(r):
2807 def _formatsetrepr(r):
2761 """Format an optional printable representation of a set
2808 """Format an optional printable representation of a set
2762
2809
2763 ======== =================================
2810 ======== =================================
2764 type(r) example
2811 type(r) example
2765 ======== =================================
2812 ======== =================================
2766 tuple ('<not %r>', other)
2813 tuple ('<not %r>', other)
2767 str '<branch closed>'
2814 str '<branch closed>'
2768 callable lambda: '<branch %r>' % sorted(b)
2815 callable lambda: '<branch %r>' % sorted(b)
2769 object other
2816 object other
2770 ======== =================================
2817 ======== =================================
2771 """
2818 """
2772 if r is None:
2819 if r is None:
2773 return ''
2820 return ''
2774 elif isinstance(r, tuple):
2821 elif isinstance(r, tuple):
2775 return r[0] % r[1:]
2822 return r[0] % r[1:]
2776 elif isinstance(r, str):
2823 elif isinstance(r, str):
2777 return r
2824 return r
2778 elif callable(r):
2825 elif callable(r):
2779 return r()
2826 return r()
2780 else:
2827 else:
2781 return repr(r)
2828 return repr(r)
2782
2829
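# A few hedged examples of the table above (values chosen arbitrarily):
#
#     >>> _formatsetrepr(None)
#     ''
#     >>> _formatsetrepr(('<not %r>', [1, 2]))
#     '<not [1, 2]>'
#     >>> _formatsetrepr(lambda: '<branch default>')
#     '<branch default>'
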
2783 class abstractsmartset(object):
2830 class abstractsmartset(object):
2784
2831
2785 def __nonzero__(self):
2832 def __nonzero__(self):
2786 """True if the smartset is not empty"""
2833 """True if the smartset is not empty"""
2787 raise NotImplementedError()
2834 raise NotImplementedError()
2788
2835
2789 def __contains__(self, rev):
2836 def __contains__(self, rev):
2790 """provide fast membership testing"""
2837 """provide fast membership testing"""
2791 raise NotImplementedError()
2838 raise NotImplementedError()
2792
2839
2793 def __iter__(self):
2840 def __iter__(self):
2794 """iterate the set in the order it is supposed to be iterated"""
2841 """iterate the set in the order it is supposed to be iterated"""
2795 raise NotImplementedError()
2842 raise NotImplementedError()
2796
2843
2797 # Attributes containing a function to perform a fast iteration in a given
2844 # Attributes containing a function to perform a fast iteration in a given
2798 # direction. A smartset can have none, one, or both defined.
2845 # direction. A smartset can have none, one, or both defined.
2799 #
2846 #
2800 # Default value is None instead of a function returning None to avoid
2847 # Default value is None instead of a function returning None to avoid
2801 # initializing an iterator just for testing if a fast method exists.
2848 # initializing an iterator just for testing if a fast method exists.
2802 fastasc = None
2849 fastasc = None
2803 fastdesc = None
2850 fastdesc = None
2804
2851
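# Callers typically probe these attributes before falling back to sorting;
# a sketch of the pattern (someset is a placeholder smartset, mirroring what
# addset.__iter__ does further below):
#
#     it = someset.fastasc          # None, or a function returning an iterator
#     if it is not None:
#         revs = it()
#     else:
#         revs = iter(sorted(someset))
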
2805 def isascending(self):
2852 def isascending(self):
2806 """True if the set will iterate in ascending order"""
2853 """True if the set will iterate in ascending order"""
2807 raise NotImplementedError()
2854 raise NotImplementedError()
2808
2855
2809 def isdescending(self):
2856 def isdescending(self):
2810 """True if the set will iterate in descending order"""
2857 """True if the set will iterate in descending order"""
2811 raise NotImplementedError()
2858 raise NotImplementedError()
2812
2859
2813 def istopo(self):
2860 def istopo(self):
2814 """True if the set will iterate in topographical order"""
2861 """True if the set will iterate in topographical order"""
2815 raise NotImplementedError()
2862 raise NotImplementedError()
2816
2863
2817 @util.cachefunc
2864 @util.cachefunc
2818 def min(self):
2865 def min(self):
2819 """return the minimum element in the set"""
2866 """return the minimum element in the set"""
2820 if self.fastasc is not None:
2867 if self.fastasc is not None:
2821 for r in self.fastasc():
2868 for r in self.fastasc():
2822 return r
2869 return r
2823 raise ValueError('arg is an empty sequence')
2870 raise ValueError('arg is an empty sequence')
2824 return min(self)
2871 return min(self)
2825
2872
2826 @util.cachefunc
2873 @util.cachefunc
2827 def max(self):
2874 def max(self):
2828 """return the maximum element in the set"""
2875 """return the maximum element in the set"""
2829 if self.fastdesc is not None:
2876 if self.fastdesc is not None:
2830 for r in self.fastdesc():
2877 for r in self.fastdesc():
2831 return r
2878 return r
2832 raise ValueError('arg is an empty sequence')
2879 raise ValueError('arg is an empty sequence')
2833 return max(self)
2880 return max(self)
2834
2881
2835 def first(self):
2882 def first(self):
2836 """return the first element in the set (user iteration perspective)
2883 """return the first element in the set (user iteration perspective)
2837
2884
2838 Return None if the set is empty"""
2885 Return None if the set is empty"""
2839 raise NotImplementedError()
2886 raise NotImplementedError()
2840
2887
2841 def last(self):
2888 def last(self):
2842 """return the last element in the set (user iteration perspective)
2889 """return the last element in the set (user iteration perspective)
2843
2890
2844 Return None if the set is empty"""
2891 Return None if the set is empty"""
2845 raise NotImplementedError()
2892 raise NotImplementedError()
2846
2893
2847 def __len__(self):
2894 def __len__(self):
2848 """return the length of the smartsets
2895 """return the length of the smartsets
2849
2896
2850 This can be expensive on smartset that could be lazy otherwise."""
2897 This can be expensive on smartset that could be lazy otherwise."""
2851 raise NotImplementedError()
2898 raise NotImplementedError()
2852
2899
2853 def reverse(self):
2900 def reverse(self):
2854 """reverse the expected iteration order"""
2901 """reverse the expected iteration order"""
2855 raise NotImplementedError()
2902 raise NotImplementedError()
2856
2903
2857 def sort(self, reverse=True):
2904 def sort(self, reverse=True):
2858 """get the set to iterate in an ascending or descending order"""
2905 """get the set to iterate in an ascending or descending order"""
2859 raise NotImplementedError()
2906 raise NotImplementedError()
2860
2907
2861 def __and__(self, other):
2908 def __and__(self, other):
2862 """Returns a new object with the intersection of the two collections.
2909 """Returns a new object with the intersection of the two collections.
2863
2910
2864 This is part of the mandatory API for smartset."""
2911 This is part of the mandatory API for smartset."""
2865 if isinstance(other, fullreposet):
2912 if isinstance(other, fullreposet):
2866 return self
2913 return self
2867 return self.filter(other.__contains__, condrepr=other, cache=False)
2914 return self.filter(other.__contains__, condrepr=other, cache=False)
2868
2915
2869 def __add__(self, other):
2916 def __add__(self, other):
2870 """Returns a new object with the union of the two collections.
2917 """Returns a new object with the union of the two collections.
2871
2918
2872 This is part of the mandatory API for smartset."""
2919 This is part of the mandatory API for smartset."""
2873 return addset(self, other)
2920 return addset(self, other)
2874
2921
2875 def __sub__(self, other):
2922 def __sub__(self, other):
2876 """Returns a new object with the substraction of the two collections.
2923 """Returns a new object with the substraction of the two collections.
2877
2924
2878 This is part of the mandatory API for smartset."""
2925 This is part of the mandatory API for smartset."""
2879 c = other.__contains__
2926 c = other.__contains__
2880 return self.filter(lambda r: not c(r), condrepr=('<not %r>', other),
2927 return self.filter(lambda r: not c(r), condrepr=('<not %r>', other),
2881 cache=False)
2928 cache=False)
2882
2929
2883 def filter(self, condition, condrepr=None, cache=True):
2930 def filter(self, condition, condrepr=None, cache=True):
2884 """Returns this smartset filtered by condition as a new smartset.
2931 """Returns this smartset filtered by condition as a new smartset.
2885
2932
2886 `condition` is a callable which takes a revision number and returns a
2933 `condition` is a callable which takes a revision number and returns a
2887 boolean. Optional `condrepr` provides a printable representation of
2934 boolean. Optional `condrepr` provides a printable representation of
2888 the given `condition`.
2935 the given `condition`.
2889
2936
2890 This is part of the mandatory API for smartset."""
2937 This is part of the mandatory API for smartset."""
2891 # a builtin cannot be cached, but it does not need to be
2938 # a builtin cannot be cached, but it does not need to be
2892 if cache and util.safehasattr(condition, 'func_code'):
2939 if cache and util.safehasattr(condition, 'func_code'):
2893 condition = util.cachefunc(condition)
2940 condition = util.cachefunc(condition)
2894 return filteredset(self, condition, condrepr)
2941 return filteredset(self, condition, condrepr)
2895
2942
2896 class baseset(abstractsmartset):
2943 class baseset(abstractsmartset):
2897 """Basic data structure that represents a revset and contains the basic
2944 """Basic data structure that represents a revset and contains the basic
2898 operations that it should be able to perform.
2945 operations that it should be able to perform.
2899
2946
2900 Every method in this class should be implemented by any smartset class.
2947 Every method in this class should be implemented by any smartset class.
2901 """
2948 """
2902 def __init__(self, data=(), datarepr=None, istopo=False):
2949 def __init__(self, data=(), datarepr=None, istopo=False):
2903 """
2950 """
2904 datarepr: a tuple of (format, obj, ...), a function or an object that
2951 datarepr: a tuple of (format, obj, ...), a function or an object that
2905 provides a printable representation of the given data.
2952 provides a printable representation of the given data.
2906 """
2953 """
2907 self._ascending = None
2954 self._ascending = None
2908 self._istopo = istopo
2955 self._istopo = istopo
2909 if not isinstance(data, list):
2956 if not isinstance(data, list):
2910 if isinstance(data, set):
2957 if isinstance(data, set):
2911 self._set = data
2958 self._set = data
2912 # a set has no order; we pick one for stability purposes
2959 # a set has no order; we pick one for stability purposes
2913 self._ascending = True
2960 self._ascending = True
2914 data = list(data)
2961 data = list(data)
2915 self._list = data
2962 self._list = data
2916 self._datarepr = datarepr
2963 self._datarepr = datarepr
2917
2964
2918 @util.propertycache
2965 @util.propertycache
2919 def _set(self):
2966 def _set(self):
2920 return set(self._list)
2967 return set(self._list)
2921
2968
2922 @util.propertycache
2969 @util.propertycache
2923 def _asclist(self):
2970 def _asclist(self):
2924 asclist = self._list[:]
2971 asclist = self._list[:]
2925 asclist.sort()
2972 asclist.sort()
2926 return asclist
2973 return asclist
2927
2974
2928 def __iter__(self):
2975 def __iter__(self):
2929 if self._ascending is None:
2976 if self._ascending is None:
2930 return iter(self._list)
2977 return iter(self._list)
2931 elif self._ascending:
2978 elif self._ascending:
2932 return iter(self._asclist)
2979 return iter(self._asclist)
2933 else:
2980 else:
2934 return reversed(self._asclist)
2981 return reversed(self._asclist)
2935
2982
2936 def fastasc(self):
2983 def fastasc(self):
2937 return iter(self._asclist)
2984 return iter(self._asclist)
2938
2985
2939 def fastdesc(self):
2986 def fastdesc(self):
2940 return reversed(self._asclist)
2987 return reversed(self._asclist)
2941
2988
2942 @util.propertycache
2989 @util.propertycache
2943 def __contains__(self):
2990 def __contains__(self):
2944 return self._set.__contains__
2991 return self._set.__contains__
2945
2992
2946 def __nonzero__(self):
2993 def __nonzero__(self):
2947 return bool(self._list)
2994 return bool(self._list)
2948
2995
2949 def sort(self, reverse=False):
2996 def sort(self, reverse=False):
2950 self._ascending = not bool(reverse)
2997 self._ascending = not bool(reverse)
2951 self._istopo = False
2998 self._istopo = False
2952
2999
2953 def reverse(self):
3000 def reverse(self):
2954 if self._ascending is None:
3001 if self._ascending is None:
2955 self._list.reverse()
3002 self._list.reverse()
2956 else:
3003 else:
2957 self._ascending = not self._ascending
3004 self._ascending = not self._ascending
2958 self._istopo = False
3005 self._istopo = False
2959
3006
2960 def __len__(self):
3007 def __len__(self):
2961 return len(self._list)
3008 return len(self._list)
2962
3009
2963 def isascending(self):
3010 def isascending(self):
2964 """Returns True if the collection is ascending order, False if not.
3011 """Returns True if the collection is ascending order, False if not.
2965
3012
2966 This is part of the mandatory API for smartset."""
3013 This is part of the mandatory API for smartset."""
2967 if len(self) <= 1:
3014 if len(self) <= 1:
2968 return True
3015 return True
2969 return self._ascending is not None and self._ascending
3016 return self._ascending is not None and self._ascending
2970
3017
2971 def isdescending(self):
3018 def isdescending(self):
2972 """Returns True if the collection is descending order, False if not.
3019 """Returns True if the collection is descending order, False if not.
2973
3020
2974 This is part of the mandatory API for smartset."""
3021 This is part of the mandatory API for smartset."""
2975 if len(self) <= 1:
3022 if len(self) <= 1:
2976 return True
3023 return True
2977 return self._ascending is not None and not self._ascending
3024 return self._ascending is not None and not self._ascending
2978
3025
2979 def istopo(self):
3026 def istopo(self):
2980 """Is the collection is in topographical order or not.
3027 """Is the collection is in topographical order or not.
2981
3028
2982 This is part of the mandatory API for smartset."""
3029 This is part of the mandatory API for smartset."""
2983 if len(self) <= 1:
3030 if len(self) <= 1:
2984 return True
3031 return True
2985 return self._istopo
3032 return self._istopo
2986
3033
2987 def first(self):
3034 def first(self):
2988 if self:
3035 if self:
2989 if self._ascending is None:
3036 if self._ascending is None:
2990 return self._list[0]
3037 return self._list[0]
2991 elif self._ascending:
3038 elif self._ascending:
2992 return self._asclist[0]
3039 return self._asclist[0]
2993 else:
3040 else:
2994 return self._asclist[-1]
3041 return self._asclist[-1]
2995 return None
3042 return None
2996
3043
2997 def last(self):
3044 def last(self):
2998 if self:
3045 if self:
2999 if self._ascending is None:
3046 if self._ascending is None:
3000 return self._list[-1]
3047 return self._list[-1]
3001 elif self._ascending:
3048 elif self._ascending:
3002 return self._asclist[-1]
3049 return self._asclist[-1]
3003 else:
3050 else:
3004 return self._asclist[0]
3051 return self._asclist[0]
3005 return None
3052 return None
3006
3053
3007 def __repr__(self):
3054 def __repr__(self):
3008 d = {None: '', False: '-', True: '+'}[self._ascending]
3055 d = {None: '', False: '-', True: '+'}[self._ascending]
3009 s = _formatsetrepr(self._datarepr)
3056 s = _formatsetrepr(self._datarepr)
3010 if not s:
3057 if not s:
3011 l = self._list
3058 l = self._list
3012 # if _list has been built from a set, it might have a different
3059 # if _list has been built from a set, it might have a different
3013 # order from one python implementation to another.
3060 # order from one python implementation to another.
3014 # We fall back to the sorted version for a stable output.
3061 # We fall back to the sorted version for a stable output.
3015 if self._ascending is not None:
3062 if self._ascending is not None:
3016 l = self._asclist
3063 l = self._asclist
3017 s = repr(l)
3064 s = repr(l)
3018 return '<%s%s %s>' % (type(self).__name__, d, s)
3065 return '<%s%s %s>' % (type(self).__name__, d, s)
3019
3066
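# A short sketch of baseset ordering behaviour (arbitrary revision numbers):
#
#     >>> s = baseset([3, 1, 2])
#     >>> list(s)                  # insertion order until a sort is requested
#     [3, 1, 2]
#     >>> s.sort(reverse=True)
#     >>> list(s), s.first(), s.last()
#     ([3, 2, 1], 3, 1)
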
3020 class filteredset(abstractsmartset):
3067 class filteredset(abstractsmartset):
3021 """Duck type for baseset class which iterates lazily over the revisions in
3068 """Duck type for baseset class which iterates lazily over the revisions in
3022 the subset and contains a function which tests for membership in the
3069 the subset and contains a function which tests for membership in the
3023 revset
3070 revset
3024 """
3071 """
3025 def __init__(self, subset, condition=lambda x: True, condrepr=None):
3072 def __init__(self, subset, condition=lambda x: True, condrepr=None):
3026 """
3073 """
3027 condition: a function that decides whether a revision in the subset
3074 condition: a function that decides whether a revision in the subset
3028 belongs to the revset or not.
3075 belongs to the revset or not.
3029 condrepr: a tuple of (format, obj, ...), a function or an object that
3076 condrepr: a tuple of (format, obj, ...), a function or an object that
3030 provides a printable representation of the given condition.
3077 provides a printable representation of the given condition.
3031 """
3078 """
3032 self._subset = subset
3079 self._subset = subset
3033 self._condition = condition
3080 self._condition = condition
3034 self._condrepr = condrepr
3081 self._condrepr = condrepr
3035
3082
3036 def __contains__(self, x):
3083 def __contains__(self, x):
3037 return x in self._subset and self._condition(x)
3084 return x in self._subset and self._condition(x)
3038
3085
3039 def __iter__(self):
3086 def __iter__(self):
3040 return self._iterfilter(self._subset)
3087 return self._iterfilter(self._subset)
3041
3088
3042 def _iterfilter(self, it):
3089 def _iterfilter(self, it):
3043 cond = self._condition
3090 cond = self._condition
3044 for x in it:
3091 for x in it:
3045 if cond(x):
3092 if cond(x):
3046 yield x
3093 yield x
3047
3094
3048 @property
3095 @property
3049 def fastasc(self):
3096 def fastasc(self):
3050 it = self._subset.fastasc
3097 it = self._subset.fastasc
3051 if it is None:
3098 if it is None:
3052 return None
3099 return None
3053 return lambda: self._iterfilter(it())
3100 return lambda: self._iterfilter(it())
3054
3101
3055 @property
3102 @property
3056 def fastdesc(self):
3103 def fastdesc(self):
3057 it = self._subset.fastdesc
3104 it = self._subset.fastdesc
3058 if it is None:
3105 if it is None:
3059 return None
3106 return None
3060 return lambda: self._iterfilter(it())
3107 return lambda: self._iterfilter(it())
3061
3108
3062 def __nonzero__(self):
3109 def __nonzero__(self):
3063 fast = None
3110 fast = None
3064 candidates = [self.fastasc if self.isascending() else None,
3111 candidates = [self.fastasc if self.isascending() else None,
3065 self.fastdesc if self.isdescending() else None,
3112 self.fastdesc if self.isdescending() else None,
3066 self.fastasc,
3113 self.fastasc,
3067 self.fastdesc]
3114 self.fastdesc]
3068 for candidate in candidates:
3115 for candidate in candidates:
3069 if candidate is not None:
3116 if candidate is not None:
3070 fast = candidate
3117 fast = candidate
3071 break
3118 break
3072
3119
3073 if fast is not None:
3120 if fast is not None:
3074 it = fast()
3121 it = fast()
3075 else:
3122 else:
3076 it = self
3123 it = self
3077
3124
3078 for r in it:
3125 for r in it:
3079 return True
3126 return True
3080 return False
3127 return False
3081
3128
3082 def __len__(self):
3129 def __len__(self):
3083 # Basic implementation to be changed in future patches.
3130 # Basic implementation to be changed in future patches.
3084 # until this gets improved, we use a generator expression
3131 # until this gets improved, we use a generator expression
3085 # here, since a list comprehension is free to call __len__ again,
3132 # here, since a list comprehension is free to call __len__ again,
3086 # causing infinite recursion
3133 # causing infinite recursion
3087 l = baseset(r for r in self)
3134 l = baseset(r for r in self)
3088 return len(l)
3135 return len(l)
3089
3136
3090 def sort(self, reverse=False):
3137 def sort(self, reverse=False):
3091 self._subset.sort(reverse=reverse)
3138 self._subset.sort(reverse=reverse)
3092
3139
3093 def reverse(self):
3140 def reverse(self):
3094 self._subset.reverse()
3141 self._subset.reverse()
3095
3142
3096 def isascending(self):
3143 def isascending(self):
3097 return self._subset.isascending()
3144 return self._subset.isascending()
3098
3145
3099 def isdescending(self):
3146 def isdescending(self):
3100 return self._subset.isdescending()
3147 return self._subset.isdescending()
3101
3148
3102 def istopo(self):
3149 def istopo(self):
3103 return self._subset.istopo()
3150 return self._subset.istopo()
3104
3151
3105 def first(self):
3152 def first(self):
3106 for x in self:
3153 for x in self:
3107 return x
3154 return x
3108 return None
3155 return None
3109
3156
3110 def last(self):
3157 def last(self):
3111 it = None
3158 it = None
3112 if self.isascending():
3159 if self.isascending():
3113 it = self.fastdesc
3160 it = self.fastdesc
3114 elif self.isdescending():
3161 elif self.isdescending():
3115 it = self.fastasc
3162 it = self.fastasc
3116 if it is not None:
3163 if it is not None:
3117 for x in it():
3164 for x in it():
3118 return x
3165 return x
3119 return None #empty case
3166 return None #empty case
3120 else:
3167 else:
3121 x = None
3168 x = None
3122 for x in self:
3169 for x in self:
3123 pass
3170 pass
3124 return x
3171 return x
3125
3172
3126 def __repr__(self):
3173 def __repr__(self):
3127 xs = [repr(self._subset)]
3174 xs = [repr(self._subset)]
3128 s = _formatsetrepr(self._condrepr)
3175 s = _formatsetrepr(self._condrepr)
3129 if s:
3176 if s:
3130 xs.append(s)
3177 xs.append(s)
3131 return '<%s %s>' % (type(self).__name__, ', '.join(xs))
3178 return '<%s %s>' % (type(self).__name__, ', '.join(xs))
3132
3179
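# A hedged example of the lazy filtering above (arbitrary revisions and
# condition):
#
#     >>> s = filteredset(baseset([0, 1, 2, 3]), lambda r: r % 2 == 0)
#     >>> list(s), 2 in s, 3 in s
#     ([0, 2], True, False)
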
3133 def _iterordered(ascending, iter1, iter2):
3180 def _iterordered(ascending, iter1, iter2):
3134 """produce an ordered iteration from two iterators with the same order
3181 """produce an ordered iteration from two iterators with the same order
3135
3182
3136 The ascending argument is used to indicate the iteration direction.
3183 The ascending argument is used to indicate the iteration direction.
3137 """
3184 """
3138 choice = max
3185 choice = max
3139 if ascending:
3186 if ascending:
3140 choice = min
3187 choice = min
3141
3188
3142 val1 = None
3189 val1 = None
3143 val2 = None
3190 val2 = None
3144 try:
3191 try:
3145 # Consume both iterators in an ordered way until one is empty
3192 # Consume both iterators in an ordered way until one is empty
3146 while True:
3193 while True:
3147 if val1 is None:
3194 if val1 is None:
3148 val1 = next(iter1)
3195 val1 = next(iter1)
3149 if val2 is None:
3196 if val2 is None:
3150 val2 = next(iter2)
3197 val2 = next(iter2)
3151 n = choice(val1, val2)
3198 n = choice(val1, val2)
3152 yield n
3199 yield n
3153 if val1 == n:
3200 if val1 == n:
3154 val1 = None
3201 val1 = None
3155 if val2 == n:
3202 if val2 == n:
3156 val2 = None
3203 val2 = None
3157 except StopIteration:
3204 except StopIteration:
3158 # Flush any remaining values and consume the other one
3205 # Flush any remaining values and consume the other one
3159 it = iter2
3206 it = iter2
3160 if val1 is not None:
3207 if val1 is not None:
3161 yield val1
3208 yield val1
3162 it = iter1
3209 it = iter1
3163 elif val2 is not None:
3210 elif val2 is not None:
3164 # might have been equality and both are empty
3211 # might have been equality and both are empty
3165 yield val2
3212 yield val2
3166 for val in it:
3213 for val in it:
3167 yield val
3214 yield val
3168
3215
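# For example (plain iterators standing in for smartset fast iterators; note
# that the shared value 2 is only yielded once):
#
#     >>> list(_iterordered(True, iter([0, 2, 4]), iter([1, 2, 5])))
#     [0, 1, 2, 4, 5]
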
3169 class addset(abstractsmartset):
3216 class addset(abstractsmartset):
3170 """Represent the addition of two sets
3217 """Represent the addition of two sets
3171
3218
3172 Wrapper structure for lazily adding two structures without losing much
3219 Wrapper structure for lazily adding two structures without losing much
3173 performance on the __contains__ method
3220 performance on the __contains__ method
3174
3221
3175 If the ascending attribute is set, the two structures are assumed to be
3222 If the ascending attribute is set, the two structures are assumed to be
3176 ordered either ascending or descending. We can therefore add them while
3223 ordered either ascending or descending. We can therefore add them while
3177 maintaining that order by iterating over both at the same time.
3224 maintaining that order by iterating over both at the same time.
3178
3225
3179 >>> xs = baseset([0, 3, 2])
3226 >>> xs = baseset([0, 3, 2])
3180 >>> ys = baseset([5, 2, 4])
3227 >>> ys = baseset([5, 2, 4])
3181
3228
3182 >>> rs = addset(xs, ys)
3229 >>> rs = addset(xs, ys)
3183 >>> bool(rs), 0 in rs, 1 in rs, 5 in rs, rs.first(), rs.last()
3230 >>> bool(rs), 0 in rs, 1 in rs, 5 in rs, rs.first(), rs.last()
3184 (True, True, False, True, 0, 4)
3231 (True, True, False, True, 0, 4)
3185 >>> rs = addset(xs, baseset([]))
3232 >>> rs = addset(xs, baseset([]))
3186 >>> bool(rs), 0 in rs, 1 in rs, rs.first(), rs.last()
3233 >>> bool(rs), 0 in rs, 1 in rs, rs.first(), rs.last()
3187 (True, True, False, 0, 2)
3234 (True, True, False, 0, 2)
3188 >>> rs = addset(baseset([]), baseset([]))
3235 >>> rs = addset(baseset([]), baseset([]))
3189 >>> bool(rs), 0 in rs, rs.first(), rs.last()
3236 >>> bool(rs), 0 in rs, rs.first(), rs.last()
3190 (False, False, None, None)
3237 (False, False, None, None)
3191
3238
3192 iterate unsorted:
3239 iterate unsorted:
3193 >>> rs = addset(xs, ys)
3240 >>> rs = addset(xs, ys)
3194 >>> # (use generator because pypy could call len())
3241 >>> # (use generator because pypy could call len())
3195 >>> list(x for x in rs) # without _genlist
3242 >>> list(x for x in rs) # without _genlist
3196 [0, 3, 2, 5, 4]
3243 [0, 3, 2, 5, 4]
3197 >>> assert not rs._genlist
3244 >>> assert not rs._genlist
3198 >>> len(rs)
3245 >>> len(rs)
3199 5
3246 5
3200 >>> [x for x in rs] # with _genlist
3247 >>> [x for x in rs] # with _genlist
3201 [0, 3, 2, 5, 4]
3248 [0, 3, 2, 5, 4]
3202 >>> assert rs._genlist
3249 >>> assert rs._genlist
3203
3250
3204 iterate ascending:
3251 iterate ascending:
3205 >>> rs = addset(xs, ys, ascending=True)
3252 >>> rs = addset(xs, ys, ascending=True)
3206 >>> # (use generator because pypy could call len())
3253 >>> # (use generator because pypy could call len())
3207 >>> list(x for x in rs), list(x for x in rs.fastasc()) # without _asclist
3254 >>> list(x for x in rs), list(x for x in rs.fastasc()) # without _asclist
3208 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
3255 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
3209 >>> assert not rs._asclist
3256 >>> assert not rs._asclist
3210 >>> len(rs)
3257 >>> len(rs)
3211 5
3258 5
3212 >>> [x for x in rs], [x for x in rs.fastasc()]
3259 >>> [x for x in rs], [x for x in rs.fastasc()]
3213 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
3260 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
3214 >>> assert rs._asclist
3261 >>> assert rs._asclist
3215
3262
3216 iterate descending:
3263 iterate descending:
3217 >>> rs = addset(xs, ys, ascending=False)
3264 >>> rs = addset(xs, ys, ascending=False)
3218 >>> # (use generator because pypy could call len())
3265 >>> # (use generator because pypy could call len())
3219 >>> list(x for x in rs), list(x for x in rs.fastdesc()) # without _asclist
3266 >>> list(x for x in rs), list(x for x in rs.fastdesc()) # without _asclist
3220 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
3267 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
3221 >>> assert not rs._asclist
3268 >>> assert not rs._asclist
3222 >>> len(rs)
3269 >>> len(rs)
3223 5
3270 5
3224 >>> [x for x in rs], [x for x in rs.fastdesc()]
3271 >>> [x for x in rs], [x for x in rs.fastdesc()]
3225 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
3272 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
3226 >>> assert rs._asclist
3273 >>> assert rs._asclist
3227
3274
3228 iterate ascending without fastasc:
3275 iterate ascending without fastasc:
3229 >>> rs = addset(xs, generatorset(ys), ascending=True)
3276 >>> rs = addset(xs, generatorset(ys), ascending=True)
3230 >>> assert rs.fastasc is None
3277 >>> assert rs.fastasc is None
3231 >>> [x for x in rs]
3278 >>> [x for x in rs]
3232 [0, 2, 3, 4, 5]
3279 [0, 2, 3, 4, 5]
3233
3280
3234 iterate descending without fastdesc:
3281 iterate descending without fastdesc:
3235 >>> rs = addset(generatorset(xs), ys, ascending=False)
3282 >>> rs = addset(generatorset(xs), ys, ascending=False)
3236 >>> assert rs.fastdesc is None
3283 >>> assert rs.fastdesc is None
3237 >>> [x for x in rs]
3284 >>> [x for x in rs]
3238 [5, 4, 3, 2, 0]
3285 [5, 4, 3, 2, 0]
3239 """
3286 """
3240 def __init__(self, revs1, revs2, ascending=None):
3287 def __init__(self, revs1, revs2, ascending=None):
3241 self._r1 = revs1
3288 self._r1 = revs1
3242 self._r2 = revs2
3289 self._r2 = revs2
3243 self._iter = None
3290 self._iter = None
3244 self._ascending = ascending
3291 self._ascending = ascending
3245 self._genlist = None
3292 self._genlist = None
3246 self._asclist = None
3293 self._asclist = None
3247
3294
3248 def __len__(self):
3295 def __len__(self):
3249 return len(self._list)
3296 return len(self._list)
3250
3297
3251 def __nonzero__(self):
3298 def __nonzero__(self):
3252 return bool(self._r1) or bool(self._r2)
3299 return bool(self._r1) or bool(self._r2)
3253
3300
3254 @util.propertycache
3301 @util.propertycache
3255 def _list(self):
3302 def _list(self):
3256 if not self._genlist:
3303 if not self._genlist:
3257 self._genlist = baseset(iter(self))
3304 self._genlist = baseset(iter(self))
3258 return self._genlist
3305 return self._genlist
3259
3306
3260 def __iter__(self):
3307 def __iter__(self):
3261 """Iterate over both collections without repeating elements
3308 """Iterate over both collections without repeating elements
3262
3309
3263 If the ascending attribute is not set, iterate over the first one and
3310 If the ascending attribute is not set, iterate over the first one and
3264 then over the second one checking for membership on the first one so we
3311 then over the second one checking for membership on the first one so we
3265 don't yield any duplicates.
3312 don't yield any duplicates.
3266
3313
3267 If the ascending attribute is set, iterate over both collections at the
3314 If the ascending attribute is set, iterate over both collections at the
3268 same time, yielding only one value at a time in the given order.
3315 same time, yielding only one value at a time in the given order.
3269 """
3316 """
3270 if self._ascending is None:
3317 if self._ascending is None:
3271 if self._genlist:
3318 if self._genlist:
3272 return iter(self._genlist)
3319 return iter(self._genlist)
3273 def arbitraryordergen():
3320 def arbitraryordergen():
3274 for r in self._r1:
3321 for r in self._r1:
3275 yield r
3322 yield r
3276 inr1 = self._r1.__contains__
3323 inr1 = self._r1.__contains__
3277 for r in self._r2:
3324 for r in self._r2:
3278 if not inr1(r):
3325 if not inr1(r):
3279 yield r
3326 yield r
3280 return arbitraryordergen()
3327 return arbitraryordergen()
3281 # try to use our own fast iterator if it exists
3328 # try to use our own fast iterator if it exists
3282 self._trysetasclist()
3329 self._trysetasclist()
3283 if self._ascending:
3330 if self._ascending:
3284 attr = 'fastasc'
3331 attr = 'fastasc'
3285 else:
3332 else:
3286 attr = 'fastdesc'
3333 attr = 'fastdesc'
3287 it = getattr(self, attr)
3334 it = getattr(self, attr)
3288 if it is not None:
3335 if it is not None:
3289 return it()
3336 return it()
3290 # maybe only one of the two components supports fast iteration
3337 # maybe only one of the two components supports fast iteration
3291 # get iterator for _r1
3338 # get iterator for _r1
3292 iter1 = getattr(self._r1, attr)
3339 iter1 = getattr(self._r1, attr)
3293 if iter1 is None:
3340 if iter1 is None:
3294 # let's avoid side effect (not sure it matters)
3341 # let's avoid side effect (not sure it matters)
3295 iter1 = iter(sorted(self._r1, reverse=not self._ascending))
3342 iter1 = iter(sorted(self._r1, reverse=not self._ascending))
3296 else:
3343 else:
3297 iter1 = iter1()
3344 iter1 = iter1()
3298 # get iterator for _r2
3345 # get iterator for _r2
3299 iter2 = getattr(self._r2, attr)
3346 iter2 = getattr(self._r2, attr)
3300 if iter2 is None:
3347 if iter2 is None:
3301 # let's avoid side effect (not sure it matters)
3348 # let's avoid side effect (not sure it matters)
3302 iter2 = iter(sorted(self._r2, reverse=not self._ascending))
3349 iter2 = iter(sorted(self._r2, reverse=not self._ascending))
3303 else:
3350 else:
3304 iter2 = iter2()
3351 iter2 = iter2()
3305 return _iterordered(self._ascending, iter1, iter2)
3352 return _iterordered(self._ascending, iter1, iter2)
3306
3353
3307 def _trysetasclist(self):
3354 def _trysetasclist(self):
3308 """populate the _asclist attribute if possible and necessary"""
3355 """populate the _asclist attribute if possible and necessary"""
3309 if self._genlist is not None and self._asclist is None:
3356 if self._genlist is not None and self._asclist is None:
3310 self._asclist = sorted(self._genlist)
3357 self._asclist = sorted(self._genlist)
3311
3358
3312 @property
3359 @property
3313 def fastasc(self):
3360 def fastasc(self):
3314 self._trysetasclist()
3361 self._trysetasclist()
3315 if self._asclist is not None:
3362 if self._asclist is not None:
3316 return self._asclist.__iter__
3363 return self._asclist.__iter__
3317 iter1 = self._r1.fastasc
3364 iter1 = self._r1.fastasc
3318 iter2 = self._r2.fastasc
3365 iter2 = self._r2.fastasc
3319 if None in (iter1, iter2):
3366 if None in (iter1, iter2):
3320 return None
3367 return None
3321 return lambda: _iterordered(True, iter1(), iter2())
3368 return lambda: _iterordered(True, iter1(), iter2())
3322
3369
3323 @property
3370 @property
3324 def fastdesc(self):
3371 def fastdesc(self):
3325 self._trysetasclist()
3372 self._trysetasclist()
3326 if self._asclist is not None:
3373 if self._asclist is not None:
3327 return self._asclist.__reversed__
3374 return self._asclist.__reversed__
3328 iter1 = self._r1.fastdesc
3375 iter1 = self._r1.fastdesc
3329 iter2 = self._r2.fastdesc
3376 iter2 = self._r2.fastdesc
3330 if None in (iter1, iter2):
3377 if None in (iter1, iter2):
3331 return None
3378 return None
3332 return lambda: _iterordered(False, iter1(), iter2())
3379 return lambda: _iterordered(False, iter1(), iter2())
3333
3380
3334 def __contains__(self, x):
3381 def __contains__(self, x):
3335 return x in self._r1 or x in self._r2
3382 return x in self._r1 or x in self._r2
3336
3383
3337 def sort(self, reverse=False):
3384 def sort(self, reverse=False):
3338 """Sort the added set
3385 """Sort the added set
3339
3386
3340 For this we use the cached list with all the generated values and if we
3387 For this we use the cached list with all the generated values and if we
3341 know they are ascending or descending we can sort them in a smart way.
3388 know they are ascending or descending we can sort them in a smart way.
3342 """
3389 """
3343 self._ascending = not reverse
3390 self._ascending = not reverse
3344
3391
3345 def isascending(self):
3392 def isascending(self):
3346 return self._ascending is not None and self._ascending
3393 return self._ascending is not None and self._ascending
3347
3394
3348 def isdescending(self):
3395 def isdescending(self):
3349 return self._ascending is not None and not self._ascending
3396 return self._ascending is not None and not self._ascending
3350
3397
3351 def istopo(self):
3398 def istopo(self):
3352 # not worth the trouble asserting if the two sets combined are still
3399 # not worth the trouble asserting if the two sets combined are still
3353 # in topological order. Use the sort() predicate to explicitly sort
3400 # in topological order. Use the sort() predicate to explicitly sort
3354 # again instead.
3401 # again instead.
3355 return False
3402 return False
3356
3403
3357 def reverse(self):
3404 def reverse(self):
3358 if self._ascending is None:
3405 if self._ascending is None:
3359 self._list.reverse()
3406 self._list.reverse()
3360 else:
3407 else:
3361 self._ascending = not self._ascending
3408 self._ascending = not self._ascending
3362
3409
3363 def first(self):
3410 def first(self):
3364 for x in self:
3411 for x in self:
3365 return x
3412 return x
3366 return None
3413 return None
3367
3414
3368 def last(self):
3415 def last(self):
3369 self.reverse()
3416 self.reverse()
3370 val = self.first()
3417 val = self.first()
3371 self.reverse()
3418 self.reverse()
3372 return val
3419 return val
3373
3420
3374 def __repr__(self):
3421 def __repr__(self):
3375 d = {None: '', False: '-', True: '+'}[self._ascending]
3422 d = {None: '', False: '-', True: '+'}[self._ascending]
3376 return '<%s%s %r, %r>' % (type(self).__name__, d, self._r1, self._r2)
3423 return '<%s%s %r, %r>' % (type(self).__name__, d, self._r1, self._r2)
3377
3424
3378 class generatorset(abstractsmartset):
3425 class generatorset(abstractsmartset):
3379 """Wrap a generator for lazy iteration
3426 """Wrap a generator for lazy iteration
3380
3427
3381 Wrapper structure for generators that provides lazy membership and can
3428 Wrapper structure for generators that provides lazy membership and can
3382 be iterated more than once.
3429 be iterated more than once.
3383 When asked for membership it generates values until either it finds the
3430 When asked for membership it generates values until either it finds the
3384 requested one or has gone through all the elements in the generator
3431 requested one or has gone through all the elements in the generator
3385 """
3432 """
3386 def __init__(self, gen, iterasc=None):
3433 def __init__(self, gen, iterasc=None):
3387 """
3434 """
3388 gen: a generator producing the values for the generatorset.
3435 gen: a generator producing the values for the generatorset.
3389 """
3436 """
3390 self._gen = gen
3437 self._gen = gen
3391 self._asclist = None
3438 self._asclist = None
3392 self._cache = {}
3439 self._cache = {}
3393 self._genlist = []
3440 self._genlist = []
3394 self._finished = False
3441 self._finished = False
3395 self._ascending = True
3442 self._ascending = True
3396 if iterasc is not None:
3443 if iterasc is not None:
3397 if iterasc:
3444 if iterasc:
3398 self.fastasc = self._iterator
3445 self.fastasc = self._iterator
3399 self.__contains__ = self._asccontains
3446 self.__contains__ = self._asccontains
3400 else:
3447 else:
3401 self.fastdesc = self._iterator
3448 self.fastdesc = self._iterator
3402 self.__contains__ = self._desccontains
3449 self.__contains__ = self._desccontains
3403
3450
3404 def __nonzero__(self):
3451 def __nonzero__(self):
3405 # Do not use 'for r in self' because it will enforce the iteration
3452 # Do not use 'for r in self' because it will enforce the iteration
3406 # order (default ascending), possibly unrolling a whole descending
3453 # order (default ascending), possibly unrolling a whole descending
3407 # iterator.
3454 # iterator.
3408 if self._genlist:
3455 if self._genlist:
3409 return True
3456 return True
3410 for r in self._consumegen():
3457 for r in self._consumegen():
3411 return True
3458 return True
3412 return False
3459 return False
3413
3460
3414 def __contains__(self, x):
3461 def __contains__(self, x):
3415 if x in self._cache:
3462 if x in self._cache:
3416 return self._cache[x]
3463 return self._cache[x]
3417
3464
3418 # Use new values only, as existing values would be cached.
3465 # Use new values only, as existing values would be cached.
3419 for l in self._consumegen():
3466 for l in self._consumegen():
3420 if l == x:
3467 if l == x:
3421 return True
3468 return True
3422
3469
3423 self._cache[x] = False
3470 self._cache[x] = False
3424 return False
3471 return False
3425
3472
3426 def _asccontains(self, x):
3473 def _asccontains(self, x):
3427 """version of contains optimised for ascending generator"""
3474 """version of contains optimised for ascending generator"""
3428 if x in self._cache:
3475 if x in self._cache:
3429 return self._cache[x]
3476 return self._cache[x]
3430
3477
3431 # Use new values only, as existing values would be cached.
3478 # Use new values only, as existing values would be cached.
3432 for l in self._consumegen():
3479 for l in self._consumegen():
3433 if l == x:
3480 if l == x:
3434 return True
3481 return True
3435 if l > x:
3482 if l > x:
3436 break
3483 break
3437
3484
3438 self._cache[x] = False
3485 self._cache[x] = False
3439 return False
3486 return False
3440
3487
3441 def _desccontains(self, x):
3488 def _desccontains(self, x):
3442 """version of contains optimised for descending generator"""
3489 """version of contains optimised for descending generator"""
3443 if x in self._cache:
3490 if x in self._cache:
3444 return self._cache[x]
3491 return self._cache[x]
3445
3492
3446 # Use new values only, as existing values would be cached.
3493 # Use new values only, as existing values would be cached.
3447 for l in self._consumegen():
3494 for l in self._consumegen():
3448 if l == x:
3495 if l == x:
3449 return True
3496 return True
3450 if l < x:
3497 if l < x:
3451 break
3498 break
3452
3499
3453 self._cache[x] = False
3500 self._cache[x] = False
3454 return False
3501 return False
3455
3502
3456 def __iter__(self):
3503 def __iter__(self):
3457 if self._ascending:
3504 if self._ascending:
3458 it = self.fastasc
3505 it = self.fastasc
3459 else:
3506 else:
3460 it = self.fastdesc
3507 it = self.fastdesc
3461 if it is not None:
3508 if it is not None:
3462 return it()
3509 return it()
3463 # we need to consume the iterator
3510 # we need to consume the iterator
3464 for x in self._consumegen():
3511 for x in self._consumegen():
3465 pass
3512 pass
3466 # then run the same code again, now that fastasc/fastdesc are set
3513 # then run the same code again, now that fastasc/fastdesc are set
3467 return iter(self)
3514 return iter(self)
3468
3515
3469 def _iterator(self):
3516 def _iterator(self):
3470 if self._finished:
3517 if self._finished:
3471 return iter(self._genlist)
3518 return iter(self._genlist)
3472
3519
3473 # We have to use this complex iteration strategy to allow multiple
3520 # We have to use this complex iteration strategy to allow multiple
3474 # iterations at the same time. We need to be able to catch revisions
3521 # iterations at the same time. We need to be able to catch revisions
3475 # removed from _consumegen and added to genlist by another instance.
3522 # removed from _consumegen and added to genlist by another instance.
3476 #
3523 #
3477 # Getting rid of it would provide about a 15% speed up on this
3524 # Getting rid of it would provide about a 15% speed up on this
3478 # iteration.
3525 # iteration.
3479 genlist = self._genlist
3526 genlist = self._genlist
3480 nextrev = self._consumegen().next
3527 nextrev = self._consumegen().next
3481 _len = len # cache global lookup
3528 _len = len # cache global lookup
3482 def gen():
3529 def gen():
3483 i = 0
3530 i = 0
3484 while True:
3531 while True:
3485 if i < _len(genlist):
3532 if i < _len(genlist):
3486 yield genlist[i]
3533 yield genlist[i]
3487 else:
3534 else:
3488 yield nextrev()
3535 yield nextrev()
3489 i += 1
3536 i += 1
3490 return gen()
3537 return gen()
3491
3538
3492 def _consumegen(self):
3539 def _consumegen(self):
3493 cache = self._cache
3540 cache = self._cache
3494 genlist = self._genlist.append
3541 genlist = self._genlist.append
3495 for item in self._gen:
3542 for item in self._gen:
3496 cache[item] = True
3543 cache[item] = True
3497 genlist(item)
3544 genlist(item)
3498 yield item
3545 yield item
3499 if not self._finished:
3546 if not self._finished:
3500 self._finished = True
3547 self._finished = True
3501 asc = self._genlist[:]
3548 asc = self._genlist[:]
3502 asc.sort()
3549 asc.sort()
3503 self._asclist = asc
3550 self._asclist = asc
3504 self.fastasc = asc.__iter__
3551 self.fastasc = asc.__iter__
3505 self.fastdesc = asc.__reversed__
3552 self.fastdesc = asc.__reversed__
3506
3553
3507 def __len__(self):
3554 def __len__(self):
3508 for x in self._consumegen():
3555 for x in self._consumegen():
3509 pass
3556 pass
3510 return len(self._genlist)
3557 return len(self._genlist)
3511
3558
3512 def sort(self, reverse=False):
3559 def sort(self, reverse=False):
3513 self._ascending = not reverse
3560 self._ascending = not reverse
3514
3561
3515 def reverse(self):
3562 def reverse(self):
3516 self._ascending = not self._ascending
3563 self._ascending = not self._ascending
3517
3564
3518 def isascending(self):
3565 def isascending(self):
3519 return self._ascending
3566 return self._ascending
3520
3567
3521 def isdescending(self):
3568 def isdescending(self):
3522 return not self._ascending
3569 return not self._ascending
3523
3570
3524 def istopo(self):
3571 def istopo(self):
3525 # not worth the trouble asserting whether the generated values are still
3572 # not worth the trouble asserting whether the generated values are still
3526 # in topological order. Use the sort() predicate to explicitly sort
3573 # in topological order. Use the sort() predicate to explicitly sort
3527 # again instead.
3574 # again instead.
3528 return False
3575 return False
3529
3576
3530 def first(self):
3577 def first(self):
3531 if self._ascending:
3578 if self._ascending:
3532 it = self.fastasc
3579 it = self.fastasc
3533 else:
3580 else:
3534 it = self.fastdesc
3581 it = self.fastdesc
3535 if it is None:
3582 if it is None:
3536 # we need to consume all and try again
3583 # we need to consume all and try again
3537 for x in self._consumegen():
3584 for x in self._consumegen():
3538 pass
3585 pass
3539 return self.first()
3586 return self.first()
3540 return next(it(), None)
3587 return next(it(), None)
3541
3588
    def last(self):
        if self._ascending:
            it = self.fastdesc
        else:
            it = self.fastasc
        if it is None:
            # we need to consume all and try again
            for x in self._consumegen():
                pass
            return self.last()
        return next(it(), None)

    def __repr__(self):
        d = {False: '-', True: '+'}[self._ascending]
        return '<%s%s>' % (type(self).__name__, d)

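# The caching strategy used by generatorset above is easier to see in
# isolation. The class below is an illustrative sketch only -- it is not used
# anywhere in this module and the name _memoizedgen is made up. Like
# generatorset, it lets several consumers iterate the same underlying
# generator by replaying the already consumed prefix from a shared list
# before pulling new items.
class _memoizedgen(object):
    """Illustrative sketch of the generatorset caching pattern."""

    def __init__(self, gen):
        self._gen = gen # the wrapped generator, consumed at most once
        self._consumed = [] # shared prefix cache, grows monotonically

    def __iter__(self):
        consumed = self._consumed
        i = 0
        while True:
            if i < len(consumed):
                # replay an item another iterator already pulled
                yield consumed[i]
            else:
                try:
                    item = next(self._gen)
                except StopIteration:
                    return
                consumed.append(item)
                yield item
            i += 1

# For example:
#     revs = _memoizedgen(iter([0, 2, 5, 7]))
#     list(iter(revs)) # pulls from the underlying iterator: [0, 2, 5, 7]
#     list(iter(revs)) # replayed purely from the cache: [0, 2, 5, 7]
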
class spanset(abstractsmartset):
    """Duck type for baseset class which represents a range of revisions and
    can work lazily and without having all the range in memory

    Note that spanset(x, y) behaves almost like xrange(x, y) except for two
    notable points:
    - when x > y the set is automatically descending,
    - revisions filtered by the repoview are skipped.

    """
    def __init__(self, repo, start=0, end=None):
        """
        start: first revision included in the set
            (defaults to 0)
        end: first revision excluded (last + 1)
            (defaults to len(repo))

        Spanset will be descending if `end` < `start`.
        """
        if end is None:
            end = len(repo)
        self._ascending = start <= end
        if not self._ascending:
            start, end = end + 1, start + 1
        self._start = start
        self._end = end
        self._hiddenrevs = repo.changelog.filteredrevs

    def sort(self, reverse=False):
        self._ascending = not reverse

    def reverse(self):
        self._ascending = not self._ascending

    def istopo(self):
        # not worth the trouble asserting whether the set is still in
        # topological order. Use the sort() predicate to explicitly sort
        # it again instead.
        return False

    def _iterfilter(self, iterrange):
        s = self._hiddenrevs
        for r in iterrange:
            if r not in s:
                yield r

    def __iter__(self):
        if self._ascending:
            return self.fastasc()
        else:
            return self.fastdesc()

    def fastasc(self):
        iterrange = xrange(self._start, self._end)
        if self._hiddenrevs:
            return self._iterfilter(iterrange)
        return iter(iterrange)

    def fastdesc(self):
        iterrange = xrange(self._end - 1, self._start - 1, -1)
        if self._hiddenrevs:
            return self._iterfilter(iterrange)
        return iter(iterrange)

    def __contains__(self, rev):
        hidden = self._hiddenrevs
        return ((self._start <= rev < self._end)
                and not (hidden and rev in hidden))

    def __nonzero__(self):
        for r in self:
            return True
        return False

    def __len__(self):
        if not self._hiddenrevs:
            return abs(self._end - self._start)
        else:
            count = 0
            start = self._start
            end = self._end
            for rev in self._hiddenrevs:
                if (end < rev <= start) or (start <= rev < end):
                    count += 1
            return abs(self._end - self._start) - count

    def isascending(self):
        return self._ascending

    def isdescending(self):
        return not self._ascending

    def first(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        for x in it():
            return x
        return None

    def last(self):
        if self._ascending:
            it = self.fastdesc
        else:
            it = self.fastasc
        for x in it():
            return x
        return None

    def __repr__(self):
        d = {False: '-', True: '+'}[self._ascending]
        return '<%s%s %d:%d>' % (type(self).__name__, d,
                                 self._start, self._end - 1)

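# Illustrative sketch only (not used by this module): the iteration order and
# hidden-revision filtering described by the spanset docstring, expressed as
# a plain generator over ints. The name _spanlike is made up, and 'hidden'
# plays the role of repo.changelog.filteredrevs.
def _spanlike(start, end, hidden=frozenset()):
    """Yield the revisions a spanset(repo, start, end)-style range covers:
    ascending when start <= end, descending otherwise, skipping 'hidden'."""
    if start <= end:
        rng = xrange(start, end)
    else:
        # mirrors spanset.__init__ swapping the bounds and walking backwards
        rng = xrange(start, end, -1)
    for rev in rng:
        if rev not in hidden:
            yield rev

# For example, list(_spanlike(5, 2, hidden=frozenset([4]))) is [5, 3]: the
# range is descending because start > end, and the hidden revision 4 is
# skipped.
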
class fullreposet(spanset):
    """a set containing all revisions in the repo

    This class exists to host special optimizations and magic to handle
    virtual revisions such as "null".
    """

    def __init__(self, repo):
        super(fullreposet, self).__init__(repo)

    def __and__(self, other):
        """As self contains the whole repo, everything in the other set is
        also in self. Therefore `self & other = other`.

        This boldly assumes that the other set contains only valid revs.
        """
        # other is not a smartset, make it so
        if not util.safehasattr(other, 'isascending'):
            # filter out hidden revisions
            # (this boldly assumes all smartsets are pure)
            #
            # `other` was used with "&", let's assume this is a set-like
            # object.
            other = baseset(other - self._hiddenrevs)

        # XXX As fullreposet is also used as bootstrap, this is wrong.
        #
        # With a giveme312() revset returning [3,1,2], this makes
        # 'hg log -r "giveme312()"' -> 1, 2, 3 (wrong)
        # We cannot just drop the sort because other usages still rely on it:
        # 'hg log -r "all() and giveme312()"' -> 1, 2, 3 (right)
        #
        # There are also some faulty revset implementations that rely on it
        # (eg: children as of its state in e8075329c5fb)
        #
        # When we fix the two points above, we can move this into the if
        # clause.
        other.sort(reverse=self.isdescending())
        return other

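# Illustrative sketch only (nothing in this module uses it): the contract
# documented in fullreposet.__and__ above, expressed over plain ints. The
# name _fullrepoand is made up; 'hidden' and 'descending' stand in for
# self._hiddenrevs and self.isdescending().
def _fullrepoand(other, hidden=frozenset(), descending=False):
    """Intersect 'other' with "the whole repo": keep other, minus hidden
    revisions, reordered to match the full set's direction."""
    revs = [r for r in other if r not in hidden]
    revs.sort(reverse=descending)
    return revs

# e.g. _fullrepoand([3, 1, 2], hidden=frozenset([2])) -> [1, 3], i.e. the
# sorted result that the XXX comment above notes callers currently rely on.
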
def prettyformatset(revs):
    lines = []
    rs = repr(revs)
    p = 0
    while p < len(rs):
        q = rs.find('<', p + 1)
        if q < 0:
            q = len(rs)
        l = rs.count('<', 0, p) - rs.count('>', 0, p)
        assert l >= 0
        lines.append((l, rs[p:q].rstrip()))
        p = q
    return '\n'.join(' ' * l + s for l, s in lines)

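# Illustrative example only: prettyformatset() indents each '<...>' chunk of
# repr(revs) by its nesting depth, which makes deeply nested smartset reprs
# readable. The class below is a made-up stand-in with a nested repr, not a
# real smartset.
class _fakenested(object):
    def __repr__(self):
        return '<outerset+ <innerset+ 0:3>, <innerset- 7:4>>'

# prettyformatset(_fakenested()) returns the string:
#     <outerset+
#      <innerset+ 0:3>,
#      <innerset- 7:4>>
# (one leading space per level of '<' nesting)
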
def loadpredicate(ui, extname, registrarobj):
    """Load revset predicates from specified registrarobj
    """
    for name, func in registrarobj._table.iteritems():
        symbols[name] = func
        if func._safe:
            safesymbols.add(name)

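# Illustrative sketch only: roughly how predicates end up in a registrarobj
# that loadpredicate() can consume. Nothing below is wired into the real
# `predicate` table, and the names _sketchpredicate and evenrevs are made up
# for the example; a real extension would expose its own
# registrar.revsetpredicate() instance and let the extension loader call
# loadpredicate() on it.
_sketchpredicate = registrar.revsetpredicate()

@_sketchpredicate('evenrevs()', safe=True)
def _evenrevs(repo, subset, x):
    """Changesets whose revision number is even (illustrative only)."""
    # a real predicate would validate its arguments (see getargs/getstring)
    return subset.filter(lambda r: r % 2 == 0)

# The loader would then call loadpredicate(ui, 'myext', _sketchpredicate),
# copying 'evenrevs' into `symbols` and, because safe=True, into
# `safesymbols` as well ('myext' is a hypothetical extension name).
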
# load built-in predicates explicitly to set up safesymbols
loadpredicate(None, None, predicate)

# tell hggettext to extract docstrings from these functions:
i18nfunctions = symbols.values()