revset: add public function to create matcher from evaluatable tree...
Yuya Nishihara
r29906:41491cf9 default
@@ -1,3730 +1,3732 @@
# revset.py - revision set queries for mercurial
#
# Copyright 2010 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import heapq
import re

from .i18n import _
from . import (
    destutil,
    encoding,
    error,
    hbisect,
    match as matchmod,
    node,
    obsolete as obsmod,
    parser,
    pathutil,
    phases,
    registrar,
    repoview,
    util,
)

def _revancestors(repo, revs, followfirst):
    """Like revlog.ancestors(), but supports followfirst."""
    if followfirst:
        cut = 1
    else:
        cut = None
    cl = repo.changelog

    def iterate():
        revs.sort(reverse=True)
        irevs = iter(revs)
        h = []

        inputrev = next(irevs, None)
        if inputrev is not None:
            heapq.heappush(h, -inputrev)

        seen = set()
        while h:
            current = -heapq.heappop(h)
            if current == inputrev:
                inputrev = next(irevs, None)
                if inputrev is not None:
                    heapq.heappush(h, -inputrev)
            if current not in seen:
                seen.add(current)
                yield current
                for parent in cl.parentrevs(current)[:cut]:
                    if parent != node.nullrev:
                        heapq.heappush(h, -parent)

    return generatorset(iterate(), iterasc=False)

def _revdescendants(repo, revs, followfirst):
    """Like revlog.descendants() but supports followfirst."""
    if followfirst:
        cut = 1
    else:
        cut = None

    def iterate():
        cl = repo.changelog
        # XXX this should be 'parentset.min()' assuming 'parentset' is a
        # smartset (and if it is not, it should.)
        first = min(revs)
        nullrev = node.nullrev
        if first == nullrev:
            # Are there nodes with a null first parent and a non-null
            # second one? Maybe. Do we care? Probably not.
            for i in cl:
                yield i
        else:
            seen = set(revs)
            for i in cl.revs(first + 1):
                for x in cl.parentrevs(i)[:cut]:
                    if x != nullrev and x in seen:
                        seen.add(i)
                        yield i
                        break

    return generatorset(iterate(), iterasc=True)

def _reachablerootspure(repo, minroot, roots, heads, includepath):
    """return (heads(::<roots> and ::<heads>))

    If includepath is True, return (<roots>::<heads>)."""
    if not roots:
        return []
    parentrevs = repo.changelog.parentrevs
    roots = set(roots)
    visit = list(heads)
    reachable = set()
    seen = {}
    # prefetch all the things! (because python is slow)
    reached = reachable.add
    dovisit = visit.append
    nextvisit = visit.pop
    # open-code the post-order traversal due to the tiny size of
    # sys.getrecursionlimit()
    while visit:
        rev = nextvisit()
        if rev in roots:
            reached(rev)
            if not includepath:
                continue
        parents = parentrevs(rev)
        seen[rev] = parents
        for parent in parents:
            if parent >= minroot and parent not in seen:
                dovisit(parent)
    if not reachable:
        return baseset()
    if not includepath:
        return reachable
    for rev in sorted(seen):
        for parent in seen[rev]:
            if parent in reachable:
                reached(rev)
    return reachable

def reachableroots(repo, roots, heads, includepath=False):
    """return (heads(::<roots> and ::<heads>))

    If includepath is True, return (<roots>::<heads>)."""
    if not roots:
        return baseset()
    minroot = roots.min()
    roots = list(roots)
    heads = list(heads)
    try:
        revs = repo.changelog.reachableroots(minroot, heads, roots, includepath)
    except AttributeError:
        revs = _reachablerootspure(repo, minroot, roots, heads, includepath)
    revs = baseset(revs)
    revs.sort()
    return revs
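
# For illustration (assuming a trivial linear history 0 <- 1 <- 2 <- 3):
# with roots containing 1 and heads containing 3, reachableroots() returns
# just {1}, the roots reachable from the given heads; with includepath=True
# it keeps every revision on the connecting paths, {1, 2, 3}, which is what
# the '1::3' dagrange evaluates to.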
elements = {
    # token-type: binding-strength, primary, prefix, infix, suffix
    "(": (21, None, ("group", 1, ")"), ("func", 1, ")"), None),
    "##": (20, None, None, ("_concat", 20), None),
    "~": (18, None, None, ("ancestor", 18), None),
    "^": (18, None, None, ("parent", 18), "parentpost"),
    "-": (5, None, ("negate", 19), ("minus", 5), None),
    "::": (17, None, ("dagrangepre", 17), ("dagrange", 17), "dagrangepost"),
    "..": (17, None, ("dagrangepre", 17), ("dagrange", 17), "dagrangepost"),
    ":": (15, "rangeall", ("rangepre", 15), ("range", 15), "rangepost"),
    "not": (10, None, ("not", 10), None, None),
    "!": (10, None, ("not", 10), None, None),
    "and": (5, None, None, ("and", 5), None),
    "&": (5, None, None, ("and", 5), None),
    "%": (5, None, None, ("only", 5), "onlypost"),
    "or": (4, None, None, ("or", 4), None),
    "|": (4, None, None, ("or", 4), None),
    "+": (4, None, None, ("or", 4), None),
    "=": (3, None, None, ("keyvalue", 3), None),
    ",": (2, None, None, ("list", 2), None),
    ")": (0, None, None, None, None),
    "symbol": (0, "symbol", None, None, None),
    "string": (0, "string", None, None, None),
    "end": (0, None, None, None, None),
}
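
# For illustration: the "~" entry above registers '~' as an infix-only
# operator with binding strength 18, so 'foo~2' parses to roughly
# ('ancestor', ('symbol', 'foo'), ('symbol', '2')), and because 18 > 4
# (the strength of 'or'), 'foo~2 or bar' groups as '(foo~2) or bar'.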

keywords = set(['and', 'or', 'not'])

# default set of valid characters for the initial letter of symbols
_syminitletters = set(c for c in [chr(i) for i in xrange(256)]
                      if c.isalnum() or c in '._@' or ord(c) > 127)

# default set of valid characters for non-initial letters of symbols
_symletters = set(c for c in [chr(i) for i in xrange(256)]
                  if c.isalnum() or c in '-._/@' or ord(c) > 127)

def tokenize(program, lookup=None, syminitletters=None, symletters=None):
    '''
    Parse a revset statement into a stream of tokens

    ``syminitletters`` is the set of valid characters for the initial
    letter of symbols.

    By default, character ``c`` is recognized as valid for initial
    letter of symbols, if ``c.isalnum() or c in '._@' or ord(c) > 127``.

    ``symletters`` is the set of valid characters for non-initial
    letters of symbols.

    By default, character ``c`` is recognized as valid for non-initial
    letters of symbols, if ``c.isalnum() or c in '-._/@' or ord(c) > 127``.

    Check that @ is a valid unquoted token character (issue3686):
    >>> list(tokenize("@::"))
    [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]

    '''
    if syminitletters is None:
        syminitletters = _syminitletters
    if symletters is None:
        symletters = _symletters

    if program and lookup:
        # attempt to parse old-style ranges first to deal with
        # things like old-tag which contain query metacharacters
        parts = program.split(':', 1)
        if all(lookup(sym) for sym in parts if sym):
            if parts[0]:
                yield ('symbol', parts[0], 0)
            if len(parts) > 1:
                s = len(parts[0])
                yield (':', None, s)
                if parts[1]:
                    yield ('symbol', parts[1], s + 1)
            yield ('end', None, len(program))
            return

    pos, l = 0, len(program)
    while pos < l:
        c = program[pos]
        if c.isspace(): # skip inter-token whitespace
            pass
        elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
            yield ('::', None, pos)
            pos += 1 # skip ahead
        elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
            yield ('..', None, pos)
            pos += 1 # skip ahead
        elif c == '#' and program[pos:pos + 2] == '##': # look ahead carefully
            yield ('##', None, pos)
            pos += 1 # skip ahead
        elif c in "():=,-|&+!~^%": # handle simple operators
            yield (c, None, pos)
        elif (c in '"\'' or c == 'r' and
              program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
            if c == 'r':
                pos += 1
                c = program[pos]
                decode = lambda x: x
            else:
                decode = parser.unescapestr
            pos += 1
            s = pos
            while pos < l: # find closing quote
                d = program[pos]
                if d == '\\': # skip over escaped characters
                    pos += 2
                    continue
                if d == c:
                    yield ('string', decode(program[s:pos]), s)
                    break
                pos += 1
            else:
                raise error.ParseError(_("unterminated string"), s)
        # gather up a symbol/keyword
        elif c in syminitletters:
            s = pos
            pos += 1
            while pos < l: # find end of symbol
                d = program[pos]
                if d not in symletters:
                    break
                if d == '.' and program[pos - 1] == '.': # special case for ..
                    pos -= 1
                    break
                pos += 1
            sym = program[s:pos]
            if sym in keywords: # operator keywords
                yield (sym, None, s)
            elif '-' in sym:
                # some jerk gave us foo-bar-baz, try to check if it's a symbol
                if lookup and lookup(sym):
                    # looks like a real symbol
                    yield ('symbol', sym, s)
                else:
                    # looks like an expression
                    parts = sym.split('-')
                    for p in parts[:-1]:
                        if p: # possible consecutive -
                            yield ('symbol', p, s)
                            s += len(p)
                        yield ('-', None, pos)
                        s += 1
                    if parts[-1]: # possible trailing -
                        yield ('symbol', parts[-1], s)
            else:
                yield ('symbol', sym, s)
                pos -= 1
        else:
            raise error.ParseError(_("syntax error in revset '%s'") %
                                   program, pos)
        pos += 1
    yield ('end', None, pos)
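
# For illustration, tokenizing a plain range expression (no lookup callback)
# yields position-annotated tokens, much like the doctest above:
#
#   >>> list(tokenize("2:tip"))
#   [('symbol', '2', 0), (':', None, 1), ('symbol', 'tip', 2), ('end', None, 5)]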
# helpers

def getsymbol(x):
    if x and x[0] == 'symbol':
        return x[1]
    raise error.ParseError(_('not a symbol'))

def getstring(x, err):
    if x and (x[0] == 'string' or x[0] == 'symbol'):
        return x[1]
    raise error.ParseError(err)

def getlist(x):
    if not x:
        return []
    if x[0] == 'list':
        return list(x[1:])
    return [x]

def getargs(x, min, max, err):
    l = getlist(x)
    if len(l) < min or (max >= 0 and len(l) > max):
        raise error.ParseError(err)
    return l

def getargsdict(x, funcname, keys):
    return parser.buildargsdict(getlist(x), funcname, keys.split(),
                                keyvaluenode='keyvalue', keynode='symbol')
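
# For illustration: with keys 'label value', getargsdict() maps positional
# arguments and 'key=value' pairs onto the same dict, so both
# 'extra(branch, stable)' and 'extra(label=branch, value=stable)' should
# yield argument trees under the 'label' and 'value' keys.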
def getset(repo, subset, x):
    if not x:
        raise error.ParseError(_("missing argument"))
    s = methods[x[0]](repo, subset, *x[1:])
    if util.safehasattr(s, 'isascending'):
        return s
    # else case should not happen, because all non-func are internal,
    # ignoring for now.
    if x[0] == 'func' and x[1][0] == 'symbol' and x[1][1] in symbols:
        repo.ui.deprecwarn('revset "%s" uses list instead of smartset'
                           % x[1][1],
                           '3.9')
    return baseset(s)

def _getrevsource(repo, r):
    extra = repo[r].extra()
    for label in ('source', 'transplant_source', 'rebase_source'):
        if label in extra:
            try:
                return repo[extra[label]].rev()
            except error.RepoLookupError:
                pass
    return None

# operator methods

def stringset(repo, subset, x):
    x = repo[x].rev()
    if (x in subset
        or x == node.nullrev and isinstance(subset, fullreposet)):
        return baseset([x])
    return baseset()

def rangeset(repo, subset, x, y):
    m = getset(repo, fullreposet(repo), x)
    n = getset(repo, fullreposet(repo), y)

    if not m or not n:
        return baseset()
    m, n = m.first(), n.last()

    if m == n:
        r = baseset([m])
    elif n == node.wdirrev:
        r = spanset(repo, m, len(repo)) + baseset([n])
    elif m == node.wdirrev:
        r = baseset([m]) + spanset(repo, len(repo) - 1, n - 1)
    elif m < n:
        r = spanset(repo, m, n + 1)
    else:
        r = spanset(repo, m, n - 1)
    # XXX We should combine with subset first: 'subset & baseset(...)'. This is
    # necessary to ensure we preserve the order in subset.
    #
    # This has performance implication, carrying the sorting over when possible
    # would be more efficient.
    return r & subset

def dagrange(repo, subset, x, y):
    r = fullreposet(repo)
    xs = reachableroots(repo, getset(repo, r, x), getset(repo, r, y),
                        includepath=True)
    return subset & xs

def andset(repo, subset, x, y):
    return getset(repo, getset(repo, subset, x), y)

def differenceset(repo, subset, x, y):
    return getset(repo, subset, x) - getset(repo, subset, y)

def orset(repo, subset, *xs):
    assert xs
    if len(xs) == 1:
        return getset(repo, subset, xs[0])
    p = len(xs) // 2
    a = orset(repo, subset, *xs[:p])
    b = orset(repo, subset, *xs[p:])
    return a + b

def notset(repo, subset, x):
    return subset - getset(repo, subset, x)

def listset(repo, subset, *xs):
    raise error.ParseError(_("can't use a list in this context"),
                           hint=_('see hg help "revsets.x or y"'))

def keyvaluepair(repo, subset, k, v):
    raise error.ParseError(_("can't use a key-value pair in this context"))

def func(repo, subset, a, b):
    f = getsymbol(a)
    if f in symbols:
        return symbols[f](repo, subset, b)

    keep = lambda fn: getattr(fn, '__doc__', None) is not None

    syms = [s for (s, fn) in symbols.items() if keep(fn)]
    raise error.UnknownIdentifier(f, syms)

# functions

# symbols are callables like:
#   fn(repo, subset, x)
# with:
#   repo - current repository instance
#   subset - of revisions to be examined
#   x - argument in tree form
symbols = {}
# symbols which can't be used for a DoS attack for any given input
# (e.g. those which accept regexes as plain strings shouldn't be included)
# functions that just return a lot of changesets (like all) don't count here
safesymbols = set()

predicate = registrar.revsetpredicate()

@predicate('_destupdate')
def _destupdate(repo, subset, x):
    # experimental revset for update destination
    args = getargsdict(x, 'limit', 'clean check')
    return subset & baseset([destutil.destupdate(repo, **args)[0]])

@predicate('_destmerge')
def _destmerge(repo, subset, x):
    # experimental revset for merge destination
    sourceset = None
    if x is not None:
        sourceset = getset(repo, fullreposet(repo), x)
    return subset & baseset([destutil.destmerge(repo, sourceset=sourceset)])

@predicate('adds(pattern)', safe=True)
def adds(repo, subset, x):
    """Changesets that add a file matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "adds" is a keyword
    pat = getstring(x, _("adds requires a pattern"))
    return checkstatus(repo, subset, pat, 1)

@predicate('ancestor(*changeset)', safe=True)
def ancestor(repo, subset, x):
    """A greatest common ancestor of the changesets.

    Accepts 0 or more changesets.
    Will return empty list when passed no args.
    Greatest common ancestor of a single changeset is that changeset.
    """
    # i18n: "ancestor" is a keyword
    l = getlist(x)
    rl = fullreposet(repo)
    anc = None

    # (getset(repo, rl, i) for i in l) generates a list of lists
    for revs in (getset(repo, rl, i) for i in l):
        for r in revs:
            if anc is None:
                anc = repo[r]
            else:
                anc = anc.ancestor(repo[r])

    if anc is not None and anc.rev() in subset:
        return baseset([anc.rev()])
    return baseset()

def _ancestors(repo, subset, x, followfirst=False):
    heads = getset(repo, fullreposet(repo), x)
    if not heads:
        return baseset()
    s = _revancestors(repo, heads, followfirst)
    return subset & s

@predicate('ancestors(set)', safe=True)
def ancestors(repo, subset, x):
    """Changesets that are ancestors of a changeset in set.
    """
    return _ancestors(repo, subset, x)

@predicate('_firstancestors', safe=True)
def _firstancestors(repo, subset, x):
    # ``_firstancestors(set)``
    # Like ``ancestors(set)`` but follows only the first parents.
    return _ancestors(repo, subset, x, followfirst=True)

def ancestorspec(repo, subset, x, n):
    """``set~n``
    Changesets that are the Nth ancestor (first parents only) of a changeset
    in set.
    """
    try:
        n = int(n[1])
    except (TypeError, ValueError):
        raise error.ParseError(_("~ expects a number"))
    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        for i in range(n):
            r = cl.parentrevs(r)[0]
        ps.add(r)
    return subset & ps

@predicate('author(string)', safe=True)
def author(repo, subset, x):
    """Alias for ``user(string)``.
    """
    # i18n: "author" is a keyword
    n = encoding.lower(getstring(x, _("author requires a string")))
    kind, pattern, matcher = _substringmatcher(n)
    return subset.filter(lambda x: matcher(encoding.lower(repo[x].user())),
                         condrepr=('<user %r>', n))

@predicate('bisect(string)', safe=True)
def bisect(repo, subset, x):
    """Changesets marked in the specified bisect status:

    - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
    - ``goods``, ``bads``        : csets topologically good/bad
    - ``range``                  : csets taking part in the bisection
    - ``pruned``                 : csets that are goods, bads or skipped
    - ``untested``               : csets whose fate is yet unknown
    - ``ignored``                : csets ignored due to DAG topology
    - ``current``                : the cset currently being bisected
    """
    # i18n: "bisect" is a keyword
    status = getstring(x, _("bisect requires a string")).lower()
    state = set(hbisect.get(repo, status))
    return subset & state

# Backward-compatibility
# - no help entry so that we do not advertise it any more
@predicate('bisected', safe=True)
def bisected(repo, subset, x):
    return bisect(repo, subset, x)

@predicate('bookmark([name])', safe=True)
def bookmark(repo, subset, x):
    """The named bookmark or all bookmarks.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a bookmark that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "bookmark" is a keyword
    args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
    if args:
        bm = getstring(args[0],
                       # i18n: "bookmark" is a keyword
                       _('the argument to bookmark must be a string'))
        kind, pattern, matcher = util.stringmatcher(bm)
        bms = set()
        if kind == 'literal':
            bmrev = repo._bookmarks.get(pattern, None)
            if not bmrev:
                raise error.RepoLookupError(_("bookmark '%s' does not exist")
                                            % pattern)
            bms.add(repo[bmrev].rev())
        else:
            matchrevs = set()
            for name, bmrev in repo._bookmarks.iteritems():
                if matcher(name):
                    matchrevs.add(bmrev)
            if not matchrevs:
                raise error.RepoLookupError(_("no bookmarks exist"
                                              " that match '%s'") % pattern)
            for bmrev in matchrevs:
                bms.add(repo[bmrev].rev())
    else:
        bms = set([repo[r].rev()
                   for r in repo._bookmarks.values()])
    bms -= set([node.nullrev])
    return subset & bms

@predicate('branch(string or set)', safe=True)
def branch(repo, subset, x):
    """
    All changesets belonging to the given branch or the branches of the given
    changesets.

    If `string` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a branch that actually starts with `re:`,
    use the prefix `literal:`.
    """
    getbi = repo.revbranchcache().branchinfo

    try:
        b = getstring(x, '')
    except error.ParseError:
        # not a string, but another revspec, e.g. tip()
        pass
    else:
        kind, pattern, matcher = util.stringmatcher(b)
        if kind == 'literal':
            # note: falls through to the revspec case if no branch with
            # this name exists and pattern kind is not specified explicitly
            if pattern in repo.branchmap():
                return subset.filter(lambda r: matcher(getbi(r)[0]),
                                     condrepr=('<branch %r>', b))
            if b.startswith('literal:'):
                raise error.RepoLookupError(_("branch '%s' does not exist")
                                            % pattern)
        else:
            return subset.filter(lambda r: matcher(getbi(r)[0]),
                                 condrepr=('<branch %r>', b))

    s = getset(repo, fullreposet(repo), x)
    b = set()
    for r in s:
        b.add(getbi(r)[0])
    c = s.__contains__
    return subset.filter(lambda r: c(r) or getbi(r)[0] in b,
                         condrepr=lambda: '<branch %r>' % sorted(b))

@predicate('bumped()', safe=True)
def bumped(repo, subset, x):
    """Mutable changesets marked as successors of public changesets.

    Only non-public and non-obsolete changesets can be `bumped`.
    """
    # i18n: "bumped" is a keyword
    getargs(x, 0, 0, _("bumped takes no arguments"))
    bumped = obsmod.getrevs(repo, 'bumped')
    return subset & bumped

@predicate('bundle()', safe=True)
def bundle(repo, subset, x):
    """Changesets in the bundle.

    Bundle must be specified by the -R option."""

    try:
        bundlerevs = repo.changelog.bundlerevs
    except AttributeError:
        raise error.Abort(_("no bundle provided - specify with -R"))
    return subset & bundlerevs

def checkstatus(repo, subset, pat, field):
    hasset = matchmod.patkind(pat) == 'set'

    mcache = [None]
    def matches(x):
        c = repo[x]
        if not mcache[0] or hasset:
            mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
        m = mcache[0]
        fname = None
        if not m.anypats() and len(m.files()) == 1:
            fname = m.files()[0]
        if fname is not None:
            if fname not in c.files():
                return False
        else:
            for f in c.files():
                if m(f):
                    break
            else:
                return False
        files = repo.status(c.p1().node(), c.node())[field]
        if fname is not None:
            if fname in files:
                return True
        else:
            for f in files:
                if m(f):
                    return True

    return subset.filter(matches, condrepr=('<status[%r] %r>', field, pat))
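
# For illustration: 'field' selects one slot of the repo.status() result
# against the first parent, so checkstatus(repo, subset, pat, 1) keeps
# changesets whose status lists a matching file as added -- this is how
# adds() above is implemented; sibling predicates use the neighbouring
# indices the same way.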
def _children(repo, subset, parentset):
    if not parentset:
        return baseset()
    cs = set()
    pr = repo.changelog.parentrevs
    minrev = parentset.min()
    for r in subset:
        if r <= minrev:
            continue
        for p in pr(r):
            if p in parentset:
                cs.add(r)
    return baseset(cs)
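
# For illustration: revision numbers are assigned in topological order, so a
# child always has a higher number than any of its parents; skipping every
# r <= parentset.min() above therefore prunes candidates cheaply without
# changing the result.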
@predicate('children(set)', safe=True)
def children(repo, subset, x):
    """Child changesets of changesets in set.
    """
    s = getset(repo, fullreposet(repo), x)
    cs = _children(repo, subset, s)
    return subset & cs

@predicate('closed()', safe=True)
def closed(repo, subset, x):
    """Changeset is closed.
    """
    # i18n: "closed" is a keyword
    getargs(x, 0, 0, _("closed takes no arguments"))
    return subset.filter(lambda r: repo[r].closesbranch(),
                         condrepr='<branch closed>')

@predicate('contains(pattern)')
def contains(repo, subset, x):
    """The revision's manifest contains a file matching pattern (but might not
    modify it). See :hg:`help patterns` for information about file patterns.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.
    """
    # i18n: "contains" is a keyword
    pat = getstring(x, _("contains requires a pattern"))

    def matches(x):
        if not matchmod.patkind(pat):
            pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
            if pats in repo[x]:
                return True
        else:
            c = repo[x]
            m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
            for f in c.manifest():
                if m(f):
                    return True
        return False

    return subset.filter(matches, condrepr=('<contains %r>', pat))

@predicate('converted([id])', safe=True)
def converted(repo, subset, x):
    """Changesets converted from the given identifier in the old repository if
    present, or all converted changesets if no identifier is specified.
    """

    # There is exactly no chance of resolving the revision, so do a simple
    # string compare and hope for the best

    rev = None
    # i18n: "converted" is a keyword
    l = getargs(x, 0, 1, _('converted takes one or no arguments'))
    if l:
        # i18n: "converted" is a keyword
        rev = getstring(l[0], _('converted requires a revision'))

    def _matchvalue(r):
        source = repo[r].extra().get('convert_revision', None)
        return source is not None and (rev is None or source.startswith(rev))

    return subset.filter(lambda r: _matchvalue(r),
                         condrepr=('<converted %r>', rev))

@predicate('date(interval)', safe=True)
def date(repo, subset, x):
    """Changesets within the interval, see :hg:`help dates`.
    """
    # i18n: "date" is a keyword
    ds = getstring(x, _("date requires a string"))
    dm = util.matchdate(ds)
    return subset.filter(lambda x: dm(repo[x].date()[0]),
                         condrepr=('<date %r>', ds))

@predicate('desc(string)', safe=True)
def desc(repo, subset, x):
    """Search commit message for string. The match is case-insensitive.
    """
    # i18n: "desc" is a keyword
    ds = encoding.lower(getstring(x, _("desc requires a string")))

    def matches(x):
        c = repo[x]
        return ds in encoding.lower(c.description())

    return subset.filter(matches, condrepr=('<desc %r>', ds))

def _descendants(repo, subset, x, followfirst=False):
    roots = getset(repo, fullreposet(repo), x)
    if not roots:
        return baseset()
    s = _revdescendants(repo, roots, followfirst)

    # Both sets need to be ascending in order to lazily return the union
    # in the correct order.
    base = subset & roots
    desc = subset & s
    result = base + desc
    if subset.isascending():
        result.sort()
    elif subset.isdescending():
        result.sort(reverse=True)
    else:
        result = subset & result
    return result

@predicate('descendants(set)', safe=True)
def descendants(repo, subset, x):
    """Changesets which are descendants of changesets in set.
    """
    return _descendants(repo, subset, x)

@predicate('_firstdescendants', safe=True)
def _firstdescendants(repo, subset, x):
    # ``_firstdescendants(set)``
    # Like ``descendants(set)`` but follows only the first parents.
    return _descendants(repo, subset, x, followfirst=True)

@predicate('destination([set])', safe=True)
def destination(repo, subset, x):
    """Changesets that were created by a graft, transplant or rebase operation,
    with the given revisions specified as the source. Omitting the optional set
    is the same as passing all().
    """
    if x is not None:
        sources = getset(repo, fullreposet(repo), x)
    else:
        sources = fullreposet(repo)

    dests = set()

    # subset contains all of the possible destinations that can be returned, so
    # iterate over them and see if their source(s) were provided in the arg set.
    # Even if the immediate src of r is not in the arg set, src's source (or
    # further back) may be. Scanning back further than the immediate src allows
    # transitive transplants and rebases to yield the same results as transitive
    # grafts.
    for r in subset:
        src = _getrevsource(repo, r)
        lineage = None

        while src is not None:
            if lineage is None:
                lineage = list()

            lineage.append(r)

            # The visited lineage is a match if the current source is in the arg
            # set. Since every candidate dest is visited by way of iterating
            # subset, any dests further back in the lineage will be tested by a
            # different iteration over subset. Likewise, if the src was already
            # selected, the current lineage can be selected without going back
            # further.
            if src in sources or src in dests:
                dests.update(lineage)
                break

            r = src
            src = _getrevsource(repo, r)

    return subset.filter(dests.__contains__,
                         condrepr=lambda: '<destination %r>' % sorted(dests))
878
878
879 @predicate('divergent()', safe=True)
879 @predicate('divergent()', safe=True)
880 def divergent(repo, subset, x):
880 def divergent(repo, subset, x):
881 """
881 """
882 Final successors of changesets with an alternative set of final successors.
882 Final successors of changesets with an alternative set of final successors.
883 """
883 """
884 # i18n: "divergent" is a keyword
884 # i18n: "divergent" is a keyword
885 getargs(x, 0, 0, _("divergent takes no arguments"))
885 getargs(x, 0, 0, _("divergent takes no arguments"))
886 divergent = obsmod.getrevs(repo, 'divergent')
886 divergent = obsmod.getrevs(repo, 'divergent')
887 return subset & divergent
887 return subset & divergent
888
888
889 @predicate('extinct()', safe=True)
889 @predicate('extinct()', safe=True)
890 def extinct(repo, subset, x):
890 def extinct(repo, subset, x):
891 """Obsolete changesets with obsolete descendants only.
891 """Obsolete changesets with obsolete descendants only.
892 """
892 """
893 # i18n: "extinct" is a keyword
893 # i18n: "extinct" is a keyword
894 getargs(x, 0, 0, _("extinct takes no arguments"))
894 getargs(x, 0, 0, _("extinct takes no arguments"))
895 extincts = obsmod.getrevs(repo, 'extinct')
895 extincts = obsmod.getrevs(repo, 'extinct')
896 return subset & extincts
896 return subset & extincts
897
897
898 @predicate('extra(label, [value])', safe=True)
898 @predicate('extra(label, [value])', safe=True)
899 def extra(repo, subset, x):
899 def extra(repo, subset, x):
900 """Changesets with the given label in the extra metadata, with the given
900 """Changesets with the given label in the extra metadata, with the given
901 optional value.
901 optional value.
902
902
903 If `value` starts with `re:`, the remainder of the value is treated as
903 If `value` starts with `re:`, the remainder of the value is treated as
904 a regular expression. To match a value that actually starts with `re:`,
904 a regular expression. To match a value that actually starts with `re:`,
905 use the prefix `literal:`.
905 use the prefix `literal:`.
906 """
906 """
907 args = getargsdict(x, 'extra', 'label value')
907 args = getargsdict(x, 'extra', 'label value')
908 if 'label' not in args:
908 if 'label' not in args:
909 # i18n: "extra" is a keyword
909 # i18n: "extra" is a keyword
910 raise error.ParseError(_('extra takes at least 1 argument'))
910 raise error.ParseError(_('extra takes at least 1 argument'))
911 # i18n: "extra" is a keyword
911 # i18n: "extra" is a keyword
912 label = getstring(args['label'], _('first argument to extra must be '
912 label = getstring(args['label'], _('first argument to extra must be '
913 'a string'))
913 'a string'))
914 value = None
914 value = None
915
915
916 if 'value' in args:
916 if 'value' in args:
917 # i18n: "extra" is a keyword
917 # i18n: "extra" is a keyword
918 value = getstring(args['value'], _('second argument to extra must be '
918 value = getstring(args['value'], _('second argument to extra must be '
919 'a string'))
919 'a string'))
920 kind, value, matcher = util.stringmatcher(value)
920 kind, value, matcher = util.stringmatcher(value)
921
921
922 def _matchvalue(r):
922 def _matchvalue(r):
923 extra = repo[r].extra()
923 extra = repo[r].extra()
924 return label in extra and (value is None or matcher(extra[label]))
924 return label in extra and (value is None or matcher(extra[label]))
925
925
926 return subset.filter(lambda r: _matchvalue(r),
926 return subset.filter(lambda r: _matchvalue(r),
927 condrepr=('<extra[%r] %r>', label, value))
927 condrepr=('<extra[%r] %r>', label, value))
928
928
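# Illustrative usage (added note, not part of the original module): the extra()
# predicate above is typically queried from the command line, e.g.
#
#   hg log -r 'extra(rebase_source)'         # changesets that record a rebase source
#   hg log -r 'extra(branch, re:^stable)'    # 'branch' extra matching a regex
#
# The 're:' form follows the prefix handling described in the docstring.
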
@predicate('filelog(pattern)', safe=True)
def filelog(repo, subset, x):
    """Changesets connected to the specified filelog.

    For performance reasons, visits only revisions mentioned in the file-level
    filelog, rather than filtering through all changesets (much faster, but
    doesn't include deletes or duplicate changes). For a slower, more accurate
    result, use ``file()``.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.

    If some linkrev points to revisions filtered by the current repoview, we'll
    work around it to return a non-filtered value.
    """

    # i18n: "filelog" is a keyword
    pat = getstring(x, _("filelog requires a pattern"))
    s = set()
    cl = repo.changelog

    if not matchmod.patkind(pat):
        f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
        files = [f]
    else:
        m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
        files = (f for f in repo[None] if m(f))

    for f in files:
        fl = repo.file(f)
        known = {}
        scanpos = 0
        for fr in list(fl):
            fn = fl.node(fr)
            if fn in known:
                s.add(known[fn])
                continue

            lr = fl.linkrev(fr)
            if lr in cl:
                s.add(lr)
            elif scanpos is not None:
                # lowest matching changeset is filtered, scan further
                # ahead in changelog
                start = max(lr, scanpos) + 1
                scanpos = None
                for r in cl.revs(start):
                    # minimize parsing of non-matching entries
                    if f in cl.revision(r) and f in cl.readfiles(r):
                        try:
                            # try to use manifest delta fastpath
                            n = repo[r].filenode(f)
                            if n not in known:
                                if n == fn:
                                    s.add(r)
                                    scanpos = r
                                    break
                                else:
                                    known[n] = r
                        except error.ManifestLookupError:
                            # deletion in changelog
                            continue

    return subset & s

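# Illustrative usage (added note, not part of the original module): because a
# bare pattern must name one file exactly, a typical fast query looks like
#
#   hg log -r 'filelog(mercurial/revset.py)'
#
# whereas renames and deletes are only caught by the slower file() predicate
# defined further down.
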
@predicate('first(set, [n])', safe=True)
def first(repo, subset, x):
    """An alias for limit().
    """
    return limit(repo, subset, x)

def _follow(repo, subset, x, name, followfirst=False):
    l = getargs(x, 0, 2, _("%s takes no arguments or a pattern "
                           "and an optional revset") % name)
    c = repo['.']
    if l:
        x = getstring(l[0], _("%s expected a pattern") % name)
        rev = None
        if len(l) >= 2:
            rev = getset(repo, fullreposet(repo), l[1]).last()
            if rev is None:
                raise error.RepoLookupError(
                    _("%s: starting revision set cannot be empty") % name)
        c = repo[rev]
        matcher = matchmod.match(repo.root, repo.getcwd(), [x],
                                 ctx=repo[rev], default='path')

        files = c.manifest().walk(matcher)

        s = set()
        for fname in files:
            fctx = c[fname]
            s = s.union(set(c.rev() for c in fctx.ancestors(followfirst)))
            # include the revision responsible for the most recent version
            s.add(fctx.introrev())
    else:
        s = _revancestors(repo, baseset([c.rev()]), followfirst)

    return subset & s

@predicate('follow([pattern[, startrev]])', safe=True)
def follow(repo, subset, x):
    """
    An alias for ``::.`` (ancestors of the working directory's first parent).
    If pattern is specified, the histories of files matching given
    pattern in the revision given by startrev are followed, including copies.
    """
    return _follow(repo, subset, x, 'follow')

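# Illustrative usage (added note, not part of the original module):
#
#   hg log -r 'follow()'              # same as ::. (ancestors of the working
#                                     # directory's first parent)
#   hg log -r 'follow(README, tip)'   # history of README as seen from tip,
#                                     # following renames and copies
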
@predicate('_followfirst', safe=True)
def _followfirst(repo, subset, x):
    # ``followfirst([pattern[, startrev]])``
    # Like ``follow([pattern[, startrev]])`` but follows only the first parent
    # of every revision or file revision.
    return _follow(repo, subset, x, '_followfirst', followfirst=True)

@predicate('all()', safe=True)
def getall(repo, subset, x):
    """All changesets, the same as ``0:tip``.
    """
    # i18n: "all" is a keyword
    getargs(x, 0, 0, _("all takes no arguments"))
    return subset & spanset(repo)  # drop "null" if any

@predicate('grep(regex)')
def grep(repo, subset, x):
    """Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
    to ensure special escape characters are handled correctly. Unlike
    ``keyword(string)``, the match is case-sensitive.
    """
    try:
        # i18n: "grep" is a keyword
        gr = re.compile(getstring(x, _("grep requires a string")))
    except re.error as e:
        raise error.ParseError(_('invalid match pattern: %s') % e)

    def matches(x):
        c = repo[x]
        for e in c.files() + [c.user(), c.description()]:
            if gr.search(e):
                return True
        return False

    return subset.filter(matches, condrepr=('<grep %r>', gr.pattern))

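# Illustrative usage (added note, not part of the original module):
#
#   hg log -r 'grep(r"issue\d+")'   # case-sensitive regex over files/user/description
#   hg log -r 'keyword(issue)'      # case-insensitive substring counterpart
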
@predicate('_matchfiles', safe=True)
def _matchfiles(repo, subset, x):
    # _matchfiles takes a revset list of prefixed arguments:
    #
    #   [p:foo, i:bar, x:baz]
    #
    # builds a match object from them and filters subset. Allowed
    # prefixes are 'p:' for regular patterns, 'i:' for include
    # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
    # a revision identifier, or the empty string to reference the
    # working directory, from which the match object is
    # initialized. Use 'd:' to set the default matching mode, default
    # to 'glob'. At most one 'r:' and 'd:' argument can be passed.

    l = getargs(x, 1, -1, "_matchfiles requires at least one argument")
    pats, inc, exc = [], [], []
    rev, default = None, None
    for arg in l:
        s = getstring(arg, "_matchfiles requires string arguments")
        prefix, value = s[:2], s[2:]
        if prefix == 'p:':
            pats.append(value)
        elif prefix == 'i:':
            inc.append(value)
        elif prefix == 'x:':
            exc.append(value)
        elif prefix == 'r:':
            if rev is not None:
                raise error.ParseError('_matchfiles expected at most one '
                                       'revision')
            if value != '': # empty means working directory; leave rev as None
                rev = value
        elif prefix == 'd:':
            if default is not None:
                raise error.ParseError('_matchfiles expected at most one '
                                       'default mode')
            default = value
        else:
            raise error.ParseError('invalid _matchfiles prefix: %s' % prefix)
    if not default:
        default = 'glob'

    m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
                       exclude=exc, ctx=repo[rev], default=default)

    # This directly reads the changelog data, as creating a changectx for all
    # revisions is quite expensive.
    getfiles = repo.changelog.readfiles
    wdirrev = node.wdirrev
    def matches(x):
        if x == wdirrev:
            files = repo[x].files()
        else:
            files = getfiles(x)
        for f in files:
            if m(f):
                return True
        return False

    return subset.filter(matches,
                         condrepr=('<matchfiles patterns=%r, include=%r '
                                   'exclude=%r, default=%r, rev=%r>',
                                   pats, inc, exc, default, rev))

@predicate('file(pattern)', safe=True)
def hasfile(repo, subset, x):
    """Changesets affecting files matched by pattern.

    For a faster but less accurate result, consider using ``filelog()``
    instead.

    This predicate uses ``glob:`` as the default kind of pattern.
    """
    # i18n: "file" is a keyword
    pat = getstring(x, _("file requires a pattern"))
    return _matchfiles(repo, subset, ('string', 'p:' + pat))

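# Illustrative usage (added note, not part of the original module):
#
#   hg log -r 'file("**.py")'     # changesets touching any .py file (glob default)
#   hg log -r 'file(re:\.c$)'     # an explicit kind overrides the glob default
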
@predicate('head()', safe=True)
def head(repo, subset, x):
    """Changeset is a named branch head.
    """
    # i18n: "head" is a keyword
    getargs(x, 0, 0, _("head takes no arguments"))
    hs = set()
    cl = repo.changelog
    for ls in repo.branchmap().itervalues():
        hs.update(cl.rev(h) for h in ls)
    return subset & baseset(hs)

@predicate('heads(set)', safe=True)
def heads(repo, subset, x):
    """Members of set with no children in set.
    """
    s = getset(repo, subset, x)
    ps = parents(repo, subset, x)
    return s - ps

@predicate('hidden()', safe=True)
def hidden(repo, subset, x):
    """Hidden changesets.
    """
    # i18n: "hidden" is a keyword
    getargs(x, 0, 0, _("hidden takes no arguments"))
    hiddenrevs = repoview.filterrevs(repo, 'visible')
    return subset & hiddenrevs

@predicate('keyword(string)', safe=True)
def keyword(repo, subset, x):
    """Search commit message, user name, and names of changed files for
    string. The match is case-insensitive.
    """
    # i18n: "keyword" is a keyword
    kw = encoding.lower(getstring(x, _("keyword requires a string")))

    def matches(r):
        c = repo[r]
        return any(kw in encoding.lower(t)
                   for t in c.files() + [c.user(), c.description()])

    return subset.filter(matches, condrepr=('<keyword %r>', kw))

@predicate('limit(set[, n[, offset]])', safe=True)
def limit(repo, subset, x):
    """First n members of set, defaulting to 1, starting from offset.
    """
    args = getargsdict(x, 'limit', 'set n offset')
    if 'set' not in args:
        # i18n: "limit" is a keyword
        raise error.ParseError(_("limit requires one to three arguments"))
    try:
        lim, ofs = 1, 0
        if 'n' in args:
            # i18n: "limit" is a keyword
            lim = int(getstring(args['n'], _("limit requires a number")))
        if 'offset' in args:
            # i18n: "limit" is a keyword
            ofs = int(getstring(args['offset'], _("limit requires a number")))
        if ofs < 0:
            raise error.ParseError(_("negative offset"))
    except (TypeError, ValueError):
        # i18n: "limit" is a keyword
        raise error.ParseError(_("limit expects a number"))
    os = getset(repo, fullreposet(repo), args['set'])
    result = []
    it = iter(os)
    for x in xrange(ofs):
        y = next(it, None)
        if y is None:
            break
    for x in xrange(lim):
        y = next(it, None)
        if y is None:
            break
        elif y in subset:
            result.append(y)
    return baseset(result, datarepr=('<limit n=%d, offset=%d, %r, %r>',
                                     lim, ofs, subset, os))

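# Illustrative usage (added note, not part of the original module): with an
# offset, limit() pages through a set in its natural order, e.g.
#
#   hg log -r 'limit(all(), 3, 2)'   # skips revs 0-1, yields revs 2, 3 and 4
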
@predicate('last(set, [n])', safe=True)
def last(repo, subset, x):
    """Last n members of set, defaulting to 1.
    """
    # i18n: "last" is a keyword
    l = getargs(x, 1, 2, _("last requires one or two arguments"))
    try:
        lim = 1
        if len(l) == 2:
            # i18n: "last" is a keyword
            lim = int(getstring(l[1], _("last requires a number")))
    except (TypeError, ValueError):
        # i18n: "last" is a keyword
        raise error.ParseError(_("last expects a number"))
    os = getset(repo, fullreposet(repo), l[0])
    os.reverse()
    result = []
    it = iter(os)
    for x in xrange(lim):
        y = next(it, None)
        if y is None:
            break
        elif y in subset:
            result.append(y)
    return baseset(result, datarepr=('<last n=%d, %r, %r>', lim, subset, os))

@predicate('max(set)', safe=True)
def maxrev(repo, subset, x):
    """Changeset with highest revision number in set.
    """
    os = getset(repo, fullreposet(repo), x)
    try:
        m = os.max()
        if m in subset:
            return baseset([m], datarepr=('<max %r, %r>', subset, os))
    except ValueError:
        # os.max() throws a ValueError when the collection is empty.
        # Same as python's max().
        pass
    return baseset(datarepr=('<max %r, %r>', subset, os))

@predicate('merge()', safe=True)
def merge(repo, subset, x):
    """Changeset is a merge changeset.
    """
    # i18n: "merge" is a keyword
    getargs(x, 0, 0, _("merge takes no arguments"))
    cl = repo.changelog
    return subset.filter(lambda r: cl.parentrevs(r)[1] != -1,
                         condrepr='<merge>')

@predicate('branchpoint()', safe=True)
def branchpoint(repo, subset, x):
    """Changesets with more than one child.
    """
    # i18n: "branchpoint" is a keyword
    getargs(x, 0, 0, _("branchpoint takes no arguments"))
    cl = repo.changelog
    if not subset:
        return baseset()
    # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
    # (and if it is not, it should.)
    baserev = min(subset)
    parentscount = [0]*(len(repo) - baserev)
    for r in cl.revs(start=baserev + 1):
        for p in cl.parentrevs(r):
            if p >= baserev:
                parentscount[p - baserev] += 1
    return subset.filter(lambda r: parentscount[r - baserev] > 1,
                         condrepr='<branchpoint>')

@predicate('min(set)', safe=True)
def minrev(repo, subset, x):
    """Changeset with lowest revision number in set.
    """
    os = getset(repo, fullreposet(repo), x)
    try:
        m = os.min()
        if m in subset:
            return baseset([m], datarepr=('<min %r, %r>', subset, os))
    except ValueError:
        # os.min() throws a ValueError when the collection is empty.
        # Same as python's min().
        pass
    return baseset(datarepr=('<min %r, %r>', subset, os))

@predicate('modifies(pattern)', safe=True)
def modifies(repo, subset, x):
    """Changesets modifying files matched by pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "modifies" is a keyword
    pat = getstring(x, _("modifies requires a pattern"))
    return checkstatus(repo, subset, pat, 0)

@predicate('named(namespace)')
def named(repo, subset, x):
    """The changesets in a given namespace.

    If `namespace` starts with `re:`, the remainder of the string is treated as
    a regular expression. To match a namespace that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "named" is a keyword
    args = getargs(x, 1, 1, _('named requires a namespace argument'))

    ns = getstring(args[0],
                   # i18n: "named" is a keyword
                   _('the argument to named must be a string'))
    kind, pattern, matcher = util.stringmatcher(ns)
    namespaces = set()
    if kind == 'literal':
        if pattern not in repo.names:
            raise error.RepoLookupError(_("namespace '%s' does not exist")
                                        % ns)
        namespaces.add(repo.names[pattern])
    else:
        for name, ns in repo.names.iteritems():
            if matcher(name):
                namespaces.add(ns)
        if not namespaces:
            raise error.RepoLookupError(_("no namespace exists"
                                          " that match '%s'") % pattern)

    names = set()
    for ns in namespaces:
        for name in ns.listnames(repo):
            if name not in ns.deprecated:
                names.update(repo[n].rev() for n in ns.nodes(repo, name))

    names -= set([node.nullrev])
    return subset & names

@predicate('id(string)', safe=True)
def node_(repo, subset, x):
    """Revision non-ambiguously specified by the given hex string prefix.
    """
    # i18n: "id" is a keyword
    l = getargs(x, 1, 1, _("id requires one argument"))
    # i18n: "id" is a keyword
    n = getstring(l[0], _("id requires a string"))
    if len(n) == 40:
        try:
            rn = repo.changelog.rev(node.bin(n))
        except (LookupError, TypeError):
            rn = None
    else:
        rn = None
        pm = repo.changelog._partialmatch(n)
        if pm is not None:
            rn = repo.changelog.rev(pm)

    if rn is None:
        return baseset()
    result = baseset([rn])
    return result & subset

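# Illustrative usage (added note, not part of the original module; the hash
# prefix below is hypothetical):
#
#   hg log -r 'id(c3f1ca8)'   # unambiguous hex prefix lookup
#   hg log -r 'rev(42)'       # the numeric counterpart, handled by rev() below
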
@predicate('obsolete()', safe=True)
def obsolete(repo, subset, x):
    """Mutable changeset with a newer version."""
    # i18n: "obsolete" is a keyword
    getargs(x, 0, 0, _("obsolete takes no arguments"))
    obsoletes = obsmod.getrevs(repo, 'obsolete')
    return subset & obsoletes

@predicate('only(set, [set])', safe=True)
def only(repo, subset, x):
    """Changesets that are ancestors of the first set that are not ancestors
    of any other head in the repo. If a second set is specified, the result
    is ancestors of the first set that are not ancestors of the second set
    (i.e. ::<set1> - ::<set2>).
    """
    cl = repo.changelog
    # i18n: "only" is a keyword
    args = getargs(x, 1, 2, _('only takes one or two arguments'))
    include = getset(repo, fullreposet(repo), args[0])
    if len(args) == 1:
        if not include:
            return baseset()

        descendants = set(_revdescendants(repo, include, False))
        exclude = [rev for rev in cl.headrevs()
                   if not rev in descendants and not rev in include]
    else:
        exclude = getset(repo, fullreposet(repo), args[1])

    results = set(cl.findmissingrevs(common=exclude, heads=include))
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & results

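# Illustrative usage (added note, not part of the original module; the branch
# names are hypothetical):
#
#   hg log -r 'only(release-1.9, release-1.8)'   # ::release-1.9 - ::release-1.8
#   hg log -r 'only(.)'                          # ancestors of '.' not reachable
#                                                # from any other head
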
@predicate('origin([set])', safe=True)
def origin(repo, subset, x):
    """
    Changesets that were specified as a source for the grafts, transplants or
    rebases that created the given revisions. Omitting the optional set is the
    same as passing all(). If a changeset created by these operations is itself
    specified as a source for one of these operations, only the source changeset
    for the first operation is selected.
    """
    if x is not None:
        dests = getset(repo, fullreposet(repo), x)
    else:
        dests = fullreposet(repo)

    def _firstsrc(rev):
        src = _getrevsource(repo, rev)
        if src is None:
            return None

        while True:
            prev = _getrevsource(repo, src)

            if prev is None:
                return src
            src = prev

    o = set([_firstsrc(r) for r in dests])
    o -= set([None])
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & o

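# Illustrative usage (added note, not part of the original module):
#
#   hg log -r 'origin(.)'   # the graft/transplant/rebase source (if any) from
#                           # which the working directory parent was created
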
@predicate('outgoing([path])', safe=True)
def outgoing(repo, subset, x):
    """Changesets not found in the specified destination repository, or the
    default push location.
    """
    # Avoid cycles.
    from . import (
        discovery,
        hg,
    )
    # i18n: "outgoing" is a keyword
    l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
    # i18n: "outgoing" is a keyword
    dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
    dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.peer(repo, {}, dest)
    repo.ui.pushbuffer()
    outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
    repo.ui.popbuffer()
    cl = repo.changelog
    o = set([cl.rev(r) for r in outgoing.missing])
    return subset & o

@predicate('p1([set])', safe=True)
def p1(repo, subset, x):
    """First parent of changesets in set, or the working directory.
    """
    if x is None:
        p = repo[x].p1().rev()
        if p >= 0:
            return subset & baseset([p])
        return baseset()

    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        ps.add(cl.parentrevs(r)[0])
    ps -= set([node.nullrev])
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & ps

@predicate('p2([set])', safe=True)
def p2(repo, subset, x):
    """Second parent of changesets in set, or the working directory.
    """
    if x is None:
        ps = repo[x].parents()
        try:
            p = ps[1].rev()
            if p >= 0:
                return subset & baseset([p])
            return baseset()
        except IndexError:
            return baseset()

    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        ps.add(cl.parentrevs(r)[1])
    ps -= set([node.nullrev])
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & ps

@predicate('parents([set])', safe=True)
def parents(repo, subset, x):
    """
    The set of all parents for all changesets in set, or the working directory.
    """
    if x is None:
        ps = set(p.rev() for p in repo[x].parents())
    else:
        ps = set()
        cl = repo.changelog
        up = ps.update
        parentrevs = cl.parentrevs
        for r in getset(repo, fullreposet(repo), x):
            if r == node.wdirrev:
                up(p.rev() for p in repo[r].parents())
            else:
                up(parentrevs(r))
    ps -= set([node.nullrev])
    return subset & ps

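# Illustrative usage (added note, not part of the original module):
#
#   hg log -r 'p1(.)'              # first parent of the working directory parent
#   hg log -r 'p2(merge())'        # second parents of all merge changesets
#   hg log -r 'parents(merge())'   # both parents of every merge changeset
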
def _phase(repo, subset, target):
    """helper to select all revs in phase <target>"""
    repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
    if repo._phasecache._phasesets:
        s = repo._phasecache._phasesets[target] - repo.changelog.filteredrevs
        s = baseset(s)
        s.sort() # sets are unordered, so we enforce ascending order
        return subset & s
    else:
        phase = repo._phasecache.phase
        condition = lambda r: phase(repo, r) == target
        return subset.filter(condition, condrepr=('<phase %r>', target),
                             cache=False)

@predicate('draft()', safe=True)
def draft(repo, subset, x):
    """Changeset in draft phase."""
    # i18n: "draft" is a keyword
    getargs(x, 0, 0, _("draft takes no arguments"))
    target = phases.draft
    return _phase(repo, subset, target)

@predicate('secret()', safe=True)
def secret(repo, subset, x):
    """Changeset in secret phase."""
    # i18n: "secret" is a keyword
    getargs(x, 0, 0, _("secret takes no arguments"))
    target = phases.secret
    return _phase(repo, subset, target)

def parentspec(repo, subset, x, n):
    """``set^0``
      The set.
    ``set^1`` (or ``set^``), ``set^2``
      First or second parent, respectively, of all changesets in set.
    """
    try:
        n = int(n[1])
        if n not in (0, 1, 2):
            raise ValueError
    except (TypeError, ValueError):
        raise error.ParseError(_("^ expects a number 0, 1, or 2"))
    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        if n == 0:
            ps.add(r)
        elif n == 1:
            ps.add(cl.parentrevs(r)[0])
        elif n == 2:
            parents = cl.parentrevs(r)
            if len(parents) > 1:
                ps.add(parents[1])
    return subset & ps

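# Illustrative usage (added note, not part of the original module):
#
#   hg log -r 'tip^'     # first parent of tip (same as tip^1)
#   hg log -r 'tip^2'    # second parent of tip; empty unless tip is a merge
#   hg log -r 'tip^0'    # tip itself
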
@predicate('present(set)', safe=True)
def present(repo, subset, x):
    """An empty set, if any revision in set isn't found; otherwise,
    all revisions in set.

    If any of the specified revisions is not present in the local repository,
    the query is normally aborted. But this predicate allows the query
    to continue even in such cases.
    """
    try:
        return getset(repo, subset, x)
    except error.RepoLookupError:
        return baseset()

# for internal use
@predicate('_notpublic', safe=True)
def _notpublic(repo, subset, x):
    getargs(x, 0, 0, "_notpublic takes no arguments")
    repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
    if repo._phasecache._phasesets:
        s = set()
        for u in repo._phasecache._phasesets[1:]:
            s.update(u)
        s = baseset(s - repo.changelog.filteredrevs)
        s.sort()
        return subset & s
    else:
        phase = repo._phasecache.phase
        target = phases.public
        condition = lambda r: phase(repo, r) != target
        return subset.filter(condition, condrepr=('<phase %r>', target),
                             cache=False)

@predicate('public()', safe=True)
def public(repo, subset, x):
    """Changeset in public phase."""
    # i18n: "public" is a keyword
    getargs(x, 0, 0, _("public takes no arguments"))
    phase = repo._phasecache.phase
    target = phases.public
    condition = lambda r: phase(repo, r) == target
    return subset.filter(condition, condrepr=('<phase %r>', target),
                         cache=False)

@predicate('remote([id [,path]])', safe=True)
def remote(repo, subset, x):
    """Local revision that corresponds to the given identifier in a
    remote repository, if present. Here, the '.' identifier is a
    synonym for the current local branch.
    """

    from . import hg # avoid start-up nasties
    # i18n: "remote" is a keyword
    l = getargs(x, 0, 2, _("remote takes zero, one, or two arguments"))

    q = '.'
    if len(l) > 0:
        # i18n: "remote" is a keyword
        q = getstring(l[0], _("remote requires a string id"))
    if q == '.':
        q = repo['.'].branch()

    dest = ''
    if len(l) > 1:
        # i18n: "remote" is a keyword
        dest = getstring(l[1], _("remote requires a repository path"))
    dest = repo.ui.expandpath(dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.peer(repo, {}, dest)
    n = other.lookup(q)
    if n in repo:
        r = repo[n].rev()
        if r in subset:
            return baseset([r])
    return baseset()

@predicate('removes(pattern)', safe=True)
def removes(repo, subset, x):
    """Changesets which remove files matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "removes" is a keyword
    pat = getstring(x, _("removes requires a pattern"))
    return checkstatus(repo, subset, pat, 2)

@predicate('rev(number)', safe=True)
def rev(repo, subset, x):
    """Revision with the given numeric identifier.
    """
    # i18n: "rev" is a keyword
    l = getargs(x, 1, 1, _("rev requires one argument"))
    try:
        # i18n: "rev" is a keyword
        l = int(getstring(l[0], _("rev requires a number")))
    except (TypeError, ValueError):
        # i18n: "rev" is a keyword
        raise error.ParseError(_("rev expects a number"))
    if l not in repo.changelog and l != node.nullrev:
        return baseset()
    return subset & baseset([l])

@predicate('matching(revision [, field])', safe=True)
def matching(repo, subset, x):
    """Changesets in which a given set of fields match the set of fields in the
    selected revision or set.

    To match more than one field pass the list of fields to match separated
    by spaces (e.g. ``author description``).

    Valid fields are most regular revision fields and some special fields.

    Regular revision fields are ``description``, ``author``, ``branch``,
    ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
    and ``diff``.
    Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
    contents of the revision. Two revisions matching their ``diff`` will
    also match their ``files``.

    Special fields are ``summary`` and ``metadata``:
    ``summary`` matches the first line of the description.
    ``metadata`` is equivalent to matching ``description user date``
    (i.e. it matches the main metadata fields).

    ``metadata`` is the default field which is used when no fields are
    specified. You can match more than one field at a time.
    """
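    # Illustrative usage (added note, not part of the original module):
    #
    #   hg log -r 'matching(42)'                  # same user, description and
    #                                             # date as rev 42
    #   hg log -r 'matching(tip, "author date")'  # same author and date as tip
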
    # i18n: "matching" is a keyword
    l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))

    revs = getset(repo, fullreposet(repo), l[0])

    fieldlist = ['metadata']
    if len(l) > 1:
        fieldlist = getstring(l[1],
                              # i18n: "matching" is a keyword
                              _("matching requires a string "
                                "as its second argument")).split()

    # Make sure that there are no repeated fields,
    # expand the 'special' 'metadata' field type
    # and check the 'files' whenever we check the 'diff'
    fields = []
    for field in fieldlist:
        if field == 'metadata':
            fields += ['user', 'description', 'date']
        elif field == 'diff':
            # a revision matching the diff must also match the files
            # since matching the diff is very costly, make sure to
            # also match the files first
            fields += ['files', 'diff']
        else:
            if field == 'author':
                field = 'user'
            fields.append(field)
    fields = set(fields)
    if 'summary' in fields and 'description' in fields:
        # If a revision matches its description it also matches its summary
        fields.discard('summary')

    # We may want to match more than one field
    # Not all fields take the same amount of time to be matched
    # Sort the selected fields in order of increasing matching cost
    fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
                  'files', 'description', 'substate', 'diff']
    def fieldkeyfunc(f):
        try:
            return fieldorder.index(f)
        except ValueError:
            # assume an unknown field is very costly
            return len(fieldorder)
    fields = list(fields)
    fields.sort(key=fieldkeyfunc)

    # Each field will be matched with its own "getfield" function
    # which will be added to the getfieldfuncs array of functions
    getfieldfuncs = []
    _funcs = {
        'user': lambda r: repo[r].user(),
        'branch': lambda r: repo[r].branch(),
        'date': lambda r: repo[r].date(),
        'description': lambda r: repo[r].description(),
        'files': lambda r: repo[r].files(),
        'parents': lambda r: repo[r].parents(),
        'phase': lambda r: repo[r].phase(),
        'substate': lambda r: repo[r].substate,
        'summary': lambda r: repo[r].description().splitlines()[0],
        'diff': lambda r: list(repo[r].diff(git=True),)
    }
    for info in fields:
1798 getfield = _funcs.get(info, None)
1798 getfield = _funcs.get(info, None)
1799 if getfield is None:
1799 if getfield is None:
1800 raise error.ParseError(
1800 raise error.ParseError(
1801 # i18n: "matching" is a keyword
1801 # i18n: "matching" is a keyword
1802 _("unexpected field name passed to matching: %s") % info)
1802 _("unexpected field name passed to matching: %s") % info)
1803 getfieldfuncs.append(getfield)
1803 getfieldfuncs.append(getfield)
1804 # convert the getfield array of functions into a "getinfo" function
1805 # which returns a list of field values (one value per selected field)
1806 # for a given revision
1807 getinfo = lambda r: [f(r) for f in getfieldfuncs]
1807 getinfo = lambda r: [f(r) for f in getfieldfuncs]
1808
1808
1809 def matches(x):
1809 def matches(x):
1810 for rev in revs:
1810 for rev in revs:
1811 target = getinfo(rev)
1811 target = getinfo(rev)
1812 match = True
1812 match = True
1813 for n, f in enumerate(getfieldfuncs):
1813 for n, f in enumerate(getfieldfuncs):
1814 if target[n] != f(x):
1814 if target[n] != f(x):
1815 match = False
1815 match = False
1816 if match:
1816 if match:
1817 return True
1817 return True
1818 return False
1818 return False
1819
1819
1820 return subset.filter(matches, condrepr=('<matching%r %r>', fields, revs))
1820 return subset.filter(matches, condrepr=('<matching%r %r>', fields, revs))
1821
1821
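# Illustrative sketch (not part of the original module): the cost ordering used
# by matching() above, reproduced standalone.  '_examplefieldcost' is a
# hypothetical helper; its cost table mirrors 'fieldorder'.
def _examplefieldcost(fields):
    """Order fields from cheapest to most expensive to compare.

    Unknown fields are assumed to be very costly and sort last.

    >>> _examplefieldcost(['diff', 'user', 'mystery'])
    ['user', 'diff', 'mystery']
    """
    order = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
             'files', 'description', 'substate', 'diff']
    def cost(f):
        try:
            return order.index(f)
        except ValueError:
            # unknown fields sort after every known one
            return len(order)
    return sorted(fields, key=cost)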
1822 @predicate('reverse(set)', safe=True)
1822 @predicate('reverse(set)', safe=True)
1823 def reverse(repo, subset, x):
1823 def reverse(repo, subset, x):
1824 """Reverse order of set.
1824 """Reverse order of set.
1825 """
1825 """
1826 l = getset(repo, subset, x)
1826 l = getset(repo, subset, x)
1827 l.reverse()
1827 l.reverse()
1828 return l
1828 return l
1829
1829
1830 @predicate('roots(set)', safe=True)
1830 @predicate('roots(set)', safe=True)
1831 def roots(repo, subset, x):
1831 def roots(repo, subset, x):
1832 """Changesets in set with no parent changeset in set.
1832 """Changesets in set with no parent changeset in set.
1833 """
1833 """
1834 s = getset(repo, fullreposet(repo), x)
1834 s = getset(repo, fullreposet(repo), x)
1835 parents = repo.changelog.parentrevs
1835 parents = repo.changelog.parentrevs
1836 def filter(r):
1836 def filter(r):
1837 for p in parents(r):
1837 for p in parents(r):
1838 if 0 <= p and p in s:
1838 if 0 <= p and p in s:
1839 return False
1839 return False
1840 return True
1840 return True
1841 return subset & s.filter(filter, condrepr='<roots>')
1841 return subset & s.filter(filter, condrepr='<roots>')
1842
1842
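# Illustrative sketch (not part of the original module): the filtering rule of
# roots() applied to a plain parent map.  '_examplerootsof' and 'parentmap' are
# hypothetical stand-ins for the changelog's parentrevs().
def _examplerootsof(revs, parentmap):
    """Return the members of 'revs' that have no parent inside 'revs'.

    >>> parentmap = {0: (-1, -1), 1: (0, -1), 2: (1, -1), 3: (1, -1)}
    >>> sorted(_examplerootsof({1, 2, 3}, parentmap))
    [1]
    """
    return set(r for r in revs
               if not any(p in revs for p in parentmap[r] if p >= 0))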
1843 _sortkeyfuncs = {
1843 _sortkeyfuncs = {
1844 'rev': lambda c: c.rev(),
1844 'rev': lambda c: c.rev(),
1845 'branch': lambda c: c.branch(),
1845 'branch': lambda c: c.branch(),
1846 'desc': lambda c: c.description(),
1846 'desc': lambda c: c.description(),
1847 'user': lambda c: c.user(),
1847 'user': lambda c: c.user(),
1848 'author': lambda c: c.user(),
1848 'author': lambda c: c.user(),
1849 'date': lambda c: c.date()[0],
1849 'date': lambda c: c.date()[0],
1850 }
1850 }
1851
1851
1852 def _getsortargs(x):
1852 def _getsortargs(x):
1853 """Parse sort options into (set, [(key, reverse)], opts)"""
1853 """Parse sort options into (set, [(key, reverse)], opts)"""
1854 args = getargsdict(x, 'sort', 'set keys topo.firstbranch')
1854 args = getargsdict(x, 'sort', 'set keys topo.firstbranch')
1855 if 'set' not in args:
1855 if 'set' not in args:
1856 # i18n: "sort" is a keyword
1856 # i18n: "sort" is a keyword
1857 raise error.ParseError(_('sort requires one or two arguments'))
1857 raise error.ParseError(_('sort requires one or two arguments'))
1858 keys = "rev"
1858 keys = "rev"
1859 if 'keys' in args:
1859 if 'keys' in args:
1860 # i18n: "sort" is a keyword
1860 # i18n: "sort" is a keyword
1861 keys = getstring(args['keys'], _("sort spec must be a string"))
1861 keys = getstring(args['keys'], _("sort spec must be a string"))
1862
1862
1863 keyflags = []
1863 keyflags = []
1864 for k in keys.split():
1864 for k in keys.split():
1865 fk = k
1865 fk = k
1866 reverse = (k[0] == '-')
1866 reverse = (k[0] == '-')
1867 if reverse:
1867 if reverse:
1868 k = k[1:]
1868 k = k[1:]
1869 if k not in _sortkeyfuncs and k != 'topo':
1869 if k not in _sortkeyfuncs and k != 'topo':
1870 raise error.ParseError(_("unknown sort key %r") % fk)
1870 raise error.ParseError(_("unknown sort key %r") % fk)
1871 keyflags.append((k, reverse))
1871 keyflags.append((k, reverse))
1872
1872
1873 if len(keyflags) > 1 and any(k == 'topo' for k, reverse in keyflags):
1873 if len(keyflags) > 1 and any(k == 'topo' for k, reverse in keyflags):
1874 # i18n: "topo" is a keyword
1874 # i18n: "topo" is a keyword
1875 raise error.ParseError(_('topo sort order cannot be combined '
1875 raise error.ParseError(_('topo sort order cannot be combined '
1876 'with other sort keys'))
1876 'with other sort keys'))
1877
1877
1878 opts = {}
1878 opts = {}
1879 if 'topo.firstbranch' in args:
1879 if 'topo.firstbranch' in args:
1880 if any(k == 'topo' for k, reverse in keyflags):
1880 if any(k == 'topo' for k, reverse in keyflags):
1881 opts['topo.firstbranch'] = args['topo.firstbranch']
1881 opts['topo.firstbranch'] = args['topo.firstbranch']
1882 else:
1882 else:
1883 # i18n: "topo" and "topo.firstbranch" are keywords
1883 # i18n: "topo" and "topo.firstbranch" are keywords
1884 raise error.ParseError(_('topo.firstbranch can only be used '
1884 raise error.ParseError(_('topo.firstbranch can only be used '
1885 'when using the topo sort key'))
1885 'when using the topo sort key'))
1886
1886
1887 return args['set'], keyflags, opts
1887 return args['set'], keyflags, opts
1888
1888
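# Illustrative sketch (not part of the original module): how _getsortargs()
# above splits a sort spec into (key, reverse) pairs.  '_examplekeyflags' is a
# hypothetical helper and performs no validation against _sortkeyfuncs.
def _examplekeyflags(spec):
    """
    >>> _examplekeyflags('-date user')
    [('date', True), ('user', False)]
    """
    keyflags = []
    for k in spec.split():
        reverse = k.startswith('-')
        if reverse:
            k = k[1:]
        keyflags.append((k, reverse))
    return keyflags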
1889 @predicate('sort(set[, [-]key... [, ...]])', safe=True)
1889 @predicate('sort(set[, [-]key... [, ...]])', safe=True)
1890 def sort(repo, subset, x):
1890 def sort(repo, subset, x):
1891 """Sort set by keys. The default sort order is ascending, specify a key
1891 """Sort set by keys. The default sort order is ascending, specify a key
1892 as ``-key`` to sort in descending order.
1892 as ``-key`` to sort in descending order.
1893
1893
1894 The keys can be:
1894 The keys can be:
1895
1895
1896 - ``rev`` for the revision number,
1896 - ``rev`` for the revision number,
1897 - ``branch`` for the branch name,
1897 - ``branch`` for the branch name,
1898 - ``desc`` for the commit message (description),
1898 - ``desc`` for the commit message (description),
1899 - ``user`` for user name (``author`` can be used as an alias),
1899 - ``user`` for user name (``author`` can be used as an alias),
1900 - ``date`` for the commit date
1900 - ``date`` for the commit date
1901 - ``topo`` for a reverse topographical sort
1901 - ``topo`` for a reverse topographical sort
1902
1902
1903 The ``topo`` sort order cannot be combined with other sort keys. This sort
1903 The ``topo`` sort order cannot be combined with other sort keys. This sort
1904 takes one optional argument, ``topo.firstbranch``, which takes a revset that
1904 takes one optional argument, ``topo.firstbranch``, which takes a revset that
1905 specifies what topographical branches to prioritize in the sort.
1905 specifies what topographical branches to prioritize in the sort.
1906
1906
1907 """
1907 """
1908 s, keyflags, opts = _getsortargs(x)
1908 s, keyflags, opts = _getsortargs(x)
1909 revs = getset(repo, subset, s)
1909 revs = getset(repo, subset, s)
1910
1910
1911 if not keyflags:
1911 if not keyflags:
1912 return revs
1912 return revs
1913 if len(keyflags) == 1 and keyflags[0][0] == "rev":
1913 if len(keyflags) == 1 and keyflags[0][0] == "rev":
1914 revs.sort(reverse=keyflags[0][1])
1914 revs.sort(reverse=keyflags[0][1])
1915 return revs
1915 return revs
1916 elif keyflags[0][0] == "topo":
1916 elif keyflags[0][0] == "topo":
1917 firstbranch = ()
1917 firstbranch = ()
1918 if 'topo.firstbranch' in opts:
1918 if 'topo.firstbranch' in opts:
1919 firstbranch = getset(repo, subset, opts['topo.firstbranch'])
1919 firstbranch = getset(repo, subset, opts['topo.firstbranch'])
1920 revs = baseset(_toposort(revs, repo.changelog.parentrevs, firstbranch),
1920 revs = baseset(_toposort(revs, repo.changelog.parentrevs, firstbranch),
1921 istopo=True)
1921 istopo=True)
1922 if keyflags[0][1]:
1922 if keyflags[0][1]:
1923 revs.reverse()
1923 revs.reverse()
1924 return revs
1924 return revs
1925
1925
1926 # sort() is guaranteed to be stable
1926 # sort() is guaranteed to be stable
1927 ctxs = [repo[r] for r in revs]
1927 ctxs = [repo[r] for r in revs]
1928 for k, reverse in reversed(keyflags):
1928 for k, reverse in reversed(keyflags):
1929 ctxs.sort(key=_sortkeyfuncs[k], reverse=reverse)
1929 ctxs.sort(key=_sortkeyfuncs[k], reverse=reverse)
1930 return baseset([c.rev() for c in ctxs])
1930 return baseset([c.rev() for c in ctxs])
1931
1931
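# Illustrative sketch (not part of the original module): the multi-key sort in
# sort() above relies on the stability of list.sort(); applying one single-key
# sort per key, in reverse key order, is equivalent to a single multi-key sort.
# '_examplemultisort' is a hypothetical helper working on plain tuples.
def _examplemultisort(items, keyflags):
    """Sort (user, date) tuples by several (index, reverse) pairs.

    >>> rows = [('bob', 2), ('amy', 2), ('amy', 1)]
    >>> _examplemultisort(rows, [(0, False), (1, True)])
    [('amy', 2), ('amy', 1), ('bob', 2)]
    """
    items = list(items)
    for idx, reverse in reversed(keyflags):
        # each pass is stable, so earlier keys end up taking precedence
        items.sort(key=lambda t: t[idx], reverse=reverse)
    return items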
1932 def _toposort(revs, parentsfunc, firstbranch=()):
1932 def _toposort(revs, parentsfunc, firstbranch=()):
1933 """Yield revisions from heads to roots one (topo) branch at a time.
1933 """Yield revisions from heads to roots one (topo) branch at a time.
1934
1934
1935 This function aims to be used by a graph generator that wishes to minimize
1935 This function aims to be used by a graph generator that wishes to minimize
1936 the number of parallel branches and their interleaving.
1936 the number of parallel branches and their interleaving.
1937
1937
1938 Example iteration order (numbers show the "true" order in a changelog):
1938 Example iteration order (numbers show the "true" order in a changelog):
1939
1939
1940 o 4
1940 o 4
1941 |
1941 |
1942 o 1
1942 o 1
1943 |
1943 |
1944 | o 3
1944 | o 3
1945 | |
1945 | |
1946 | o 2
1946 | o 2
1947 |/
1947 |/
1948 o 0
1948 o 0
1949
1949
1950 Note that the ancestors of merges are understood by the current
1950 Note that the ancestors of merges are understood by the current
1951 algorithm to be on the same branch. This means no reordering will
1951 algorithm to be on the same branch. This means no reordering will
1952 occur behind a merge.
1952 occur behind a merge.
1953 """
1953 """
1954
1954
1955 ### Quick summary of the algorithm
1956 #
1957 # This function is based around a "retention" principle. We keep revisions
1958 # in memory until we are ready to emit a whole branch that immediately
1959 # "merges" into an existing one. This reduces the number of parallel
1960 # branches with interleaved revisions.
1961 #
1962 # During iteration revs are split into two groups:
1963 # A) revisions already emitted
1964 # B) revisions in "retention". They are stored as different subgroups.
1965 #
1966 # For each REV, we apply the following logic:
1967 #
1968 # 1) if REV is a parent of (A), we will emit it. If there is a
1969 # retention group ((B) above) that is blocked on REV being
1970 # available, we emit all the revisions out of that retention
1971 # group first.
1972 #
1973 # 2) else, we search for a subgroup in (B) waiting for REV to become
1974 # available; if such a subgroup exists, we add REV to it and the
1975 # subgroup now waits for REV.parents() to become available.
1976 #
1977 # 3) finally, if no such subgroup exists in (B), we create a new one.
1978 #
1979 #
1980 # To bootstrap the algorithm, we emit the tipmost revision (which
1981 # puts it in group (A) from above).
1982
1982
1983 revs.sort(reverse=True)
1983 revs.sort(reverse=True)
1984
1984
1985 # Set of parents of revisions that have been emitted. They can be considered
1986 # unblocked as the graph generator is already aware of them, so there is no
1987 # need to delay the revisions that reference them.
1988 #
1989 # If someone wants to prioritize a branch over the others, pre-filling this
1990 # set will force all other branches to wait until this branch is ready to be
1991 # emitted.
1992 unblocked = set(firstbranch)
1992 unblocked = set(firstbranch)
1993
1993
1994 # list of groups waiting to be displayed, each group is defined by:
1995 #
1996 # (revs: list of revs waiting to be displayed,
1997 # blocked: set of revs that cannot be displayed before those in 'revs')
1998 #
1999 # The second value ('blocked') corresponds to parents of any revision in the
2000 # group ('revs') that is not itself contained in the group. The main idea
2001 # of this algorithm is to delay as much as possible the emission of any
2002 # revision. This means waiting until we are about to display those
2003 # parents before displaying the revs in a group.
2004 #
2005 # This first implementation is smart until it encounters a merge: it will
2006 # emit revs as soon as any parent is about to be emitted and can grow an
2007 # arbitrary number of revs in 'blocked'. In practice this means we properly
2008 # retain new branches but give up on any special ordering for ancestors
2009 # of merges. The implementation can be improved to handle this better.
2010 #
2011 # The first subgroup is special. It corresponds to all the revisions that
2012 # were already emitted. The 'revs' list is expected to be empty and the
2013 # 'blocked' set contains the parent revisions of already emitted revisions.
2014 #
2015 # You could pre-seed the <parents> set of groups[0] with specific
2016 # changesets to select which branch is emitted first.
2017 groups = [([], unblocked)]
2017 groups = [([], unblocked)]
2018 pendingheap = []
2018 pendingheap = []
2019 pendingset = set()
2019 pendingset = set()
2020
2020
2021 heapq.heapify(pendingheap)
2021 heapq.heapify(pendingheap)
2022 heappop = heapq.heappop
2022 heappop = heapq.heappop
2023 heappush = heapq.heappush
2023 heappush = heapq.heappush
2024 for currentrev in revs:
2024 for currentrev in revs:
2025 # The heap works with the smallest element; we want the highest, so we invert
2026 if currentrev not in pendingset:
2026 if currentrev not in pendingset:
2027 heappush(pendingheap, -currentrev)
2027 heappush(pendingheap, -currentrev)
2028 pendingset.add(currentrev)
2028 pendingset.add(currentrev)
2029 # iterate on pending revs until the current rev has been
2030 # processed.
2031 rev = None
2031 rev = None
2032 while rev != currentrev:
2032 while rev != currentrev:
2033 rev = -heappop(pendingheap)
2033 rev = -heappop(pendingheap)
2034 pendingset.remove(rev)
2034 pendingset.remove(rev)
2035
2035
2036 # Seek for a subgroup blocked, waiting for the current revision.
2036 # Seek for a subgroup blocked, waiting for the current revision.
2037 matching = [i for i, g in enumerate(groups) if rev in g[1]]
2037 matching = [i for i, g in enumerate(groups) if rev in g[1]]
2038
2038
2039 if matching:
2039 if matching:
2040 # The main idea is to gather together all sets that are blocked
2040 # The main idea is to gather together all sets that are blocked
2041 # on the same revision.
2041 # on the same revision.
2042 #
2042 #
2043 # Groups are merged when a common blocking ancestor is
2043 # Groups are merged when a common blocking ancestor is
2044 # observed. For example, given two groups:
2044 # observed. For example, given two groups:
2045 #
2045 #
2046 # revs [5, 4] waiting for 1
2046 # revs [5, 4] waiting for 1
2047 # revs [3, 2] waiting for 1
2047 # revs [3, 2] waiting for 1
2048 #
2048 #
2049 # These two groups will be merged when we process
2049 # These two groups will be merged when we process
2050 # 1. In theory, we could have merged the groups when
2050 # 1. In theory, we could have merged the groups when
2051 # we added 2 to the group it is now in (we could have
2051 # we added 2 to the group it is now in (we could have
2052 # noticed the groups were both blocked on 1 then), but
2052 # noticed the groups were both blocked on 1 then), but
2053 # the way it works now makes the algorithm simpler.
2053 # the way it works now makes the algorithm simpler.
2054 #
2054 #
2055 # We also always keep the oldest subgroup first. We can
2056 # probably improve the behavior by having the longest set
2057 # first. That way, graph algorithms could minimise the length
2058 # of parallel lines in their drawing. This is currently not done.
2059 targetidx = matching.pop(0)
2059 targetidx = matching.pop(0)
2060 trevs, tparents = groups[targetidx]
2060 trevs, tparents = groups[targetidx]
2061 for i in matching:
2061 for i in matching:
2062 gr = groups[i]
2062 gr = groups[i]
2063 trevs.extend(gr[0])
2063 trevs.extend(gr[0])
2064 tparents |= gr[1]
2064 tparents |= gr[1]
2065 # delete all merged subgroups (except the one we kept)
2065 # delete all merged subgroups (except the one we kept)
2066 # (starting from the last subgroup for performance and
2066 # (starting from the last subgroup for performance and
2067 # sanity reasons)
2067 # sanity reasons)
2068 for i in reversed(matching):
2068 for i in reversed(matching):
2069 del groups[i]
2069 del groups[i]
2070 else:
2070 else:
2071 # This is a new head. We create a new subgroup for it.
2071 # This is a new head. We create a new subgroup for it.
2072 targetidx = len(groups)
2072 targetidx = len(groups)
2073 groups.append(([], set([rev])))
2073 groups.append(([], set([rev])))
2074
2074
2075 gr = groups[targetidx]
2075 gr = groups[targetidx]
2076
2076
2077 # We now add the current nodes to this subgroup. This is done
2078 # after the subgroup merging because all elements from a subgroup
2079 # that relied on this rev must precede it.
2080 #
2081 # We also update the <parents> set to include the parents of the
2082 # new nodes.
2083 if rev == currentrev: # only display stuff in rev
2083 if rev == currentrev: # only display stuff in rev
2084 gr[0].append(rev)
2084 gr[0].append(rev)
2085 gr[1].remove(rev)
2085 gr[1].remove(rev)
2086 parents = [p for p in parentsfunc(rev) if p > node.nullrev]
2086 parents = [p for p in parentsfunc(rev) if p > node.nullrev]
2087 gr[1].update(parents)
2087 gr[1].update(parents)
2088 for p in parents:
2088 for p in parents:
2089 if p not in pendingset:
2089 if p not in pendingset:
2090 pendingset.add(p)
2090 pendingset.add(p)
2091 heappush(pendingheap, -p)
2091 heappush(pendingheap, -p)
2092
2092
2093 # Look for a subgroup to display
2093 # Look for a subgroup to display
2094 #
2094 #
2095 # When unblocked is empty (if clause), we were not waiting for any
2095 # When unblocked is empty (if clause), we were not waiting for any
2096 # revisions during the first iteration (if no priority was given) or
2096 # revisions during the first iteration (if no priority was given) or
2097 # if we emitted a whole disconnected set of the graph (reached a
2097 # if we emitted a whole disconnected set of the graph (reached a
2098 # root). In that case we arbitrarily take the oldest known
2098 # root). In that case we arbitrarily take the oldest known
2099 # subgroup. The heuristic could probably be better.
2099 # subgroup. The heuristic could probably be better.
2100 #
2100 #
2101 # Otherwise (elif clause) if the subgroup is blocked on
2101 # Otherwise (elif clause) if the subgroup is blocked on
2102 # a revision we just emitted, we can safely emit it as
2102 # a revision we just emitted, we can safely emit it as
2103 # well.
2103 # well.
2104 if not unblocked:
2104 if not unblocked:
2105 if len(groups) > 1: # display other subset
2105 if len(groups) > 1: # display other subset
2106 targetidx = 1
2106 targetidx = 1
2107 gr = groups[1]
2107 gr = groups[1]
2108 elif not gr[1] & unblocked:
2108 elif not gr[1] & unblocked:
2109 gr = None
2109 gr = None
2110
2110
2111 if gr is not None:
2111 if gr is not None:
2112 # update the set of awaited revisions with the one from the
2112 # update the set of awaited revisions with the one from the
2113 # subgroup
2113 # subgroup
2114 unblocked |= gr[1]
2114 unblocked |= gr[1]
2115 # output all revisions in the subgroup
2115 # output all revisions in the subgroup
2116 for r in gr[0]:
2116 for r in gr[0]:
2117 yield r
2117 yield r
2118 # delete the subgroup that you just output
2118 # delete the subgroup that you just output
2119 # unless it is groups[0] in which case you just empty it.
2119 # unless it is groups[0] in which case you just empty it.
2120 if targetidx:
2120 if targetidx:
2121 del groups[targetidx]
2121 del groups[targetidx]
2122 else:
2122 else:
2123 gr[0][:] = []
2123 gr[0][:] = []
2124 # Check if we have some subgroup waiting for revisions we are not going to
2124 # Check if we have some subgroup waiting for revisions we are not going to
2125 # iterate over
2125 # iterate over
2126 for g in groups:
2126 for g in groups:
2127 for r in g[0]:
2127 for r in g[0]:
2128 yield r
2128 yield r
2129
2129
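# Illustrative sketch (not part of the original module): _toposort() applied to
# the small graph drawn in its docstring.  'parentmap' is a hypothetical
# stand-in for changelog.parentrevs; the expected order matches the drawing.
def _exampletoposort():
    """
    >>> parentmap = {0: (-1, -1), 1: (0, -1), 2: (0, -1), 3: (2, -1),
    ...              4: (1, -1)}
    >>> list(_toposort([0, 1, 2, 3, 4], lambda r: parentmap[r]))
    [4, 1, 3, 2, 0]
    """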
2130 @predicate('subrepo([pattern])')
2130 @predicate('subrepo([pattern])')
2131 def subrepo(repo, subset, x):
2131 def subrepo(repo, subset, x):
2132 """Changesets that add, modify or remove the given subrepo. If no subrepo
2132 """Changesets that add, modify or remove the given subrepo. If no subrepo
2133 pattern is named, any subrepo changes are returned.
2133 pattern is named, any subrepo changes are returned.
2134 """
2134 """
2135 # i18n: "subrepo" is a keyword
2135 # i18n: "subrepo" is a keyword
2136 args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
2136 args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
2137 pat = None
2137 pat = None
2138 if len(args) != 0:
2138 if len(args) != 0:
2139 pat = getstring(args[0], _("subrepo requires a pattern"))
2139 pat = getstring(args[0], _("subrepo requires a pattern"))
2140
2140
2141 m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])
2141 m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])
2142
2142
2143 def submatches(names):
2143 def submatches(names):
2144 k, p, m = util.stringmatcher(pat)
2144 k, p, m = util.stringmatcher(pat)
2145 for name in names:
2145 for name in names:
2146 if m(name):
2146 if m(name):
2147 yield name
2147 yield name
2148
2148
2149 def matches(x):
2149 def matches(x):
2150 c = repo[x]
2150 c = repo[x]
2151 s = repo.status(c.p1().node(), c.node(), match=m)
2151 s = repo.status(c.p1().node(), c.node(), match=m)
2152
2152
2153 if pat is None:
2153 if pat is None:
2154 return s.added or s.modified or s.removed
2154 return s.added or s.modified or s.removed
2155
2155
2156 if s.added:
2156 if s.added:
2157 return any(submatches(c.substate.keys()))
2157 return any(submatches(c.substate.keys()))
2158
2158
2159 if s.modified:
2159 if s.modified:
2160 subs = set(c.p1().substate.keys())
2160 subs = set(c.p1().substate.keys())
2161 subs.update(c.substate.keys())
2161 subs.update(c.substate.keys())
2162
2162
2163 for path in submatches(subs):
2163 for path in submatches(subs):
2164 if c.p1().substate.get(path) != c.substate.get(path):
2164 if c.p1().substate.get(path) != c.substate.get(path):
2165 return True
2165 return True
2166
2166
2167 if s.removed:
2167 if s.removed:
2168 return any(submatches(c.p1().substate.keys()))
2168 return any(submatches(c.p1().substate.keys()))
2169
2169
2170 return False
2170 return False
2171
2171
2172 return subset.filter(matches, condrepr=('<subrepo %r>', pat))
2172 return subset.filter(matches, condrepr=('<subrepo %r>', pat))
2173
2173
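# Illustrative usage sketch (hypothetical 'repo' object, not part of the
# original module): revisions touching any subrepo, and revisions touching
# subrepos whose path starts with "lib":
#
#   repo.revs("subrepo()")
#   repo.revs("subrepo('re:^lib')")
#
# Patterns go through util.stringmatcher(), so 'literal:' and 're:' prefixes
# behave as for other revset string arguments.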
2174 def _substringmatcher(pattern):
2174 def _substringmatcher(pattern):
2175 kind, pattern, matcher = util.stringmatcher(pattern)
2175 kind, pattern, matcher = util.stringmatcher(pattern)
2176 if kind == 'literal':
2176 if kind == 'literal':
2177 matcher = lambda s: pattern in s
2177 matcher = lambda s: pattern in s
2178 return kind, pattern, matcher
2178 return kind, pattern, matcher
2179
2179
2180 @predicate('tag([name])', safe=True)
2180 @predicate('tag([name])', safe=True)
2181 def tag(repo, subset, x):
2181 def tag(repo, subset, x):
2182 """The specified tag by name, or all tagged revisions if no name is given.
2182 """The specified tag by name, or all tagged revisions if no name is given.
2183
2183
2184 If `name` starts with `re:`, the remainder of the name is treated as
2184 If `name` starts with `re:`, the remainder of the name is treated as
2185 a regular expression. To match a tag that actually starts with `re:`,
2185 a regular expression. To match a tag that actually starts with `re:`,
2186 use the prefix `literal:`.
2186 use the prefix `literal:`.
2187 """
2187 """
2188 # i18n: "tag" is a keyword
2188 # i18n: "tag" is a keyword
2189 args = getargs(x, 0, 1, _("tag takes one or no arguments"))
2189 args = getargs(x, 0, 1, _("tag takes one or no arguments"))
2190 cl = repo.changelog
2190 cl = repo.changelog
2191 if args:
2191 if args:
2192 pattern = getstring(args[0],
2192 pattern = getstring(args[0],
2193 # i18n: "tag" is a keyword
2193 # i18n: "tag" is a keyword
2194 _('the argument to tag must be a string'))
2194 _('the argument to tag must be a string'))
2195 kind, pattern, matcher = util.stringmatcher(pattern)
2195 kind, pattern, matcher = util.stringmatcher(pattern)
2196 if kind == 'literal':
2196 if kind == 'literal':
2197 # avoid resolving all tags
2197 # avoid resolving all tags
2198 tn = repo._tagscache.tags.get(pattern, None)
2198 tn = repo._tagscache.tags.get(pattern, None)
2199 if tn is None:
2199 if tn is None:
2200 raise error.RepoLookupError(_("tag '%s' does not exist")
2200 raise error.RepoLookupError(_("tag '%s' does not exist")
2201 % pattern)
2201 % pattern)
2202 s = set([repo[tn].rev()])
2202 s = set([repo[tn].rev()])
2203 else:
2203 else:
2204 s = set([cl.rev(n) for t, n in repo.tagslist() if matcher(t)])
2204 s = set([cl.rev(n) for t, n in repo.tagslist() if matcher(t)])
2205 else:
2205 else:
2206 s = set([cl.rev(n) for t, n in repo.tagslist() if t != 'tip'])
2206 s = set([cl.rev(n) for t, n in repo.tagslist() if t != 'tip'])
2207 return subset & s
2207 return subset & s
2208
2208
2209 @predicate('tagged', safe=True)
2209 @predicate('tagged', safe=True)
2210 def tagged(repo, subset, x):
2210 def tagged(repo, subset, x):
2211 return tag(repo, subset, x)
2211 return tag(repo, subset, x)
2212
2212
2213 @predicate('unstable()', safe=True)
2213 @predicate('unstable()', safe=True)
2214 def unstable(repo, subset, x):
2214 def unstable(repo, subset, x):
2215 """Non-obsolete changesets with obsolete ancestors.
2215 """Non-obsolete changesets with obsolete ancestors.
2216 """
2216 """
2217 # i18n: "unstable" is a keyword
2217 # i18n: "unstable" is a keyword
2218 getargs(x, 0, 0, _("unstable takes no arguments"))
2218 getargs(x, 0, 0, _("unstable takes no arguments"))
2219 unstables = obsmod.getrevs(repo, 'unstable')
2219 unstables = obsmod.getrevs(repo, 'unstable')
2220 return subset & unstables
2220 return subset & unstables
2221
2221
2222
2222
2223 @predicate('user(string)', safe=True)
2223 @predicate('user(string)', safe=True)
2224 def user(repo, subset, x):
2224 def user(repo, subset, x):
2225 """User name contains string. The match is case-insensitive.
2225 """User name contains string. The match is case-insensitive.
2226
2226
2227 If `string` starts with `re:`, the remainder of the string is treated as
2227 If `string` starts with `re:`, the remainder of the string is treated as
2228 a regular expression. To match a user that actually contains `re:`, use
2228 a regular expression. To match a user that actually contains `re:`, use
2229 the prefix `literal:`.
2229 the prefix `literal:`.
2230 """
2230 """
2231 return author(repo, subset, x)
2231 return author(repo, subset, x)
2232
2232
2233 # experimental
2233 # experimental
2234 @predicate('wdir', safe=True)
2234 @predicate('wdir', safe=True)
2235 def wdir(repo, subset, x):
2235 def wdir(repo, subset, x):
2236 # i18n: "wdir" is a keyword
2236 # i18n: "wdir" is a keyword
2237 getargs(x, 0, 0, _("wdir takes no arguments"))
2237 getargs(x, 0, 0, _("wdir takes no arguments"))
2238 if node.wdirrev in subset or isinstance(subset, fullreposet):
2238 if node.wdirrev in subset or isinstance(subset, fullreposet):
2239 return baseset([node.wdirrev])
2239 return baseset([node.wdirrev])
2240 return baseset()
2240 return baseset()
2241
2241
2242 # for internal use
2242 # for internal use
2243 @predicate('_list', safe=True)
2243 @predicate('_list', safe=True)
2244 def _list(repo, subset, x):
2244 def _list(repo, subset, x):
2245 s = getstring(x, "internal error")
2245 s = getstring(x, "internal error")
2246 if not s:
2246 if not s:
2247 return baseset()
2247 return baseset()
2248 # remove duplicates here. it's difficult for caller to deduplicate sets
2248 # remove duplicates here. it's difficult for caller to deduplicate sets
2249 # because different symbols can point to the same rev.
2249 # because different symbols can point to the same rev.
2250 cl = repo.changelog
2250 cl = repo.changelog
2251 ls = []
2251 ls = []
2252 seen = set()
2252 seen = set()
2253 for t in s.split('\0'):
2253 for t in s.split('\0'):
2254 try:
2254 try:
2255 # fast path for integer revision
2255 # fast path for integer revision
2256 r = int(t)
2256 r = int(t)
2257 if str(r) != t or r not in cl:
2257 if str(r) != t or r not in cl:
2258 raise ValueError
2258 raise ValueError
2259 revs = [r]
2259 revs = [r]
2260 except ValueError:
2260 except ValueError:
2261 revs = stringset(repo, subset, t)
2261 revs = stringset(repo, subset, t)
2262
2262
2263 for r in revs:
2263 for r in revs:
2264 if r in seen:
2264 if r in seen:
2265 continue
2265 continue
2266 if (r in subset
2266 if (r in subset
2267 or r == node.nullrev and isinstance(subset, fullreposet)):
2267 or r == node.nullrev and isinstance(subset, fullreposet)):
2268 ls.append(r)
2268 ls.append(r)
2269 seen.add(r)
2269 seen.add(r)
2270 return baseset(ls)
2270 return baseset(ls)
2271
2271
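# Illustrative sketch (not part of the original module): the order-preserving
# deduplication used by _list() above.  '_exampledupfree' is a hypothetical
# helper working on plain integers.
def _exampledupfree(revs):
    """Drop repeated revisions while keeping first-occurrence order.

    >>> _exampledupfree([3, 1, 3, 2, 1])
    [3, 1, 2]
    """
    seen = set()
    result = []
    for r in revs:
        if r not in seen:
            seen.add(r)
            result.append(r)
    return result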
2272 # for internal use
2272 # for internal use
2273 @predicate('_intlist', safe=True)
2273 @predicate('_intlist', safe=True)
2274 def _intlist(repo, subset, x):
2274 def _intlist(repo, subset, x):
2275 s = getstring(x, "internal error")
2275 s = getstring(x, "internal error")
2276 if not s:
2276 if not s:
2277 return baseset()
2277 return baseset()
2278 ls = [int(r) for r in s.split('\0')]
2278 ls = [int(r) for r in s.split('\0')]
2279 s = subset
2279 s = subset
2280 return baseset([r for r in ls if r in s])
2280 return baseset([r for r in ls if r in s])
2281
2281
2282 # for internal use
2282 # for internal use
2283 @predicate('_hexlist', safe=True)
2283 @predicate('_hexlist', safe=True)
2284 def _hexlist(repo, subset, x):
2284 def _hexlist(repo, subset, x):
2285 s = getstring(x, "internal error")
2285 s = getstring(x, "internal error")
2286 if not s:
2286 if not s:
2287 return baseset()
2287 return baseset()
2288 cl = repo.changelog
2288 cl = repo.changelog
2289 ls = [cl.rev(node.bin(r)) for r in s.split('\0')]
2289 ls = [cl.rev(node.bin(r)) for r in s.split('\0')]
2290 s = subset
2290 s = subset
2291 return baseset([r for r in ls if r in s])
2291 return baseset([r for r in ls if r in s])
2292
2292
2293 methods = {
2293 methods = {
2294 "range": rangeset,
2294 "range": rangeset,
2295 "dagrange": dagrange,
2295 "dagrange": dagrange,
2296 "string": stringset,
2296 "string": stringset,
2297 "symbol": stringset,
2297 "symbol": stringset,
2298 "and": andset,
2298 "and": andset,
2299 "or": orset,
2299 "or": orset,
2300 "not": notset,
2300 "not": notset,
2301 "difference": differenceset,
2301 "difference": differenceset,
2302 "list": listset,
2302 "list": listset,
2303 "keyvalue": keyvaluepair,
2303 "keyvalue": keyvaluepair,
2304 "func": func,
2304 "func": func,
2305 "ancestor": ancestorspec,
2305 "ancestor": ancestorspec,
2306 "parent": parentspec,
2306 "parent": parentspec,
2307 "parentpost": p1,
2307 "parentpost": p1,
2308 }
2308 }
2309
2309
2310 def _matchonly(revs, bases):
2310 def _matchonly(revs, bases):
2311 """
2311 """
2312 >>> f = lambda *args: _matchonly(*map(parse, args))
2312 >>> f = lambda *args: _matchonly(*map(parse, args))
2313 >>> f('ancestors(A)', 'not ancestors(B)')
2313 >>> f('ancestors(A)', 'not ancestors(B)')
2314 ('list', ('symbol', 'A'), ('symbol', 'B'))
2314 ('list', ('symbol', 'A'), ('symbol', 'B'))
2315 """
2315 """
2316 if (revs is not None
2316 if (revs is not None
2317 and revs[0] == 'func'
2317 and revs[0] == 'func'
2318 and getsymbol(revs[1]) == 'ancestors'
2318 and getsymbol(revs[1]) == 'ancestors'
2319 and bases is not None
2319 and bases is not None
2320 and bases[0] == 'not'
2320 and bases[0] == 'not'
2321 and bases[1][0] == 'func'
2321 and bases[1][0] == 'func'
2322 and getsymbol(bases[1][1]) == 'ancestors'):
2322 and getsymbol(bases[1][1]) == 'ancestors'):
2323 return ('list', revs[2], bases[1][2])
2323 return ('list', revs[2], bases[1][2])
2324
2324
2325 def _fixops(x):
2325 def _fixops(x):
2326 """Rewrite raw parsed tree to resolve ambiguous syntax which cannot be
2326 """Rewrite raw parsed tree to resolve ambiguous syntax which cannot be
2327 handled well by our simple top-down parser"""
2327 handled well by our simple top-down parser"""
2328 if not isinstance(x, tuple):
2328 if not isinstance(x, tuple):
2329 return x
2329 return x
2330
2330
2331 op = x[0]
2331 op = x[0]
2332 if op == 'parent':
2332 if op == 'parent':
2333 # x^:y means (x^) : y, not x ^ (:y)
2333 # x^:y means (x^) : y, not x ^ (:y)
2334 # x^: means (x^) :, not x ^ (:)
2334 # x^: means (x^) :, not x ^ (:)
2335 post = ('parentpost', x[1])
2335 post = ('parentpost', x[1])
2336 if x[2][0] == 'dagrangepre':
2336 if x[2][0] == 'dagrangepre':
2337 return _fixops(('dagrange', post, x[2][1]))
2337 return _fixops(('dagrange', post, x[2][1]))
2338 elif x[2][0] == 'rangepre':
2338 elif x[2][0] == 'rangepre':
2339 return _fixops(('range', post, x[2][1]))
2339 return _fixops(('range', post, x[2][1]))
2340 elif x[2][0] == 'rangeall':
2340 elif x[2][0] == 'rangeall':
2341 return _fixops(('rangepost', post))
2341 return _fixops(('rangepost', post))
2342
2342
2343 return (op,) + tuple(_fixops(y) for y in x[1:])
2343 return (op,) + tuple(_fixops(y) for y in x[1:])
2344
2344
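# Illustrative sketch (not part of the original module): how _fixops() above
# re-associates 'x^:y'.  The raw tree shape used here is an assumption about
# what the top-down parser produces for x ^ (:y); the rewrite itself follows
# directly from the code above.
def _examplefixops():
    """
    >>> _fixops(('parent', ('symbol', 'x'), ('rangepre', ('symbol', 'y'))))
    ('range', ('parentpost', ('symbol', 'x')), ('symbol', 'y'))
    """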
2345 def _analyze(x):
2345 def _analyze(x):
2346 if x is None:
2346 if x is None:
2347 return x
2347 return x
2348
2348
2349 op = x[0]
2349 op = x[0]
2350 if op == 'minus':
2350 if op == 'minus':
2351 return _analyze(('and', x[1], ('not', x[2])))
2351 return _analyze(('and', x[1], ('not', x[2])))
2352 elif op == 'only':
2352 elif op == 'only':
2353 t = ('func', ('symbol', 'only'), ('list', x[1], x[2]))
2353 t = ('func', ('symbol', 'only'), ('list', x[1], x[2]))
2354 return _analyze(t)
2354 return _analyze(t)
2355 elif op == 'onlypost':
2355 elif op == 'onlypost':
2356 return _analyze(('func', ('symbol', 'only'), x[1]))
2356 return _analyze(('func', ('symbol', 'only'), x[1]))
2357 elif op == 'dagrangepre':
2357 elif op == 'dagrangepre':
2358 return _analyze(('func', ('symbol', 'ancestors'), x[1]))
2358 return _analyze(('func', ('symbol', 'ancestors'), x[1]))
2359 elif op == 'dagrangepost':
2359 elif op == 'dagrangepost':
2360 return _analyze(('func', ('symbol', 'descendants'), x[1]))
2360 return _analyze(('func', ('symbol', 'descendants'), x[1]))
2361 elif op == 'rangeall':
2361 elif op == 'rangeall':
2362 return _analyze(('range', ('string', '0'), ('string', 'tip')))
2362 return _analyze(('range', ('string', '0'), ('string', 'tip')))
2363 elif op == 'rangepre':
2363 elif op == 'rangepre':
2364 return _analyze(('range', ('string', '0'), x[1]))
2364 return _analyze(('range', ('string', '0'), x[1]))
2365 elif op == 'rangepost':
2365 elif op == 'rangepost':
2366 return _analyze(('range', x[1], ('string', 'tip')))
2366 return _analyze(('range', x[1], ('string', 'tip')))
2367 elif op == 'negate':
2367 elif op == 'negate':
2368 s = getstring(x[1], _("can't negate that"))
2368 s = getstring(x[1], _("can't negate that"))
2369 return _analyze(('string', '-' + s))
2369 return _analyze(('string', '-' + s))
2370 elif op in ('string', 'symbol'):
2370 elif op in ('string', 'symbol'):
2371 return x
2371 return x
2372 elif op == 'and':
2372 elif op == 'and':
2373 ta = _analyze(x[1])
2373 ta = _analyze(x[1])
2374 tb = _analyze(x[2])
2374 tb = _analyze(x[2])
2375 return (op, ta, tb)
2375 return (op, ta, tb)
2376 elif op == 'or':
2376 elif op == 'or':
2377 return (op,) + tuple(_analyze(y) for y in x[1:])
2377 return (op,) + tuple(_analyze(y) for y in x[1:])
2378 elif op == 'not':
2378 elif op == 'not':
2379 return (op, _analyze(x[1]))
2379 return (op, _analyze(x[1]))
2380 elif op == 'parentpost':
2380 elif op == 'parentpost':
2381 return (op, _analyze(x[1]))
2381 return (op, _analyze(x[1]))
2382 elif op == 'group':
2382 elif op == 'group':
2383 return _analyze(x[1])
2383 return _analyze(x[1])
2384 elif op in ('dagrange', 'range', 'parent', 'ancestor'):
2384 elif op in ('dagrange', 'range', 'parent', 'ancestor'):
2385 ta = _analyze(x[1])
2385 ta = _analyze(x[1])
2386 tb = _analyze(x[2])
2386 tb = _analyze(x[2])
2387 return (op, ta, tb)
2387 return (op, ta, tb)
2388 elif op == 'list':
2388 elif op == 'list':
2389 return (op,) + tuple(_analyze(y) for y in x[1:])
2389 return (op,) + tuple(_analyze(y) for y in x[1:])
2390 elif op == 'keyvalue':
2390 elif op == 'keyvalue':
2391 return (op, x[1], _analyze(x[2]))
2391 return (op, x[1], _analyze(x[2]))
2392 elif op == 'func':
2392 elif op == 'func':
2393 return (op, x[1], _analyze(x[2]))
2393 return (op, x[1], _analyze(x[2]))
2394 raise ValueError('invalid operator %r' % op)
2394 raise ValueError('invalid operator %r' % op)
2395
2395
2396 def analyze(x):
2396 def analyze(x):
2397 """Transform raw parsed tree to evaluatable tree which can be fed to
2397 """Transform raw parsed tree to evaluatable tree which can be fed to
2398 optimize() or getset()
2398 optimize() or getset()
2399
2399
2400 All pseudo operations should be mapped to real operations or functions
2400 All pseudo operations should be mapped to real operations or functions
2401 defined in methods or symbols table respectively.
2401 defined in methods or symbols table respectively.
2402 """
2402 """
2403 return _analyze(x)
2403 return _analyze(x)
2404
2404
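# Illustrative sketch (not part of the original module): analyze() above
# desugars pseudo operations; here 'minus' becomes 'and' + 'not'.
def _exampleanalyze():
    """
    >>> analyze(('minus', ('symbol', 'a'), ('symbol', 'b')))
    ('and', ('symbol', 'a'), ('not', ('symbol', 'b')))
    """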
2405 def _optimize(x, small):
2405 def _optimize(x, small):
2406 if x is None:
2406 if x is None:
2407 return 0, x
2407 return 0, x
2408
2408
2409 smallbonus = 1
2409 smallbonus = 1
2410 if small:
2410 if small:
2411 smallbonus = .5
2411 smallbonus = .5
2412
2412
2413 op = x[0]
2413 op = x[0]
2414 if op in ('string', 'symbol'):
2414 if op in ('string', 'symbol'):
2415 return smallbonus, x # single revisions are small
2415 return smallbonus, x # single revisions are small
2416 elif op == 'and':
2416 elif op == 'and':
2417 wa, ta = _optimize(x[1], True)
2417 wa, ta = _optimize(x[1], True)
2418 wb, tb = _optimize(x[2], True)
2418 wb, tb = _optimize(x[2], True)
2419 w = min(wa, wb)
2419 w = min(wa, wb)
2420
2420
2421 # (::x and not ::y)/(not ::y and ::x) have a fast path
2421 # (::x and not ::y)/(not ::y and ::x) have a fast path
2422 tm = _matchonly(ta, tb) or _matchonly(tb, ta)
2422 tm = _matchonly(ta, tb) or _matchonly(tb, ta)
2423 if tm:
2423 if tm:
2424 return w, ('func', ('symbol', 'only'), tm)
2424 return w, ('func', ('symbol', 'only'), tm)
2425
2425
2426 if tb is not None and tb[0] == 'not':
2426 if tb is not None and tb[0] == 'not':
2427 return wa, ('difference', ta, tb[1])
2427 return wa, ('difference', ta, tb[1])
2428
2428
2429 if wa > wb:
2429 if wa > wb:
2430 return w, (op, tb, ta)
2430 return w, (op, tb, ta)
2431 return w, (op, ta, tb)
2431 return w, (op, ta, tb)
2432 elif op == 'or':
2432 elif op == 'or':
2433 # fast path for machine-generated expressions, which are likely to have
2434 # lots of trivial revisions: 'a + b + c()' to '_list(a b) + c()'
2435 ws, ts, ss = [], [], []
2435 ws, ts, ss = [], [], []
2436 def flushss():
2436 def flushss():
2437 if not ss:
2437 if not ss:
2438 return
2438 return
2439 if len(ss) == 1:
2439 if len(ss) == 1:
2440 w, t = ss[0]
2440 w, t = ss[0]
2441 else:
2441 else:
2442 s = '\0'.join(t[1] for w, t in ss)
2442 s = '\0'.join(t[1] for w, t in ss)
2443 y = ('func', ('symbol', '_list'), ('string', s))
2443 y = ('func', ('symbol', '_list'), ('string', s))
2444 w, t = _optimize(y, False)
2444 w, t = _optimize(y, False)
2445 ws.append(w)
2445 ws.append(w)
2446 ts.append(t)
2446 ts.append(t)
2447 del ss[:]
2447 del ss[:]
2448 for y in x[1:]:
2448 for y in x[1:]:
2449 w, t = _optimize(y, False)
2449 w, t = _optimize(y, False)
2450 if t is not None and (t[0] == 'string' or t[0] == 'symbol'):
2450 if t is not None and (t[0] == 'string' or t[0] == 'symbol'):
2451 ss.append((w, t))
2451 ss.append((w, t))
2452 continue
2452 continue
2453 flushss()
2453 flushss()
2454 ws.append(w)
2454 ws.append(w)
2455 ts.append(t)
2455 ts.append(t)
2456 flushss()
2456 flushss()
2457 if len(ts) == 1:
2457 if len(ts) == 1:
2458 return ws[0], ts[0] # 'or' operation is fully optimized out
2458 return ws[0], ts[0] # 'or' operation is fully optimized out
2459 # we can't reorder trees by weight because it would change the order.
2459 # we can't reorder trees by weight because it would change the order.
2460 # ("sort(a + b)" == "sort(b + a)", but "a + b" != "b + a")
2460 # ("sort(a + b)" == "sort(b + a)", but "a + b" != "b + a")
2461 # ts = tuple(t for w, t in sorted(zip(ws, ts), key=lambda wt: wt[0]))
2461 # ts = tuple(t for w, t in sorted(zip(ws, ts), key=lambda wt: wt[0]))
2462 return max(ws), (op,) + tuple(ts)
2462 return max(ws), (op,) + tuple(ts)
2463 elif op == 'not':
2463 elif op == 'not':
2464 # Optimize not public() to _notpublic() because we have a fast version
2464 # Optimize not public() to _notpublic() because we have a fast version
2465 if x[1] == ('func', ('symbol', 'public'), None):
2465 if x[1] == ('func', ('symbol', 'public'), None):
2466 newsym = ('func', ('symbol', '_notpublic'), None)
2466 newsym = ('func', ('symbol', '_notpublic'), None)
2467 o = _optimize(newsym, not small)
2467 o = _optimize(newsym, not small)
2468 return o[0], o[1]
2468 return o[0], o[1]
2469 else:
2469 else:
2470 o = _optimize(x[1], not small)
2470 o = _optimize(x[1], not small)
2471 return o[0], (op, o[1])
2471 return o[0], (op, o[1])
2472 elif op == 'parentpost':
2472 elif op == 'parentpost':
2473 o = _optimize(x[1], small)
2473 o = _optimize(x[1], small)
2474 return o[0], (op, o[1])
2474 return o[0], (op, o[1])
2475 elif op in ('dagrange', 'range', 'parent', 'ancestor'):
2475 elif op in ('dagrange', 'range', 'parent', 'ancestor'):
2476 wa, ta = _optimize(x[1], small)
2476 wa, ta = _optimize(x[1], small)
2477 wb, tb = _optimize(x[2], small)
2477 wb, tb = _optimize(x[2], small)
2478 return wa + wb, (op, ta, tb)
2478 return wa + wb, (op, ta, tb)
2479 elif op == 'list':
2479 elif op == 'list':
2480 ws, ts = zip(*(_optimize(y, small) for y in x[1:]))
2480 ws, ts = zip(*(_optimize(y, small) for y in x[1:]))
2481 return sum(ws), (op,) + ts
2481 return sum(ws), (op,) + ts
2482 elif op == 'keyvalue':
2482 elif op == 'keyvalue':
2483 w, t = _optimize(x[2], small)
2483 w, t = _optimize(x[2], small)
2484 return w, (op, x[1], t)
2484 return w, (op, x[1], t)
2485 elif op == 'func':
2485 elif op == 'func':
2486 f = getsymbol(x[1])
2486 f = getsymbol(x[1])
2487 wa, ta = _optimize(x[2], small)
2487 wa, ta = _optimize(x[2], small)
2488 if f in ('author', 'branch', 'closed', 'date', 'desc', 'file', 'grep',
2488 if f in ('author', 'branch', 'closed', 'date', 'desc', 'file', 'grep',
2489 'keyword', 'outgoing', 'user'):
2489 'keyword', 'outgoing', 'user'):
2490 w = 10 # slow
2490 w = 10 # slow
2491 elif f in ('modifies', 'adds', 'removes'):
2491 elif f in ('modifies', 'adds', 'removes'):
2492 w = 30 # slower
2492 w = 30 # slower
2493 elif f == "contains":
2493 elif f == "contains":
2494 w = 100 # very slow
2494 w = 100 # very slow
2495 elif f == "ancestor":
2495 elif f == "ancestor":
2496 w = 1 * smallbonus
2496 w = 1 * smallbonus
2497 elif f in ('reverse', 'limit', 'first', '_intlist'):
2497 elif f in ('reverse', 'limit', 'first', '_intlist'):
2498 w = 0
2498 w = 0
2499 elif f == "sort":
2499 elif f == "sort":
2500 w = 10 # assume most sorts look at changelog
2500 w = 10 # assume most sorts look at changelog
2501 else:
2501 else:
2502 w = 1
2502 w = 1
2503 return w + wa, (op, x[1], ta)
2503 return w + wa, (op, x[1], ta)
2504 raise ValueError('invalid operator %r' % op)
2504 raise ValueError('invalid operator %r' % op)
2505
2505
2506 def optimize(tree):
2506 def optimize(tree):
2507 """Optimize evaluatable tree
2507 """Optimize evaluatable tree
2508
2508
2509 All pseudo operations should be transformed beforehand.
2509 All pseudo operations should be transformed beforehand.
2510 """
2510 """
2511 _weight, newtree = _optimize(tree, small=True)
2511 _weight, newtree = _optimize(tree, small=True)
2512 return newtree
2512 return newtree
2513
2513
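# Illustrative sketch (not part of the original module): optimize() above
# rewrites 'not public()' into the faster '_notpublic()' form.
def _exampleoptimize():
    """
    >>> optimize(('not', ('func', ('symbol', 'public'), None)))
    ('func', ('symbol', '_notpublic'), None)
    """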
2514 # the set of valid characters for the initial letter of symbols in
2514 # the set of valid characters for the initial letter of symbols in
2515 # alias declarations and definitions
2515 # alias declarations and definitions
2516 _aliassyminitletters = set(c for c in [chr(i) for i in xrange(256)]
2516 _aliassyminitletters = set(c for c in [chr(i) for i in xrange(256)]
2517 if c.isalnum() or c in '._@$' or ord(c) > 127)
2517 if c.isalnum() or c in '._@$' or ord(c) > 127)
2518
2518
2519 def _parsewith(spec, lookup=None, syminitletters=None):
2519 def _parsewith(spec, lookup=None, syminitletters=None):
2520 """Generate a parse tree of given spec with given tokenizing options
2520 """Generate a parse tree of given spec with given tokenizing options
2521
2521
2522 >>> _parsewith('foo($1)', syminitletters=_aliassyminitletters)
2522 >>> _parsewith('foo($1)', syminitletters=_aliassyminitletters)
2523 ('func', ('symbol', 'foo'), ('symbol', '$1'))
2523 ('func', ('symbol', 'foo'), ('symbol', '$1'))
2524 >>> _parsewith('$1')
2524 >>> _parsewith('$1')
2525 Traceback (most recent call last):
2525 Traceback (most recent call last):
2526 ...
2526 ...
2527 ParseError: ("syntax error in revset '$1'", 0)
2527 ParseError: ("syntax error in revset '$1'", 0)
2528 >>> _parsewith('foo bar')
2528 >>> _parsewith('foo bar')
2529 Traceback (most recent call last):
2529 Traceback (most recent call last):
2530 ...
2530 ...
2531 ParseError: ('invalid token', 4)
2531 ParseError: ('invalid token', 4)
2532 """
2532 """
2533 p = parser.parser(elements)
2533 p = parser.parser(elements)
2534 tree, pos = p.parse(tokenize(spec, lookup=lookup,
2534 tree, pos = p.parse(tokenize(spec, lookup=lookup,
2535 syminitletters=syminitletters))
2535 syminitletters=syminitletters))
2536 if pos != len(spec):
2536 if pos != len(spec):
2537 raise error.ParseError(_('invalid token'), pos)
2537 raise error.ParseError(_('invalid token'), pos)
2538 return _fixops(parser.simplifyinfixops(tree, ('list', 'or')))
2538 return _fixops(parser.simplifyinfixops(tree, ('list', 'or')))
2539
2539
2540 class _aliasrules(parser.basealiasrules):
2540 class _aliasrules(parser.basealiasrules):
2541 """Parsing and expansion rule set of revset aliases"""
2541 """Parsing and expansion rule set of revset aliases"""
2542 _section = _('revset alias')
2542 _section = _('revset alias')
2543
2543
2544 @staticmethod
2544 @staticmethod
2545 def _parse(spec):
2545 def _parse(spec):
2546 """Parse alias declaration/definition ``spec``
2546 """Parse alias declaration/definition ``spec``
2547
2547
2548 This allows symbol names to also use ``$`` as an initial letter
2549 (for backward compatibility); callers of this function should
2550 check whether ``$`` is also used for unexpected symbols.
2551 """
2551 """
2552 return _parsewith(spec, syminitletters=_aliassyminitletters)
2552 return _parsewith(spec, syminitletters=_aliassyminitletters)
2553
2553
2554 @staticmethod
2554 @staticmethod
2555 def _trygetfunc(tree):
2555 def _trygetfunc(tree):
2556 if tree[0] == 'func' and tree[1][0] == 'symbol':
2556 if tree[0] == 'func' and tree[1][0] == 'symbol':
2557 return tree[1][1], getlist(tree[2])
2557 return tree[1][1], getlist(tree[2])
2558
2558
2559 def expandaliases(ui, tree, showwarning=None):
2559 def expandaliases(ui, tree, showwarning=None):
2560 aliases = _aliasrules.buildmap(ui.configitems('revsetalias'))
2560 aliases = _aliasrules.buildmap(ui.configitems('revsetalias'))
2561 tree = _aliasrules.expand(aliases, tree)
2561 tree = _aliasrules.expand(aliases, tree)
2562 if showwarning:
2562 if showwarning:
2563 # warn about problematic (but not referred) aliases
2563 # warn about problematic (but not referred) aliases
2564 for name, alias in sorted(aliases.iteritems()):
2564 for name, alias in sorted(aliases.iteritems()):
2565 if alias.error and not alias.warned:
2565 if alias.error and not alias.warned:
2566 showwarning(_('warning: %s\n') % (alias.error))
2566 showwarning(_('warning: %s\n') % (alias.error))
2567 alias.warned = True
2567 alias.warned = True
2568 return tree
2568 return tree
2569
2569
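# Illustrative sketch (hypothetical configuration, not part of the original
# module): expandaliases() pulls its definitions from the [revsetalias] config
# section, e.g.
#
#   [revsetalias]
#   h = heads(default)
#   d(s) = sort(s, date)
#
# With the above, a parsed tree for "d(h)" is expanded to the tree of
# "sort(heads(default), date)" before analyze()/optimize() run.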
2570 def foldconcat(tree):
2570 def foldconcat(tree):
2571 """Fold elements to be concatenated by `##`
2571 """Fold elements to be concatenated by `##`
2572 """
2572 """
2573 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2573 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2574 return tree
2574 return tree
2575 if tree[0] == '_concat':
2575 if tree[0] == '_concat':
2576 pending = [tree]
2576 pending = [tree]
2577 l = []
2577 l = []
2578 while pending:
2578 while pending:
2579 e = pending.pop()
2579 e = pending.pop()
2580 if e[0] == '_concat':
2580 if e[0] == '_concat':
2581 pending.extend(reversed(e[1:]))
2581 pending.extend(reversed(e[1:]))
2582 elif e[0] in ('string', 'symbol'):
2582 elif e[0] in ('string', 'symbol'):
2583 l.append(e[1])
2583 l.append(e[1])
2584 else:
2584 else:
2585 msg = _("\"##\" can't concatenate \"%s\" element") % (e[0])
2585 msg = _("\"##\" can't concatenate \"%s\" element") % (e[0])
2586 raise error.ParseError(msg)
2586 raise error.ParseError(msg)
2587 return ('string', ''.join(l))
2587 return ('string', ''.join(l))
2588 else:
2588 else:
2589 return tuple(foldconcat(t) for t in tree)
2589 return tuple(foldconcat(t) for t in tree)
2590
2590
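# A small illustration of the folding above: a parsed '_concat' chain of
# string/symbol leaves collapses into a single 'string' node.
#
# >>> foldconcat(('_concat', ('symbol', '39'), ('string', 'e7')))
# ('string', '39e7')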
2591 def parse(spec, lookup=None):
2591 def parse(spec, lookup=None):
2592 return _parsewith(spec, lookup=lookup)
2592 return _parsewith(spec, lookup=lookup)
2593
2593
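# parse() returns a plain tuple-based tree; for example (illustrative,
# following the ('func', ('symbol', name), args) shape used throughout):
#
# >>> parse('heads(default)')
# ('func', ('symbol', 'heads'), ('symbol', 'default'))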
2594 def posttreebuilthook(tree, repo):
2594 def posttreebuilthook(tree, repo):
2595 # hook for extensions to execute code on the optimized tree
2595 # hook for extensions to execute code on the optimized tree
2596 pass
2596 pass
2597
2597
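# Extensions usually attach to this hook with extensions.wrapfunction; a
# minimal sketch (the wrapper name is hypothetical):
#
#   from mercurial import extensions, revset
#   def _posttree(orig, tree, repo):
#       # inspect or record the optimized tree here
#       return orig(tree, repo)
#   extensions.wrapfunction(revset, 'posttreebuilthook', _posttree)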
2598 def match(ui, spec, repo=None):
2598 def match(ui, spec, repo=None):
2599 """Create a matcher for a single revision spec."""
2599 """Create a matcher for a single revision spec."""
2600 return matchany(ui, [spec], repo=repo)
2600 return matchany(ui, [spec], repo=repo)
2601
2601
2602 def matchany(ui, specs, repo=None):
2602 def matchany(ui, specs, repo=None):
2603 """Create a matcher that will include any revisions matching one of the
2603 """Create a matcher that will include any revisions matching one of the
2604 given specs"""
2604 given specs"""
2605 if not specs:
2605 if not specs:
2606 def mfunc(repo, subset=None):
2606 def mfunc(repo, subset=None):
2607 return baseset()
2607 return baseset()
2608 return mfunc
2608 return mfunc
2609 if not all(specs):
2609 if not all(specs):
2610 raise error.ParseError(_("empty query"))
2610 raise error.ParseError(_("empty query"))
2611 lookup = None
2611 lookup = None
2612 if repo:
2612 if repo:
2613 lookup = repo.__contains__
2613 lookup = repo.__contains__
2614 if len(specs) == 1:
2614 if len(specs) == 1:
2615 tree = parse(specs[0], lookup)
2615 tree = parse(specs[0], lookup)
2616 else:
2616 else:
2617 tree = ('or',) + tuple(parse(s, lookup) for s in specs)
2617 tree = ('or',) + tuple(parse(s, lookup) for s in specs)
2618 return _makematcher(ui, tree, repo)
2618
2619
2620 def _makematcher(ui, tree, repo):
2621 if ui:
2619 if ui:
2622 tree = expandaliases(ui, tree, showwarning=ui.warn)
2620 tree = expandaliases(ui, tree, showwarning=ui.warn)
2623 tree = foldconcat(tree)
2621 tree = foldconcat(tree)
2624 tree = analyze(tree)
2622 tree = analyze(tree)
2625 tree = optimize(tree)
2623 tree = optimize(tree)
2626 posttreebuilthook(tree, repo)
2624 posttreebuilthook(tree, repo)
2625 return makematcher(tree)
2626
2627 def makematcher(tree):
2628 """Create a matcher from an evaluatable tree"""
2627 def mfunc(repo, subset=None):
2629 def mfunc(repo, subset=None):
2628 if subset is None:
2630 if subset is None:
2629 subset = fullreposet(repo)
2631 subset = fullreposet(repo)
2630 if util.safehasattr(subset, 'isascending'):
2632 if util.safehasattr(subset, 'isascending'):
2631 result = getset(repo, subset, tree)
2633 result = getset(repo, subset, tree)
2632 else:
2634 else:
2633 result = getset(repo, baseset(subset), tree)
2635 result = getset(repo, baseset(subset), tree)
2634 return result
2636 return result
2635 return mfunc
2637 return mfunc
2636
2638
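# A minimal usage sketch of the matcher API above (assuming 'repo' and its
# 'ui' are available): the returned function evaluates the expression
# against a subset (the full repository by default) and yields a smartset.
#
#   m = matchany(ui, ['draft()', 'head()'], repo=repo)
#   revs = m(repo)               # smartset over fullreposet(repo)
#   revs = m(repo, subset=revs)  # or restrict evaluation to a prior result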
2637 def formatspec(expr, *args):
2639 def formatspec(expr, *args):
2638 '''
2640 '''
2639 This is a convenience function for using revsets internally, and
2641 This is a convenience function for using revsets internally, and
2640 escapes arguments appropriately. Aliases are intentionally ignored
2642 escapes arguments appropriately. Aliases are intentionally ignored
2641 so that intended expression behavior isn't accidentally subverted.
2643 so that intended expression behavior isn't accidentally subverted.
2642
2644
2643 Supported arguments:
2645 Supported arguments:
2644
2646
2645 %r = revset expression, parenthesized
2647 %r = revset expression, parenthesized
2646 %d = int(arg), no quoting
2648 %d = int(arg), no quoting
2647 %s = string(arg), escaped and single-quoted
2649 %s = string(arg), escaped and single-quoted
2648 %b = arg.branch(), escaped and single-quoted
2650 %b = arg.branch(), escaped and single-quoted
2649 %n = hex(arg), single-quoted
2651 %n = hex(arg), single-quoted
2650 %% = a literal '%'
2652 %% = a literal '%'
2651
2653
2652 Prefixing the type with 'l' specifies a parenthesized list of that type.
2654 Prefixing the type with 'l' specifies a parenthesized list of that type.
2653
2655
2654 >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
2656 >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
2655 '(10 or 11):: and ((this()) or (that()))'
2657 '(10 or 11):: and ((this()) or (that()))'
2656 >>> formatspec('%d:: and not %d::', 10, 20)
2658 >>> formatspec('%d:: and not %d::', 10, 20)
2657 '10:: and not 20::'
2659 '10:: and not 20::'
2658 >>> formatspec('%ld or %ld', [], [1])
2660 >>> formatspec('%ld or %ld', [], [1])
2659 "_list('') or 1"
2661 "_list('') or 1"
2660 >>> formatspec('keyword(%s)', 'foo\\xe9')
2662 >>> formatspec('keyword(%s)', 'foo\\xe9')
2661 "keyword('foo\\\\xe9')"
2663 "keyword('foo\\\\xe9')"
2662 >>> b = lambda: 'default'
2664 >>> b = lambda: 'default'
2663 >>> b.branch = b
2665 >>> b.branch = b
2664 >>> formatspec('branch(%b)', b)
2666 >>> formatspec('branch(%b)', b)
2665 "branch('default')"
2667 "branch('default')"
2666 >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
2668 >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
2667 "root(_list('a\\x00b\\x00c\\x00d'))"
2669 "root(_list('a\\x00b\\x00c\\x00d'))"
2668 '''
2670 '''
2669
2671
2670 def quote(s):
2672 def quote(s):
2671 return repr(str(s))
2673 return repr(str(s))
2672
2674
2673 def argtype(c, arg):
2675 def argtype(c, arg):
2674 if c == 'd':
2676 if c == 'd':
2675 return str(int(arg))
2677 return str(int(arg))
2676 elif c == 's':
2678 elif c == 's':
2677 return quote(arg)
2679 return quote(arg)
2678 elif c == 'r':
2680 elif c == 'r':
2679 parse(arg) # make sure syntax errors are confined
2681 parse(arg) # make sure syntax errors are confined
2680 return '(%s)' % arg
2682 return '(%s)' % arg
2681 elif c == 'n':
2683 elif c == 'n':
2682 return quote(node.hex(arg))
2684 return quote(node.hex(arg))
2683 elif c == 'b':
2685 elif c == 'b':
2684 return quote(arg.branch())
2686 return quote(arg.branch())
2685
2687
2686 def listexp(s, t):
2688 def listexp(s, t):
2687 l = len(s)
2689 l = len(s)
2688 if l == 0:
2690 if l == 0:
2689 return "_list('')"
2691 return "_list('')"
2690 elif l == 1:
2692 elif l == 1:
2691 return argtype(t, s[0])
2693 return argtype(t, s[0])
2692 elif t == 'd':
2694 elif t == 'd':
2693 return "_intlist('%s')" % "\0".join(str(int(a)) for a in s)
2695 return "_intlist('%s')" % "\0".join(str(int(a)) for a in s)
2694 elif t == 's':
2696 elif t == 's':
2695 return "_list('%s')" % "\0".join(s)
2697 return "_list('%s')" % "\0".join(s)
2696 elif t == 'n':
2698 elif t == 'n':
2697 return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
2699 return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
2698 elif t == 'b':
2700 elif t == 'b':
2699 return "_list('%s')" % "\0".join(a.branch() for a in s)
2701 return "_list('%s')" % "\0".join(a.branch() for a in s)
2700
2702
2701 m = l // 2
2703 m = l // 2
2702 return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))
2704 return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))
2703
2705
2704 ret = ''
2706 ret = ''
2705 pos = 0
2707 pos = 0
2706 arg = 0
2708 arg = 0
2707 while pos < len(expr):
2709 while pos < len(expr):
2708 c = expr[pos]
2710 c = expr[pos]
2709 if c == '%':
2711 if c == '%':
2710 pos += 1
2712 pos += 1
2711 d = expr[pos]
2713 d = expr[pos]
2712 if d == '%':
2714 if d == '%':
2713 ret += d
2715 ret += d
2714 elif d in 'dsnbr':
2716 elif d in 'dsnbr':
2715 ret += argtype(d, args[arg])
2717 ret += argtype(d, args[arg])
2716 arg += 1
2718 arg += 1
2717 elif d == 'l':
2719 elif d == 'l':
2718 # a list of some type
2720 # a list of some type
2719 pos += 1
2721 pos += 1
2720 d = expr[pos]
2722 d = expr[pos]
2721 ret += listexp(list(args[arg]), d)
2723 ret += listexp(list(args[arg]), d)
2722 arg += 1
2724 arg += 1
2723 else:
2725 else:
2724 raise error.Abort(_('unexpected revspec format character %s')
2726 raise error.Abort(_('unexpected revspec format character %s')
2725 % d)
2727 % d)
2726 else:
2728 else:
2727 ret += c
2729 ret += c
2728 pos += 1
2730 pos += 1
2729
2731
2730 return ret
2732 return ret
2731
2733
2732 def prettyformat(tree):
2734 def prettyformat(tree):
2733 return parser.prettyformat(tree, ('string', 'symbol'))
2735 return parser.prettyformat(tree, ('string', 'symbol'))
2734
2736
2735 def depth(tree):
2737 def depth(tree):
2736 if isinstance(tree, tuple):
2738 if isinstance(tree, tuple):
2737 return max(map(depth, tree)) + 1
2739 return max(map(depth, tree)) + 1
2738 else:
2740 else:
2739 return 0
2741 return 0
2740
2742
2741 def funcsused(tree):
2743 def funcsused(tree):
2742 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2744 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2743 return set()
2745 return set()
2744 else:
2746 else:
2745 funcs = set()
2747 funcs = set()
2746 for s in tree[1:]:
2748 for s in tree[1:]:
2747 funcs |= funcsused(s)
2749 funcs |= funcsused(s)
2748 if tree[0] == 'func':
2750 if tree[0] == 'func':
2749 funcs.add(tree[1][1])
2751 funcs.add(tree[1][1])
2750 return funcs
2752 return funcs
2751
2753
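# Both depth() and funcsused() walk the tuple trees produced by parse();
# for example:
#
# >>> tree = ('and', ('func', ('symbol', 'heads'), None), ('symbol', 'tip'))
# >>> depth(tree)
# 3
# >>> sorted(funcsused(tree))
# ['heads']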
2752 def _formatsetrepr(r):
2754 def _formatsetrepr(r):
2753 """Format an optional printable representation of a set
2755 """Format an optional printable representation of a set
2754
2756
2755 ======== =================================
2757 ======== =================================
2756 type(r) example
2758 type(r) example
2757 ======== =================================
2759 ======== =================================
2758 tuple ('<not %r>', other)
2760 tuple ('<not %r>', other)
2759 str '<branch closed>'
2761 str '<branch closed>'
2760 callable lambda: '<branch %r>' % sorted(b)
2762 callable lambda: '<branch %r>' % sorted(b)
2761 object other
2763 object other
2762 ======== =================================
2764 ======== =================================
2763 """
2765 """
2764 if r is None:
2766 if r is None:
2765 return ''
2767 return ''
2766 elif isinstance(r, tuple):
2768 elif isinstance(r, tuple):
2767 return r[0] % r[1:]
2769 return r[0] % r[1:]
2768 elif isinstance(r, str):
2770 elif isinstance(r, str):
2769 return r
2771 return r
2770 elif callable(r):
2772 elif callable(r):
2771 return r()
2773 return r()
2772 else:
2774 else:
2773 return repr(r)
2775 return repr(r)
2774
2776
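# The four accepted forms from the table above, illustrated:
#
# >>> _formatsetrepr(None)
# ''
# >>> _formatsetrepr(('<not %r>', [1, 2]))
# '<not [1, 2]>'
# >>> _formatsetrepr('<branch closed>')
# '<branch closed>'
# >>> _formatsetrepr(lambda: '<computed>')
# '<computed>'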
2775 class abstractsmartset(object):
2777 class abstractsmartset(object):
2776
2778
2777 def __nonzero__(self):
2779 def __nonzero__(self):
2778 """True if the smartset is not empty"""
2780 """True if the smartset is not empty"""
2779 raise NotImplementedError()
2781 raise NotImplementedError()
2780
2782
2781 def __contains__(self, rev):
2783 def __contains__(self, rev):
2782 """provide fast membership testing"""
2784 """provide fast membership testing"""
2783 raise NotImplementedError()
2785 raise NotImplementedError()
2784
2786
2785 def __iter__(self):
2787 def __iter__(self):
2786 """iterate the set in the order it is supposed to be iterated"""
2788 """iterate the set in the order it is supposed to be iterated"""
2787 raise NotImplementedError()
2789 raise NotImplementedError()
2788
2790
2789 # Attributes containing a function to perform a fast iteration in a given
2791 # Attributes containing a function to perform a fast iteration in a given
2790 # direction. A smartset can have none, one, or both defined.
2792 # direction. A smartset can have none, one, or both defined.
2791 #
2793 #
2792 # Default value is None instead of a function returning None to avoid
2794 # Default value is None instead of a function returning None to avoid
2793 # initializing an iterator just for testing if a fast method exists.
2795 # initializing an iterator just for testing if a fast method exists.
2794 fastasc = None
2796 fastasc = None
2795 fastdesc = None
2797 fastdesc = None
2796
2798
2797 def isascending(self):
2799 def isascending(self):
2798 """True if the set will iterate in ascending order"""
2800 """True if the set will iterate in ascending order"""
2799 raise NotImplementedError()
2801 raise NotImplementedError()
2800
2802
2801 def isdescending(self):
2803 def isdescending(self):
2802 """True if the set will iterate in descending order"""
2804 """True if the set will iterate in descending order"""
2803 raise NotImplementedError()
2805 raise NotImplementedError()
2804
2806
2805 def istopo(self):
2807 def istopo(self):
2806 """True if the set will iterate in topographical order"""
2808 """True if the set will iterate in topographical order"""
2807 raise NotImplementedError()
2809 raise NotImplementedError()
2808
2810
2809 @util.cachefunc
2811 @util.cachefunc
2810 def min(self):
2812 def min(self):
2811 """return the minimum element in the set"""
2813 """return the minimum element in the set"""
2812 if self.fastasc is not None:
2814 if self.fastasc is not None:
2813 for r in self.fastasc():
2815 for r in self.fastasc():
2814 return r
2816 return r
2815 raise ValueError('arg is an empty sequence')
2817 raise ValueError('arg is an empty sequence')
2816 return min(self)
2818 return min(self)
2817
2819
2818 @util.cachefunc
2820 @util.cachefunc
2819 def max(self):
2821 def max(self):
2820 """return the maximum element in the set"""
2822 """return the maximum element in the set"""
2821 if self.fastdesc is not None:
2823 if self.fastdesc is not None:
2822 for r in self.fastdesc():
2824 for r in self.fastdesc():
2823 return r
2825 return r
2824 raise ValueError('arg is an empty sequence')
2826 raise ValueError('arg is an empty sequence')
2825 return max(self)
2827 return max(self)
2826
2828
2827 def first(self):
2829 def first(self):
2828 """return the first element in the set (user iteration perspective)
2830 """return the first element in the set (user iteration perspective)
2829
2831
2830 Return None if the set is empty"""
2832 Return None if the set is empty"""
2831 raise NotImplementedError()
2833 raise NotImplementedError()
2832
2834
2833 def last(self):
2835 def last(self):
2834 """return the last element in the set (user iteration perspective)
2836 """return the last element in the set (user iteration perspective)
2835
2837
2836 Return None if the set is empty"""
2838 Return None if the set is empty"""
2837 raise NotImplementedError()
2839 raise NotImplementedError()
2838
2840
2839 def __len__(self):
2841 def __len__(self):
2840 """return the length of the smartsets
2842 """return the length of the smartsets
2841
2843
2842 This can be expensive on smartset that could be lazy otherwise."""
2844 This can be expensive on smartset that could be lazy otherwise."""
2843 raise NotImplementedError()
2845 raise NotImplementedError()
2844
2846
2845 def reverse(self):
2847 def reverse(self):
2846 """reverse the expected iteration order"""
2848 """reverse the expected iteration order"""
2847 raise NotImplementedError()
2849 raise NotImplementedError()
2848
2850
2849 def sort(self, reverse=True):
2851 def sort(self, reverse=True):
2850 """get the set to iterate in an ascending or descending order"""
2852 """get the set to iterate in an ascending or descending order"""
2851 raise NotImplementedError()
2853 raise NotImplementedError()
2852
2854
2853 def __and__(self, other):
2855 def __and__(self, other):
2854 """Returns a new object with the intersection of the two collections.
2856 """Returns a new object with the intersection of the two collections.
2855
2857
2856 This is part of the mandatory API for smartset."""
2858 This is part of the mandatory API for smartset."""
2857 if isinstance(other, fullreposet):
2859 if isinstance(other, fullreposet):
2858 return self
2860 return self
2859 return self.filter(other.__contains__, condrepr=other, cache=False)
2861 return self.filter(other.__contains__, condrepr=other, cache=False)
2860
2862
2861 def __add__(self, other):
2863 def __add__(self, other):
2862 """Returns a new object with the union of the two collections.
2864 """Returns a new object with the union of the two collections.
2863
2865
2864 This is part of the mandatory API for smartset."""
2866 This is part of the mandatory API for smartset."""
2865 return addset(self, other)
2867 return addset(self, other)
2866
2868
2867 def __sub__(self, other):
2869 def __sub__(self, other):
2868 """Returns a new object with the substraction of the two collections.
2870 """Returns a new object with the substraction of the two collections.
2869
2871
2870 This is part of the mandatory API for smartset."""
2872 This is part of the mandatory API for smartset."""
2871 c = other.__contains__
2873 c = other.__contains__
2872 return self.filter(lambda r: not c(r), condrepr=('<not %r>', other),
2874 return self.filter(lambda r: not c(r), condrepr=('<not %r>', other),
2873 cache=False)
2875 cache=False)
2874
2876
2875 def filter(self, condition, condrepr=None, cache=True):
2877 def filter(self, condition, condrepr=None, cache=True):
2876 """Returns this smartset filtered by condition as a new smartset.
2878 """Returns this smartset filtered by condition as a new smartset.
2877
2879
2878 `condition` is a callable which takes a revision number and returns a
2880 `condition` is a callable which takes a revision number and returns a
2879 boolean. Optional `condrepr` provides a printable representation of
2881 boolean. Optional `condrepr` provides a printable representation of
2880 the given `condition`.
2882 the given `condition`.
2881
2883
2882 This is part of the mandatory API for smartset."""
2884 This is part of the mandatory API for smartset."""
2884 # builtin functions cannot be cached, but they do not need to be
2886 # builtin functions cannot be cached, but they do not need to be
2884 if cache and util.safehasattr(condition, 'func_code'):
2886 if cache and util.safehasattr(condition, 'func_code'):
2885 condition = util.cachefunc(condition)
2887 condition = util.cachefunc(condition)
2886 return filteredset(self, condition, condrepr)
2888 return filteredset(self, condition, condrepr)
2887
2889
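# The classes below implement this interface; a short sketch of the set
# arithmetic using baseset (defined next):
#
# >>> xs = baseset([0, 1, 2, 3])
# >>> ys = baseset([2, 3, 4])
# >>> list(xs & ys), list(xs + ys), list(xs - ys)
# ([2, 3], [0, 1, 2, 3, 4], [0, 1])
# >>> list(xs.filter(lambda r: r % 2 == 0))
# [0, 2]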
2888 class baseset(abstractsmartset):
2890 class baseset(abstractsmartset):
2889 """Basic data structure that represents a revset and contains the basic
2891 """Basic data structure that represents a revset and contains the basic
2890 operation that it should be able to perform.
2892 operation that it should be able to perform.
2891
2893
2892 Every method in this class should be implemented by any smartset class.
2894 Every method in this class should be implemented by any smartset class.
2893 """
2895 """
2894 def __init__(self, data=(), datarepr=None, istopo=False):
2896 def __init__(self, data=(), datarepr=None, istopo=False):
2895 """
2897 """
2896 datarepr: a tuple of (format, obj, ...), a function or an object that
2898 datarepr: a tuple of (format, obj, ...), a function or an object that
2897 provides a printable representation of the given data.
2899 provides a printable representation of the given data.
2898 """
2900 """
2899 self._ascending = None
2901 self._ascending = None
2900 self._istopo = istopo
2902 self._istopo = istopo
2901 if not isinstance(data, list):
2903 if not isinstance(data, list):
2902 if isinstance(data, set):
2904 if isinstance(data, set):
2903 self._set = data
2905 self._set = data
2904 # set has no order; we pick one for stability purposes
2906 # set has no order; we pick one for stability purposes
2905 self._ascending = True
2907 self._ascending = True
2906 data = list(data)
2908 data = list(data)
2907 self._list = data
2909 self._list = data
2908 self._datarepr = datarepr
2910 self._datarepr = datarepr
2909
2911
2910 @util.propertycache
2912 @util.propertycache
2911 def _set(self):
2913 def _set(self):
2912 return set(self._list)
2914 return set(self._list)
2913
2915
2914 @util.propertycache
2916 @util.propertycache
2915 def _asclist(self):
2917 def _asclist(self):
2916 asclist = self._list[:]
2918 asclist = self._list[:]
2917 asclist.sort()
2919 asclist.sort()
2918 return asclist
2920 return asclist
2919
2921
2920 def __iter__(self):
2922 def __iter__(self):
2921 if self._ascending is None:
2923 if self._ascending is None:
2922 return iter(self._list)
2924 return iter(self._list)
2923 elif self._ascending:
2925 elif self._ascending:
2924 return iter(self._asclist)
2926 return iter(self._asclist)
2925 else:
2927 else:
2926 return reversed(self._asclist)
2928 return reversed(self._asclist)
2927
2929
2928 def fastasc(self):
2930 def fastasc(self):
2929 return iter(self._asclist)
2931 return iter(self._asclist)
2930
2932
2931 def fastdesc(self):
2933 def fastdesc(self):
2932 return reversed(self._asclist)
2934 return reversed(self._asclist)
2933
2935
2934 @util.propertycache
2936 @util.propertycache
2935 def __contains__(self):
2937 def __contains__(self):
2936 return self._set.__contains__
2938 return self._set.__contains__
2937
2939
2938 def __nonzero__(self):
2940 def __nonzero__(self):
2939 return bool(self._list)
2941 return bool(self._list)
2940
2942
2941 def sort(self, reverse=False):
2943 def sort(self, reverse=False):
2942 self._ascending = not bool(reverse)
2944 self._ascending = not bool(reverse)
2943 self._istopo = False
2945 self._istopo = False
2944
2946
2945 def reverse(self):
2947 def reverse(self):
2946 if self._ascending is None:
2948 if self._ascending is None:
2947 self._list.reverse()
2949 self._list.reverse()
2948 else:
2950 else:
2949 self._ascending = not self._ascending
2951 self._ascending = not self._ascending
2950 self._istopo = False
2952 self._istopo = False
2951
2953
2952 def __len__(self):
2954 def __len__(self):
2953 return len(self._list)
2955 return len(self._list)
2954
2956
2955 def isascending(self):
2957 def isascending(self):
2956 """Returns True if the collection is ascending order, False if not.
2958 """Returns True if the collection is ascending order, False if not.
2957
2959
2958 This is part of the mandatory API for smartset."""
2960 This is part of the mandatory API for smartset."""
2959 if len(self) <= 1:
2961 if len(self) <= 1:
2960 return True
2962 return True
2961 return self._ascending is not None and self._ascending
2963 return self._ascending is not None and self._ascending
2962
2964
2963 def isdescending(self):
2965 def isdescending(self):
2964 """Returns True if the collection is descending order, False if not.
2966 """Returns True if the collection is descending order, False if not.
2965
2967
2966 This is part of the mandatory API for smartset."""
2968 This is part of the mandatory API for smartset."""
2967 if len(self) <= 1:
2969 if len(self) <= 1:
2968 return True
2970 return True
2969 return self._ascending is not None and not self._ascending
2971 return self._ascending is not None and not self._ascending
2970
2972
2971 def istopo(self):
2973 def istopo(self):
2972 """Is the collection is in topographical order or not.
2974 """Is the collection is in topographical order or not.
2973
2975
2974 This is part of the mandatory API for smartset."""
2976 This is part of the mandatory API for smartset."""
2975 if len(self) <= 1:
2977 if len(self) <= 1:
2976 return True
2978 return True
2977 return self._istopo
2979 return self._istopo
2978
2980
2979 def first(self):
2981 def first(self):
2980 if self:
2982 if self:
2981 if self._ascending is None:
2983 if self._ascending is None:
2982 return self._list[0]
2984 return self._list[0]
2983 elif self._ascending:
2985 elif self._ascending:
2984 return self._asclist[0]
2986 return self._asclist[0]
2985 else:
2987 else:
2986 return self._asclist[-1]
2988 return self._asclist[-1]
2987 return None
2989 return None
2988
2990
2989 def last(self):
2991 def last(self):
2990 if self:
2992 if self:
2991 if self._ascending is None:
2993 if self._ascending is None:
2992 return self._list[-1]
2994 return self._list[-1]
2993 elif self._ascending:
2995 elif self._ascending:
2994 return self._asclist[-1]
2996 return self._asclist[-1]
2995 else:
2997 else:
2996 return self._asclist[0]
2998 return self._asclist[0]
2997 return None
2999 return None
2998
3000
2999 def __repr__(self):
3001 def __repr__(self):
3000 d = {None: '', False: '-', True: '+'}[self._ascending]
3002 d = {None: '', False: '-', True: '+'}[self._ascending]
3001 s = _formatsetrepr(self._datarepr)
3003 s = _formatsetrepr(self._datarepr)
3002 if not s:
3004 if not s:
3003 l = self._list
3005 l = self._list
3004 # if _list has been built from a set, it might have a different
3006 # if _list has been built from a set, it might have a different
3005 # order from one python implementation to another.
3007 # order from one python implementation to another.
3006 # We fall back to the sorted version for a stable output.
3008 # We fall back to the sorted version for a stable output.
3007 if self._ascending is not None:
3009 if self._ascending is not None:
3008 l = self._asclist
3010 l = self._asclist
3009 s = repr(l)
3011 s = repr(l)
3010 return '<%s%s %s>' % (type(self).__name__, d, s)
3012 return '<%s%s %s>' % (type(self).__name__, d, s)
3011
3013
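# Ordering in a baseset is tracked lazily; a brief illustration:
#
# >>> s = baseset([3, 1, 2])
# >>> s.first(), s.last()          # no order requested: insertion order
# (3, 2)
# >>> s.sort()
# >>> list(s), s.isascending()
# ([1, 2, 3], True)
# >>> s.reverse()
# >>> list(s), s.isdescending()
# ([3, 2, 1], True)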
3012 class filteredset(abstractsmartset):
3014 class filteredset(abstractsmartset):
3013 """Duck type for baseset class which iterates lazily over the revisions in
3015 """Duck type for baseset class which iterates lazily over the revisions in
3014 the subset and contains a function which tests for membership in the
3016 the subset and contains a function which tests for membership in the
3015 revset
3017 revset
3016 """
3018 """
3017 def __init__(self, subset, condition=lambda x: True, condrepr=None):
3019 def __init__(self, subset, condition=lambda x: True, condrepr=None):
3018 """
3020 """
3019 condition: a function that decides whether a revision in the subset
3021 condition: a function that decides whether a revision in the subset
3020 belongs to the revset or not.
3022 belongs to the revset or not.
3021 condrepr: a tuple of (format, obj, ...), a function or an object that
3023 condrepr: a tuple of (format, obj, ...), a function or an object that
3022 provides a printable representation of the given condition.
3024 provides a printable representation of the given condition.
3023 """
3025 """
3024 self._subset = subset
3026 self._subset = subset
3025 self._condition = condition
3027 self._condition = condition
3026 self._condrepr = condrepr
3028 self._condrepr = condrepr
3027
3029
3028 def __contains__(self, x):
3030 def __contains__(self, x):
3029 return x in self._subset and self._condition(x)
3031 return x in self._subset and self._condition(x)
3030
3032
3031 def __iter__(self):
3033 def __iter__(self):
3032 return self._iterfilter(self._subset)
3034 return self._iterfilter(self._subset)
3033
3035
3034 def _iterfilter(self, it):
3036 def _iterfilter(self, it):
3035 cond = self._condition
3037 cond = self._condition
3036 for x in it:
3038 for x in it:
3037 if cond(x):
3039 if cond(x):
3038 yield x
3040 yield x
3039
3041
3040 @property
3042 @property
3041 def fastasc(self):
3043 def fastasc(self):
3042 it = self._subset.fastasc
3044 it = self._subset.fastasc
3043 if it is None:
3045 if it is None:
3044 return None
3046 return None
3045 return lambda: self._iterfilter(it())
3047 return lambda: self._iterfilter(it())
3046
3048
3047 @property
3049 @property
3048 def fastdesc(self):
3050 def fastdesc(self):
3049 it = self._subset.fastdesc
3051 it = self._subset.fastdesc
3050 if it is None:
3052 if it is None:
3051 return None
3053 return None
3052 return lambda: self._iterfilter(it())
3054 return lambda: self._iterfilter(it())
3053
3055
3054 def __nonzero__(self):
3056 def __nonzero__(self):
3055 fast = None
3057 fast = None
3056 candidates = [self.fastasc if self.isascending() else None,
3058 candidates = [self.fastasc if self.isascending() else None,
3057 self.fastdesc if self.isdescending() else None,
3059 self.fastdesc if self.isdescending() else None,
3058 self.fastasc,
3060 self.fastasc,
3059 self.fastdesc]
3061 self.fastdesc]
3060 for candidate in candidates:
3062 for candidate in candidates:
3061 if candidate is not None:
3063 if candidate is not None:
3062 fast = candidate
3064 fast = candidate
3063 break
3065 break
3064
3066
3065 if fast is not None:
3067 if fast is not None:
3066 it = fast()
3068 it = fast()
3067 else:
3069 else:
3068 it = self
3070 it = self
3069
3071
3070 for r in it:
3072 for r in it:
3071 return True
3073 return True
3072 return False
3074 return False
3073
3075
3074 def __len__(self):
3076 def __len__(self):
3075 # Basic implementation to be changed in future patches.
3077 # Basic implementation to be changed in future patches.
3076 # Until this gets improved, we use a generator expression
3078 # Until this gets improved, we use a generator expression
3077 # here, since a list comprehension is free to call __len__ again,
3079 # here, since a list comprehension is free to call __len__ again,
3078 # causing infinite recursion.
3080 # causing infinite recursion.
3079 l = baseset(r for r in self)
3081 l = baseset(r for r in self)
3080 return len(l)
3082 return len(l)
3081
3083
3082 def sort(self, reverse=False):
3084 def sort(self, reverse=False):
3083 self._subset.sort(reverse=reverse)
3085 self._subset.sort(reverse=reverse)
3084
3086
3085 def reverse(self):
3087 def reverse(self):
3086 self._subset.reverse()
3088 self._subset.reverse()
3087
3089
3088 def isascending(self):
3090 def isascending(self):
3089 return self._subset.isascending()
3091 return self._subset.isascending()
3090
3092
3091 def isdescending(self):
3093 def isdescending(self):
3092 return self._subset.isdescending()
3094 return self._subset.isdescending()
3093
3095
3094 def istopo(self):
3096 def istopo(self):
3095 return self._subset.istopo()
3097 return self._subset.istopo()
3096
3098
3097 def first(self):
3099 def first(self):
3098 for x in self:
3100 for x in self:
3099 return x
3101 return x
3100 return None
3102 return None
3101
3103
3102 def last(self):
3104 def last(self):
3103 it = None
3105 it = None
3104 if self.isascending():
3106 if self.isascending():
3105 it = self.fastdesc
3107 it = self.fastdesc
3106 elif self.isdescending():
3108 elif self.isdescending():
3107 it = self.fastasc
3109 it = self.fastasc
3108 if it is not None:
3110 if it is not None:
3109 for x in it():
3111 for x in it():
3110 return x
3112 return x
3111 return None # empty case
3113 return None # empty case
3112 else:
3114 else:
3113 x = None
3115 x = None
3114 for x in self:
3116 for x in self:
3115 pass
3117 pass
3116 return x
3118 return x
3117
3119
3118 def __repr__(self):
3120 def __repr__(self):
3119 xs = [repr(self._subset)]
3121 xs = [repr(self._subset)]
3120 s = _formatsetrepr(self._condrepr)
3122 s = _formatsetrepr(self._condrepr)
3121 if s:
3123 if s:
3122 xs.append(s)
3124 xs.append(s)
3123 return '<%s %s>' % (type(self).__name__, ', '.join(xs))
3125 return '<%s %s>' % (type(self).__name__, ', '.join(xs))
3124
3126
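# Example of a lazy filter over a baseset, with a printable condition repr:
#
# >>> fs = filteredset(baseset([0, 1, 2, 3, 4]), lambda r: r > 1,
# ...                  condrepr='<gt 1>')
# >>> 0 in fs, 3 in fs
# (False, True)
# >>> list(fs), fs.first(), fs.last()
# ([2, 3, 4], 2, 4)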
3125 def _iterordered(ascending, iter1, iter2):
3127 def _iterordered(ascending, iter1, iter2):
3126 """produce an ordered iteration from two iterators with the same order
3128 """produce an ordered iteration from two iterators with the same order
3127
3129
3128 The ascending parameter is used to indicate the iteration direction.
3130 The ascending parameter is used to indicate the iteration direction.
3129 """
3131 """
3130 choice = max
3132 choice = max
3131 if ascending:
3133 if ascending:
3132 choice = min
3134 choice = min
3133
3135
3134 val1 = None
3136 val1 = None
3135 val2 = None
3137 val2 = None
3136 try:
3138 try:
3137 # Consume both iterators in an ordered way until one is empty
3139 # Consume both iterators in an ordered way until one is empty
3138 while True:
3140 while True:
3139 if val1 is None:
3141 if val1 is None:
3140 val1 = next(iter1)
3142 val1 = next(iter1)
3141 if val2 is None:
3143 if val2 is None:
3142 val2 = next(iter2)
3144 val2 = next(iter2)
3143 n = choice(val1, val2)
3145 n = choice(val1, val2)
3144 yield n
3146 yield n
3145 if val1 == n:
3147 if val1 == n:
3146 val1 = None
3148 val1 = None
3147 if val2 == n:
3149 if val2 == n:
3148 val2 = None
3150 val2 = None
3149 except StopIteration:
3151 except StopIteration:
3150 # Flush any remaining values and consume the other one
3152 # Flush any remaining values and consume the other one
3151 it = iter2
3153 it = iter2
3152 if val1 is not None:
3154 if val1 is not None:
3153 yield val1
3155 yield val1
3154 it = iter1
3156 it = iter1
3155 elif val2 is not None:
3157 elif val2 is not None:
3156 # might have been equality and both are empty
3158 # might have been equality and both are empty
3157 yield val2
3159 yield val2
3158 for val in it:
3160 for val in it:
3159 yield val
3161 yield val
3160
3162
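# For instance, merging two ascending iterators (duplicates collapse):
#
# >>> list(_iterordered(True, iter([1, 3, 5]), iter([2, 3, 6])))
# [1, 2, 3, 5, 6]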
3161 class addset(abstractsmartset):
3163 class addset(abstractsmartset):
3162 """Represent the addition of two sets
3164 """Represent the addition of two sets
3163
3165
3164 Wrapper structure for lazily adding two structures without losing much
3166 Wrapper structure for lazily adding two structures without losing much
3165 performance on the __contains__ method
3167 performance on the __contains__ method
3166
3168
3167 If the ascending attribute is set, that means the two structures are
3169 If the ascending attribute is set, that means the two structures are
3168 ordered in either an ascending or descending way. Therefore, we can add
3170 ordered in either an ascending or descending way. Therefore, we can add
3169 them maintaining the order by iterating over both at the same time
3171 them maintaining the order by iterating over both at the same time
3170
3172
3171 >>> xs = baseset([0, 3, 2])
3173 >>> xs = baseset([0, 3, 2])
3172 >>> ys = baseset([5, 2, 4])
3174 >>> ys = baseset([5, 2, 4])
3173
3175
3174 >>> rs = addset(xs, ys)
3176 >>> rs = addset(xs, ys)
3175 >>> bool(rs), 0 in rs, 1 in rs, 5 in rs, rs.first(), rs.last()
3177 >>> bool(rs), 0 in rs, 1 in rs, 5 in rs, rs.first(), rs.last()
3176 (True, True, False, True, 0, 4)
3178 (True, True, False, True, 0, 4)
3177 >>> rs = addset(xs, baseset([]))
3179 >>> rs = addset(xs, baseset([]))
3178 >>> bool(rs), 0 in rs, 1 in rs, rs.first(), rs.last()
3180 >>> bool(rs), 0 in rs, 1 in rs, rs.first(), rs.last()
3179 (True, True, False, 0, 2)
3181 (True, True, False, 0, 2)
3180 >>> rs = addset(baseset([]), baseset([]))
3182 >>> rs = addset(baseset([]), baseset([]))
3181 >>> bool(rs), 0 in rs, rs.first(), rs.last()
3183 >>> bool(rs), 0 in rs, rs.first(), rs.last()
3182 (False, False, None, None)
3184 (False, False, None, None)
3183
3185
3184 iterate unsorted:
3186 iterate unsorted:
3185 >>> rs = addset(xs, ys)
3187 >>> rs = addset(xs, ys)
3186 >>> # (use generator because pypy could call len())
3188 >>> # (use generator because pypy could call len())
3187 >>> list(x for x in rs) # without _genlist
3189 >>> list(x for x in rs) # without _genlist
3188 [0, 3, 2, 5, 4]
3190 [0, 3, 2, 5, 4]
3189 >>> assert not rs._genlist
3191 >>> assert not rs._genlist
3190 >>> len(rs)
3192 >>> len(rs)
3191 5
3193 5
3192 >>> [x for x in rs] # with _genlist
3194 >>> [x for x in rs] # with _genlist
3193 [0, 3, 2, 5, 4]
3195 [0, 3, 2, 5, 4]
3194 >>> assert rs._genlist
3196 >>> assert rs._genlist
3195
3197
3196 iterate ascending:
3198 iterate ascending:
3197 >>> rs = addset(xs, ys, ascending=True)
3199 >>> rs = addset(xs, ys, ascending=True)
3198 >>> # (use generator because pypy could call len())
3200 >>> # (use generator because pypy could call len())
3199 >>> list(x for x in rs), list(x for x in rs.fastasc()) # without _asclist
3201 >>> list(x for x in rs), list(x for x in rs.fastasc()) # without _asclist
3200 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
3202 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
3201 >>> assert not rs._asclist
3203 >>> assert not rs._asclist
3202 >>> len(rs)
3204 >>> len(rs)
3203 5
3205 5
3204 >>> [x for x in rs], [x for x in rs.fastasc()]
3206 >>> [x for x in rs], [x for x in rs.fastasc()]
3205 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
3207 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
3206 >>> assert rs._asclist
3208 >>> assert rs._asclist
3207
3209
3208 iterate descending:
3210 iterate descending:
3209 >>> rs = addset(xs, ys, ascending=False)
3211 >>> rs = addset(xs, ys, ascending=False)
3210 >>> # (use generator because pypy could call len())
3212 >>> # (use generator because pypy could call len())
3211 >>> list(x for x in rs), list(x for x in rs.fastdesc()) # without _asclist
3213 >>> list(x for x in rs), list(x for x in rs.fastdesc()) # without _asclist
3212 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
3214 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
3213 >>> assert not rs._asclist
3215 >>> assert not rs._asclist
3214 >>> len(rs)
3216 >>> len(rs)
3215 5
3217 5
3216 >>> [x for x in rs], [x for x in rs.fastdesc()]
3218 >>> [x for x in rs], [x for x in rs.fastdesc()]
3217 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
3219 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
3218 >>> assert rs._asclist
3220 >>> assert rs._asclist
3219
3221
3220 iterate ascending without fastasc:
3222 iterate ascending without fastasc:
3221 >>> rs = addset(xs, generatorset(ys), ascending=True)
3223 >>> rs = addset(xs, generatorset(ys), ascending=True)
3222 >>> assert rs.fastasc is None
3224 >>> assert rs.fastasc is None
3223 >>> [x for x in rs]
3225 >>> [x for x in rs]
3224 [0, 2, 3, 4, 5]
3226 [0, 2, 3, 4, 5]
3225
3227
3226 iterate descending without fastdesc:
3228 iterate descending without fastdesc:
3227 >>> rs = addset(generatorset(xs), ys, ascending=False)
3229 >>> rs = addset(generatorset(xs), ys, ascending=False)
3228 >>> assert rs.fastdesc is None
3230 >>> assert rs.fastdesc is None
3229 >>> [x for x in rs]
3231 >>> [x for x in rs]
3230 [5, 4, 3, 2, 0]
3232 [5, 4, 3, 2, 0]
3231 """
3233 """
3232 def __init__(self, revs1, revs2, ascending=None):
3234 def __init__(self, revs1, revs2, ascending=None):
3233 self._r1 = revs1
3235 self._r1 = revs1
3234 self._r2 = revs2
3236 self._r2 = revs2
3235 self._iter = None
3237 self._iter = None
3236 self._ascending = ascending
3238 self._ascending = ascending
3237 self._genlist = None
3239 self._genlist = None
3238 self._asclist = None
3240 self._asclist = None
3239
3241
3240 def __len__(self):
3242 def __len__(self):
3241 return len(self._list)
3243 return len(self._list)
3242
3244
3243 def __nonzero__(self):
3245 def __nonzero__(self):
3244 return bool(self._r1) or bool(self._r2)
3246 return bool(self._r1) or bool(self._r2)
3245
3247
3246 @util.propertycache
3248 @util.propertycache
3247 def _list(self):
3249 def _list(self):
3248 if not self._genlist:
3250 if not self._genlist:
3249 self._genlist = baseset(iter(self))
3251 self._genlist = baseset(iter(self))
3250 return self._genlist
3252 return self._genlist
3251
3253
3252 def __iter__(self):
3254 def __iter__(self):
3253 """Iterate over both collections without repeating elements
3255 """Iterate over both collections without repeating elements
3254
3256
3255 If the ascending attribute is not set, iterate over the first one and
3257 If the ascending attribute is not set, iterate over the first one and
3256 then over the second one, checking for membership in the first one so we
3258 then over the second one, checking for membership in the first one so we
3257 don't yield any duplicates.
3259 don't yield any duplicates.
3258
3260
3259 If the ascending attribute is set, iterate over both collections at the
3261 If the ascending attribute is set, iterate over both collections at the
3260 same time, yielding only one value at a time in the given order.
3262 same time, yielding only one value at a time in the given order.
3261 """
3263 """
3262 if self._ascending is None:
3264 if self._ascending is None:
3263 if self._genlist:
3265 if self._genlist:
3264 return iter(self._genlist)
3266 return iter(self._genlist)
3265 def arbitraryordergen():
3267 def arbitraryordergen():
3266 for r in self._r1:
3268 for r in self._r1:
3267 yield r
3269 yield r
3268 inr1 = self._r1.__contains__
3270 inr1 = self._r1.__contains__
3269 for r in self._r2:
3271 for r in self._r2:
3270 if not inr1(r):
3272 if not inr1(r):
3271 yield r
3273 yield r
3272 return arbitraryordergen()
3274 return arbitraryordergen()
3273 # try to use our own fast iterator if it exists
3275 # try to use our own fast iterator if it exists
3274 self._trysetasclist()
3276 self._trysetasclist()
3275 if self._ascending:
3277 if self._ascending:
3276 attr = 'fastasc'
3278 attr = 'fastasc'
3277 else:
3279 else:
3278 attr = 'fastdesc'
3280 attr = 'fastdesc'
3279 it = getattr(self, attr)
3281 it = getattr(self, attr)
3280 if it is not None:
3282 if it is not None:
3281 return it()
3283 return it()
3284 # maybe only one of the two components supports fast iteration
3286 # maybe only one of the two components supports fast iteration
3283 # get iterator for _r1
3285 # get iterator for _r1
3284 iter1 = getattr(self._r1, attr)
3286 iter1 = getattr(self._r1, attr)
3285 if iter1 is None:
3287 if iter1 is None:
3286 # let's avoid side effect (not sure it matters)
3288 # let's avoid side effect (not sure it matters)
3287 iter1 = iter(sorted(self._r1, reverse=not self._ascending))
3289 iter1 = iter(sorted(self._r1, reverse=not self._ascending))
3288 else:
3290 else:
3289 iter1 = iter1()
3291 iter1 = iter1()
3290 # get iterator for _r2
3292 # get iterator for _r2
3291 iter2 = getattr(self._r2, attr)
3293 iter2 = getattr(self._r2, attr)
3292 if iter2 is None:
3294 if iter2 is None:
3293 # let's avoid side effect (not sure it matters)
3295 # let's avoid side effect (not sure it matters)
3294 iter2 = iter(sorted(self._r2, reverse=not self._ascending))
3296 iter2 = iter(sorted(self._r2, reverse=not self._ascending))
3295 else:
3297 else:
3296 iter2 = iter2()
3298 iter2 = iter2()
3297 return _iterordered(self._ascending, iter1, iter2)
3299 return _iterordered(self._ascending, iter1, iter2)
3298
3300
3299 def _trysetasclist(self):
3301 def _trysetasclist(self):
3300 """populate the _asclist attribute if possible and necessary"""
3302 """populate the _asclist attribute if possible and necessary"""
3301 if self._genlist is not None and self._asclist is None:
3303 if self._genlist is not None and self._asclist is None:
3302 self._asclist = sorted(self._genlist)
3304 self._asclist = sorted(self._genlist)
3303
3305
3304 @property
3306 @property
3305 def fastasc(self):
3307 def fastasc(self):
3306 self._trysetasclist()
3308 self._trysetasclist()
3307 if self._asclist is not None:
3309 if self._asclist is not None:
3308 return self._asclist.__iter__
3310 return self._asclist.__iter__
3309 iter1 = self._r1.fastasc
3311 iter1 = self._r1.fastasc
3310 iter2 = self._r2.fastasc
3312 iter2 = self._r2.fastasc
3311 if None in (iter1, iter2):
3313 if None in (iter1, iter2):
3312 return None
3314 return None
3313 return lambda: _iterordered(True, iter1(), iter2())
3315 return lambda: _iterordered(True, iter1(), iter2())
3314
3316
3315 @property
3317 @property
3316 def fastdesc(self):
3318 def fastdesc(self):
3317 self._trysetasclist()
3319 self._trysetasclist()
3318 if self._asclist is not None:
3320 if self._asclist is not None:
3319 return self._asclist.__reversed__
3321 return self._asclist.__reversed__
3320 iter1 = self._r1.fastdesc
3322 iter1 = self._r1.fastdesc
3321 iter2 = self._r2.fastdesc
3323 iter2 = self._r2.fastdesc
3322 if None in (iter1, iter2):
3324 if None in (iter1, iter2):
3323 return None
3325 return None
3324 return lambda: _iterordered(False, iter1(), iter2())
3326 return lambda: _iterordered(False, iter1(), iter2())
3325
3327
3326 def __contains__(self, x):
3328 def __contains__(self, x):
3327 return x in self._r1 or x in self._r2
3329 return x in self._r1 or x in self._r2
3328
3330
3329 def sort(self, reverse=False):
3331 def sort(self, reverse=False):
3330 """Sort the added set
3332 """Sort the added set
3331
3333
3332 For this we use the cached list with all the generated values and if we
3334 For this we use the cached list with all the generated values and if we
3333 know they are ascending or descending we can sort them in a smart way.
3335 know they are ascending or descending we can sort them in a smart way.
3334 """
3336 """
3335 self._ascending = not reverse
3337 self._ascending = not reverse
3336
3338
3337 def isascending(self):
3339 def isascending(self):
3338 return self._ascending is not None and self._ascending
3340 return self._ascending is not None and self._ascending
3339
3341
3340 def isdescending(self):
3342 def isdescending(self):
3341 return self._ascending is not None and not self._ascending
3343 return self._ascending is not None and not self._ascending
3342
3344
3343 def istopo(self):
3345 def istopo(self):
3344 # not worth the trouble asserting if the two sets combined are still
3346 # not worth the trouble asserting if the two sets combined are still
3345 # in topographical order. Use the sort() predicate to explicitly sort
3347 # in topographical order. Use the sort() predicate to explicitly sort
3346 # again instead.
3348 # again instead.
3347 return False
3349 return False
3348
3350
3349 def reverse(self):
3351 def reverse(self):
3350 if self._ascending is None:
3352 if self._ascending is None:
3351 self._list.reverse()
3353 self._list.reverse()
3352 else:
3354 else:
3353 self._ascending = not self._ascending
3355 self._ascending = not self._ascending
3354
3356
3355 def first(self):
3357 def first(self):
3356 for x in self:
3358 for x in self:
3357 return x
3359 return x
3358 return None
3360 return None
3359
3361
3360 def last(self):
3362 def last(self):
3361 self.reverse()
3363 self.reverse()
3362 val = self.first()
3364 val = self.first()
3363 self.reverse()
3365 self.reverse()
3364 return val
3366 return val
3365
3367
3366 def __repr__(self):
3368 def __repr__(self):
3367 d = {None: '', False: '-', True: '+'}[self._ascending]
3369 d = {None: '', False: '-', True: '+'}[self._ascending]
3368 return '<%s%s %r, %r>' % (type(self).__name__, d, self._r1, self._r2)
3370 return '<%s%s %r, %r>' % (type(self).__name__, d, self._r1, self._r2)
3369
3371
3370 class generatorset(abstractsmartset):
3372 class generatorset(abstractsmartset):
3371 """Wrap a generator for lazy iteration
3373 """Wrap a generator for lazy iteration
3372
3374
3373 Wrapper structure for generators that provides lazy membership and can
3375 Wrapper structure for generators that provides lazy membership and can
3374 be iterated more than once.
3376 be iterated more than once.
3375 When asked for membership it generates values until either it finds the
3377 When asked for membership it generates values until either it finds the
3376 requested one or has gone through all the elements in the generator
3378 requested one or has gone through all the elements in the generator
3377 """
3379 """
3378 def __init__(self, gen, iterasc=None):
3380 def __init__(self, gen, iterasc=None):
3379 """
3381 """
3380 gen: a generator producing the values for the generatorset.
3382 gen: a generator producing the values for the generatorset.
3381 """
3383 """
3382 self._gen = gen
3384 self._gen = gen
3383 self._asclist = None
3385 self._asclist = None
3384 self._cache = {}
3386 self._cache = {}
3385 self._genlist = []
3387 self._genlist = []
3386 self._finished = False
3388 self._finished = False
3387 self._ascending = True
3389 self._ascending = True
3388 if iterasc is not None:
3390 if iterasc is not None:
3389 if iterasc:
3391 if iterasc:
3390 self.fastasc = self._iterator
3392 self.fastasc = self._iterator
3391 self.__contains__ = self._asccontains
3393 self.__contains__ = self._asccontains
3392 else:
3394 else:
3393 self.fastdesc = self._iterator
3395 self.fastdesc = self._iterator
3394 self.__contains__ = self._desccontains
3396 self.__contains__ = self._desccontains
3395
3397
3396 def __nonzero__(self):
3398 def __nonzero__(self):
3397 # Do not use 'for r in self' because it will enforce the iteration
3399 # Do not use 'for r in self' because it will enforce the iteration
3398 # order (default ascending), possibly unrolling a whole descending
3400 # order (default ascending), possibly unrolling a whole descending
3399 # iterator.
3401 # iterator.
3400 if self._genlist:
3402 if self._genlist:
3401 return True
3403 return True
3402 for r in self._consumegen():
3404 for r in self._consumegen():
3403 return True
3405 return True
3404 return False
3406 return False
3405
3407
3406 def __contains__(self, x):
3408 def __contains__(self, x):
3407 if x in self._cache:
3409 if x in self._cache:
3408 return self._cache[x]
3410 return self._cache[x]
3409
3411
3410 # Use new values only, as existing values would be cached.
3412 # Use new values only, as existing values would be cached.
3411 for l in self._consumegen():
3413 for l in self._consumegen():
3412 if l == x:
3414 if l == x:
3413 return True
3415 return True
3414
3416
3415 self._cache[x] = False
3417 self._cache[x] = False
3416 return False
3418 return False
3417
3419
3418 def _asccontains(self, x):
3420 def _asccontains(self, x):
3419 """version of contains optimised for ascending generator"""
3421 """version of contains optimised for ascending generator"""
3420 if x in self._cache:
3422 if x in self._cache:
3421 return self._cache[x]
3423 return self._cache[x]
3422
3424
3423 # Use new values only, as existing values would be cached.
3425 # Use new values only, as existing values would be cached.
3424 for l in self._consumegen():
3426 for l in self._consumegen():
3425 if l == x:
3427 if l == x:
3426 return True
3428 return True
3427 if l > x:
3429 if l > x:
3428 break
3430 break
3429
3431
3430 self._cache[x] = False
3432 self._cache[x] = False
3431 return False
3433 return False
3432
3434
3433 def _desccontains(self, x):
3435 def _desccontains(self, x):
3434 """version of contains optimised for descending generator"""
3436 """version of contains optimised for descending generator"""
3435 if x in self._cache:
3437 if x in self._cache:
3436 return self._cache[x]
3438 return self._cache[x]
3437
3439
3438 # Use new values only, as existing values would be cached.
3440 # Use new values only, as existing values would be cached.
3439 for l in self._consumegen():
3441 for l in self._consumegen():
3440 if l == x:
3442 if l == x:
3441 return True
3443 return True
3442 if l < x:
3444 if l < x:
3443 break
3445 break
3444
3446
3445 self._cache[x] = False
3447 self._cache[x] = False
3446 return False
3448 return False
3447
3449
3448 def __iter__(self):
3450 def __iter__(self):
3449 if self._ascending:
3451 if self._ascending:
3450 it = self.fastasc
3452 it = self.fastasc
3451 else:
3453 else:
3452 it = self.fastdesc
3454 it = self.fastdesc
3453 if it is not None:
3455 if it is not None:
3454 return it()
3456 return it()
3455 # we need to consume the iterator
3457 # we need to consume the iterator
3456 for x in self._consumegen():
3458 for x in self._consumegen():
3457 pass
3459 pass
3458 # recall the same code
3460 # recall the same code
3459 return iter(self)
3461 return iter(self)
3460
3462
3461 def _iterator(self):
3463 def _iterator(self):
3462 if self._finished:
3464 if self._finished:
3463 return iter(self._genlist)
3465 return iter(self._genlist)
3464
3466
3465 # We have to use this complex iteration strategy to allow multiple
3467 # We have to use this complex iteration strategy to allow multiple
3466 # iterations at the same time. We need to be able to catch revisions
3468 # iterations at the same time. We need to be able to catch revisions
3467 # removed from _consumegen and added to genlist in another instance.
3469 # removed from _consumegen and added to genlist in another instance.
3468 #
3470 #
3469 # Getting rid of it would provide about a 15% speedup on this
3471 # Getting rid of it would provide about a 15% speedup on this
3470 # iteration.
3472 # iteration.
3471 genlist = self._genlist
3473 genlist = self._genlist
3472 nextrev = self._consumegen().next
3474 nextrev = self._consumegen().next
3473 _len = len # cache global lookup
3475 _len = len # cache global lookup
3474 def gen():
3476 def gen():
3475 i = 0
3477 i = 0
3476 while True:
3478 while True:
3477 if i < _len(genlist):
3479 if i < _len(genlist):
3478 yield genlist[i]
3480 yield genlist[i]
3479 else:
3481 else:
3480 yield nextrev()
3482 yield nextrev()
3481 i += 1
3483 i += 1
3482 return gen()
3484 return gen()
3483
3485
3484 def _consumegen(self):
3486 def _consumegen(self):
3485 cache = self._cache
3487 cache = self._cache
3486 genlist = self._genlist.append
3488 genlist = self._genlist.append
3487 for item in self._gen:
3489 for item in self._gen:
3488 cache[item] = True
3490 cache[item] = True
3489 genlist(item)
3491 genlist(item)
3490 yield item
3492 yield item
3491 if not self._finished:
3493 if not self._finished:
3492 self._finished = True
3494 self._finished = True
3493 asc = self._genlist[:]
3495 asc = self._genlist[:]
3494 asc.sort()
3496 asc.sort()
3495 self._asclist = asc
3497 self._asclist = asc
3496 self.fastasc = asc.__iter__
3498 self.fastasc = asc.__iter__
3497 self.fastdesc = asc.__reversed__
3499 self.fastdesc = asc.__reversed__
3498
3500
3499 def __len__(self):
3501 def __len__(self):
3500 for x in self._consumegen():
3502 for x in self._consumegen():
3501 pass
3503 pass
3502 return len(self._genlist)
3504 return len(self._genlist)
3503
3505
3504 def sort(self, reverse=False):
3506 def sort(self, reverse=False):
3505 self._ascending = not reverse
3507 self._ascending = not reverse
3506
3508
3507 def reverse(self):
3509 def reverse(self):
3508 self._ascending = not self._ascending
3510 self._ascending = not self._ascending
3509
3511
3510 def isascending(self):
3512 def isascending(self):
3511 return self._ascending
3513 return self._ascending
3512
3514
3513 def isdescending(self):
3515 def isdescending(self):
3514 return not self._ascending
3516 return not self._ascending
3515
3517
3516 def istopo(self):
3518 def istopo(self):
3517 # not worth the trouble asserting if the two sets combined are still
3519 # not worth the trouble asserting if the two sets combined are still
3518 # in topographical order. Use the sort() predicate to explicitly sort
3520 # in topographical order. Use the sort() predicate to explicitly sort
3519 # again instead.
3521 # again instead.
3520 return False
3522 return False
3521
3523
3522 def first(self):
3524 def first(self):
3523 if self._ascending:
3525 if self._ascending:
3524 it = self.fastasc
3526 it = self.fastasc
3525 else:
3527 else:
3526 it = self.fastdesc
3528 it = self.fastdesc
3527 if it is None:
3529 if it is None:
3528 # we need to consume all and try again
3530 # we need to consume all and try again
3529 for x in self._consumegen():
3531 for x in self._consumegen():
3530 pass
3532 pass
3531 return self.first()
3533 return self.first()
3532 return next(it(), None)
3534 return next(it(), None)
3533
3535
3534 def last(self):
3536 def last(self):
3535 if self._ascending:
3537 if self._ascending:
3536 it = self.fastdesc
3538 it = self.fastdesc
3537 else:
3539 else:
3538 it = self.fastasc
3540 it = self.fastasc
3539 if it is None:
3541 if it is None:
3540 # we need to consume all and try again
3542 # we need to consume all and try again
3541 for x in self._consumegen():
3543 for x in self._consumegen():
3542 pass
3544 pass
3543 return self.first()
3545 return self.first()
3544 return next(it(), None)
3546 return next(it(), None)
3545
3547
3546 def __repr__(self):
3548 def __repr__(self):
3547 d = {False: '-', True: '+'}[self._ascending]
3549 d = {False: '-', True: '+'}[self._ascending]
3548 return '<%s%s>' % (type(self).__name__, d)
3550 return '<%s%s>' % (type(self).__name__, d)
3549
3551
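# Example: wrapping a generator whose values are known to come out in
# ascending order; produced values are cached, so membership tests and
# repeated iteration stay cheap:
#
# >>> gs = generatorset(iter([0, 2, 4]), iterasc=True)
# >>> 3 in gs, 4 in gs
# (False, True)
# >>> list(gs), len(gs)
# ([0, 2, 4], 3)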
3550 class spanset(abstractsmartset):
3552 class spanset(abstractsmartset):
3551 """Duck type for baseset class which represents a range of revisions and
3553 """Duck type for baseset class which represents a range of revisions and
3552 can work lazily and without having all the range in memory
3554 can work lazily and without having all the range in memory
3553
3555
3554 Note that spanset(x, y) behaves almost like xrange(x, y) except for two
3556 Note that spanset(x, y) behaves almost like xrange(x, y) except for two
3555 notable points:
3557 notable points:
3556 - when x > y the set will automatically be descending,
3558 - when x > y the set will automatically be descending,
3557 - revisions filtered by this repoview will be skipped.
3559 - revisions filtered by this repoview will be skipped.
3558
3560
3559 """
3561 """
3560 def __init__(self, repo, start=0, end=None):
3562 def __init__(self, repo, start=0, end=None):
3561 """
3563 """
3562 start: first revision included in the set
3564 start: first revision included in the set
3563 (defaults to 0)
3565 (defaults to 0)
3564 end: first revision excluded (last+1)
3566 end: first revision excluded (last+1)
3565 (defaults to len(repo))
3567 (defaults to len(repo))
3566
3568
3567 Spanset will be descending if `end` < `start`.
3569 Spanset will be descending if `end` < `start`.
3568 """
3570 """
3569 if end is None:
3571 if end is None:
3570 end = len(repo)
3572 end = len(repo)
3571 self._ascending = start <= end
3573 self._ascending = start <= end
3572 if not self._ascending:
3574 if not self._ascending:
3573 start, end = end + 1, start + 1
3575 start, end = end + 1, start + 1
3574 self._start = start
3576 self._start = start
3575 self._end = end
3577 self._end = end
3576 self._hiddenrevs = repo.changelog.filteredrevs
3578 self._hiddenrevs = repo.changelog.filteredrevs
3577
3579
3578 def sort(self, reverse=False):
3580 def sort(self, reverse=False):
3579 self._ascending = not reverse
3581 self._ascending = not reverse
3580
3582
3581 def reverse(self):
3583 def reverse(self):
3582 self._ascending = not self._ascending
3584 self._ascending = not self._ascending
3583
3585
3584 def istopo(self):
3586 def istopo(self):
3585 # not worth the trouble asserting if the two sets combined are still
3587 # not worth the trouble asserting if the two sets combined are still
3586 # in topographical order. Use the sort() predicate to explicitly sort
3588 # in topographical order. Use the sort() predicate to explicitly sort
3587 # again instead.
3589 # again instead.
3588 return False
3590 return False
3589
3591
3590 def _iterfilter(self, iterrange):
3592 def _iterfilter(self, iterrange):
3591 s = self._hiddenrevs
3593 s = self._hiddenrevs
3592 for r in iterrange:
3594 for r in iterrange:
3593 if r not in s:
3595 if r not in s:
3594 yield r
3596 yield r
3595
3597
3596 def __iter__(self):
3598 def __iter__(self):
3597 if self._ascending:
3599 if self._ascending:
3598 return self.fastasc()
3600 return self.fastasc()
3599 else:
3601 else:
3600 return self.fastdesc()
3602 return self.fastdesc()
3601
3603
3602 def fastasc(self):
3604 def fastasc(self):
3603 iterrange = xrange(self._start, self._end)
3605 iterrange = xrange(self._start, self._end)
3604 if self._hiddenrevs:
3606 if self._hiddenrevs:
3605 return self._iterfilter(iterrange)
3607 return self._iterfilter(iterrange)
3606 return iter(iterrange)
3608 return iter(iterrange)
3607
3609
3608 def fastdesc(self):
3610 def fastdesc(self):
3609 iterrange = xrange(self._end - 1, self._start - 1, -1)
3611 iterrange = xrange(self._end - 1, self._start - 1, -1)
3610 if self._hiddenrevs:
3612 if self._hiddenrevs:
3611 return self._iterfilter(iterrange)
3613 return self._iterfilter(iterrange)
3612 return iter(iterrange)
3614 return iter(iterrange)
3613
3615
3614 def __contains__(self, rev):
3616 def __contains__(self, rev):
3615 hidden = self._hiddenrevs
3617 hidden = self._hiddenrevs
3616 return ((self._start <= rev < self._end)
3618 return ((self._start <= rev < self._end)
3617 and not (hidden and rev in hidden))
3619 and not (hidden and rev in hidden))
3618
3620
3619 def __nonzero__(self):
3621 def __nonzero__(self):
3620 for r in self:
3622 for r in self:
3621 return True
3623 return True
3622 return False
3624 return False
3623
3625
3624 def __len__(self):
3626 def __len__(self):
3625 if not self._hiddenrevs:
3627 if not self._hiddenrevs:
3626 return abs(self._end - self._start)
3628 return abs(self._end - self._start)
3627 else:
3629 else:
3628 count = 0
3630 count = 0
3629 start = self._start
3631 start = self._start
3630 end = self._end
3632 end = self._end
3631 for rev in self._hiddenrevs:
3633 for rev in self._hiddenrevs:
3632 if (end < rev <= start) or (start <= rev < end):
3634 if (end < rev <= start) or (start <= rev < end):
3633 count += 1
3635 count += 1
3634 return abs(self._end - self._start) - count
3636 return abs(self._end - self._start) - count
3635
3637
3636 def isascending(self):
3638 def isascending(self):
3637 return self._ascending
3639 return self._ascending
3638
3640
3639 def isdescending(self):
3641 def isdescending(self):
3640 return not self._ascending
3642 return not self._ascending
3641
3643
3642 def first(self):
3644 def first(self):
3643 if self._ascending:
3645 if self._ascending:
3644 it = self.fastasc
3646 it = self.fastasc
3645 else:
3647 else:
3646 it = self.fastdesc
3648 it = self.fastdesc
3647 for x in it():
3649 for x in it():
3648 return x
3650 return x
3649 return None
3651 return None
3650
3652
3651 def last(self):
3653 def last(self):
3652 if self._ascending:
3654 if self._ascending:
3653 it = self.fastdesc
3655 it = self.fastdesc
3654 else:
3656 else:
3655 it = self.fastasc
3657 it = self.fastasc
3656 for x in it():
3658 for x in it():
3657 return x
3659 return x
3658 return None
3660 return None
3659
3661
3660 def __repr__(self):
3662 def __repr__(self):
3661 d = {False: '-', True: '+'}[self._ascending]
3663 d = {False: '-', True: '+'}[self._ascending]
3662 return '<%s%s %d:%d>' % (type(self).__name__, d,
3664 return '<%s%s %d:%d>' % (type(self).__name__, d,
3663 self._start, self._end - 1)
3665 self._start, self._end - 1)
3664
3666
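# Illustrative sketch (not part of upstream revset.py): the ordering rules
# described in the spanset docstring above. The repo argument and revision
# numbers are hypothetical; the helper is only defined, never called here.
def _demo_spanset_ordering(repo):
    forward = spanset(repo, 0, 3)   # ascending: 0, 1, 2 (minus hidden revs)
    backward = spanset(repo, 3, 0)  # descending: 3, 2, 1 (minus hidden revs)
    return list(forward), list(backward)
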
class fullreposet(spanset):
    """a set containing all revisions in the repo

    This class exists to host special optimization and magic to handle virtual
    revisions such as "null".
    """

    def __init__(self, repo):
        super(fullreposet, self).__init__(repo)

    def __and__(self, other):
        """As self contains the whole repo, all of the other set should also
        be in self. Therefore `self & other = other`.

        This boldly assumes the other contains valid revs only.
        """
        # other is not a smartset, make it so
        if not util.safehasattr(other, 'isascending'):
            # filter out hidden revisions
            # (this boldly assumes all smartsets are pure)
            #
            # `other` was used with "&", let's assume this is a set like
            # object.
            other = baseset(other - self._hiddenrevs)

        # XXX As fullreposet is also used as bootstrap, this is wrong.
        #
        # With a giveme312() revset returning [3,1,2], this makes
        # 'hg log -r "giveme312()"' -> 1, 2, 3 (wrong)
        # We cannot just drop it because other usages still need to sort it:
        # 'hg log -r "all() and giveme312()"' -> 1, 2, 3 (right)
        #
        # There are also some faulty revset implementations that rely on it
        # (eg: children as of its state in e8075329c5fb)
        #
        # When we fix the two points above we can move this into the if clause
        other.sort(reverse=self.isdescending())
        return other

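# Illustrative sketch (not part of upstream revset.py): the intersection
# behaviour documented in fullreposet.__and__ above. The repo argument is
# hypothetical and the helper is never invoked at import time.
def _demo_fullreposet_and(repo):
    everything = fullreposet(repo)
    picked = baseset([3, 1, 2])
    # fullreposet & other hands back `other`, re-sorted to match the
    # direction of the full set (ascending by default).
    return everything & picked
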
def prettyformatset(revs):
    lines = []
    rs = repr(revs)
    p = 0
    while p < len(rs):
        q = rs.find('<', p + 1)
        if q < 0:
            q = len(rs)
        l = rs.count('<', 0, p) - rs.count('>', 0, p)
        assert l >= 0
        lines.append((l, rs[p:q].rstrip()))
        p = q
    return '\n'.join(' ' * l + s for l, s in lines)

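# Illustrative sketch (not part of upstream revset.py): prettyformatset()
# splits a smartset repr at every '<' and indents each fragment by its
# nesting depth, which helps when debugging composed sets such as
# filteredset wrappers. The helper is only defined, never called here.
def _demo_prettyformatset():
    s = baseset([0, 1, 2])
    # For a flat set this is just the plain repr; nested '<...>' wrappers
    # each add one level of indentation.
    return prettyformatset(s)
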
def loadpredicate(ui, extname, registrarobj):
    """Load revset predicates from specified registrarobj
    """
    for name, func in registrarobj._table.iteritems():
        symbols[name] = func
        if func._safe:
            safesymbols.add(name)

# load built-in predicates explicitly to set up safesymbols
loadpredicate(None, None, predicate)

# tell hggettext to extract docstrings from these functions:
i18nfunctions = symbols.values()

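# Illustrative sketch (not part of upstream revset.py): how a predicate table
# built with registrar.revsetpredicate() feeds loadpredicate(). The predicate
# name, its body and the table variable below are hypothetical; extensions
# normally expose such a table and let Mercurial register it for them.
_demopredicates = registrar.revsetpredicate()

@_demopredicates('demoempty()', safe=True)
def _demoempty(repo, subset, x):
    """Hypothetical predicate matching no changesets at all."""
    getargs(x, 0, 0, _("demoempty takes no arguments"))
    return baseset()

# Feeding the hypothetical table through loadpredicate() would publish the
# symbol (and, because safe=True, add it to safesymbols), exactly as done for
# the built-in predicates above:
# loadpredicate(None, 'demo', _demopredicates)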