revset: build dict of extra sort options before evaluating set...
Yuya Nishihara
r29364:76a1a703 default
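The change this revision describes is not visible in the hunk below; it lands further down in this file, presumably in the sort() predicate. As a rough, hypothetical sketch of the pattern named in the summary (collecting the extra keyword options into a dict before the input set is evaluated), and not the actual patch:

    # hypothetical illustration only; argument names are assumptions
    opts = {}
    for k, v in args.items():
        if k not in ('set', 'keys'):
            opts[k] = getstring(v, _("sort option must be a string"))
    revs = getset(repo, subset, args['set'])  # evaluated only after opts is built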
@@ -1,3664 +1,3667 @@
# revset.py - revision set queries for mercurial
#
# Copyright 2010 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import heapq
import re

from .i18n import _
from . import (
    destutil,
    encoding,
    error,
    hbisect,
    match as matchmod,
    node,
    obsolete as obsmod,
    parser,
    pathutil,
    phases,
    registrar,
    repoview,
    util,
)

def _revancestors(repo, revs, followfirst):
    """Like revlog.ancestors(), but supports followfirst."""
    if followfirst:
        cut = 1
    else:
        cut = None
    cl = repo.changelog

    def iterate():
        revs.sort(reverse=True)
        irevs = iter(revs)
        h = []
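        # heapq implements a min-heap, so revisions are pushed onto 'h'
        # negated; popping and re-negating then yields candidates in
        # descending revision order, matching the iterasc=False hint on the
        # generatorset returned below.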

        inputrev = next(irevs, None)
        if inputrev is not None:
            heapq.heappush(h, -inputrev)

        seen = set()
        while h:
            current = -heapq.heappop(h)
            if current == inputrev:
                inputrev = next(irevs, None)
                if inputrev is not None:
                    heapq.heappush(h, -inputrev)
            if current not in seen:
                seen.add(current)
                yield current
                for parent in cl.parentrevs(current)[:cut]:
                    if parent != node.nullrev:
                        heapq.heappush(h, -parent)

    return generatorset(iterate(), iterasc=False)

def _revdescendants(repo, revs, followfirst):
    """Like revlog.descendants() but supports followfirst."""
    if followfirst:
        cut = 1
    else:
        cut = None

    def iterate():
        cl = repo.changelog
        # XXX this should be 'parentset.min()' assuming 'parentset' is a
        # smartset (and if it is not, it should.)
        first = min(revs)
        nullrev = node.nullrev
        if first == nullrev:
            # Are there nodes with a null first parent and a non-null
            # second one? Maybe. Do we care? Probably not.
            for i in cl:
                yield i
        else:
            seen = set(revs)
            for i in cl.revs(first + 1):
                for x in cl.parentrevs(i)[:cut]:
                    if x != nullrev and x in seen:
                        seen.add(i)
                        yield i
                        break

    return generatorset(iterate(), iterasc=True)

def _reachablerootspure(repo, minroot, roots, heads, includepath):
    """return (heads(::<roots> and ::<heads>))

    If includepath is True, return (<roots>::<heads>)."""
    if not roots:
        return []
    parentrevs = repo.changelog.parentrevs
    roots = set(roots)
    visit = list(heads)
    reachable = set()
    seen = {}
    # prefetch all the things! (because python is slow)
    reached = reachable.add
    dovisit = visit.append
    nextvisit = visit.pop
    # open-code the post-order traversal due to the tiny size of
    # sys.getrecursionlimit()
    while visit:
        rev = nextvisit()
        if rev in roots:
            reached(rev)
            if not includepath:
                continue
        parents = parentrevs(rev)
        seen[rev] = parents
        for parent in parents:
            if parent >= minroot and parent not in seen:
                dovisit(parent)
    if not reachable:
        return baseset()
    if not includepath:
        return reachable
    for rev in sorted(seen):
        for parent in seen[rev]:
            if parent in reachable:
                reached(rev)
    return reachable

def reachableroots(repo, roots, heads, includepath=False):
    """return (heads(::<roots> and ::<heads>))

    If includepath is True, return (<roots>::<heads>)."""
    if not roots:
        return baseset()
    minroot = roots.min()
    roots = list(roots)
    heads = list(heads)
    try:
        revs = repo.changelog.reachableroots(minroot, heads, roots, includepath)
    except AttributeError:
        revs = _reachablerootspure(repo, minroot, roots, heads, includepath)
    revs = baseset(revs)
    revs.sort()
    return revs

elements = {
    # token-type: binding-strength, primary, prefix, infix, suffix
    "(": (21, None, ("group", 1, ")"), ("func", 1, ")"), None),
    "##": (20, None, None, ("_concat", 20), None),
    "~": (18, None, None, ("ancestor", 18), None),
    "^": (18, None, None, ("parent", 18), ("parentpost", 18)),
    "-": (5, None, ("negate", 19), ("minus", 5), None),
    "::": (17, None, ("dagrangepre", 17), ("dagrange", 17),
           ("dagrangepost", 17)),
    "..": (17, None, ("dagrangepre", 17), ("dagrange", 17),
           ("dagrangepost", 17)),
    ":": (15, "rangeall", ("rangepre", 15), ("range", 15), ("rangepost", 15)),
    "not": (10, None, ("not", 10), None, None),
    "!": (10, None, ("not", 10), None, None),
    "and": (5, None, None, ("and", 5), None),
    "&": (5, None, None, ("and", 5), None),
    "%": (5, None, None, ("only", 5), ("onlypost", 5)),
    "or": (4, None, None, ("or", 4), None),
    "|": (4, None, None, ("or", 4), None),
    "+": (4, None, None, ("or", 4), None),
    "=": (3, None, None, ("keyvalue", 3), None),
    ",": (2, None, None, ("list", 2), None),
    ")": (0, None, None, None, None),
    "symbol": (0, "symbol", None, None, None),
    "string": (0, "string", None, None, None),
    "end": (0, None, None, None, None),
}
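# For example, '&' (binding strength 5) binds tighter than '|' (4), so
# 'a | b & c' parses as 'a | (b & c)', and ':' (15) binds tighter than both,
# so '1:2 & 3' parses as '(1:2) & 3'.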

keywords = set(['and', 'or', 'not'])

# default set of valid characters for the initial letter of symbols
_syminitletters = set(c for c in [chr(i) for i in xrange(256)]
                      if c.isalnum() or c in '._@' or ord(c) > 127)

# default set of valid characters for non-initial letters of symbols
_symletters = set(c for c in [chr(i) for i in xrange(256)]
                  if c.isalnum() or c in '-._/@' or ord(c) > 127)

def tokenize(program, lookup=None, syminitletters=None, symletters=None):
    '''
    Parse a revset statement into a stream of tokens

    ``syminitletters`` is the set of valid characters for the initial
    letter of symbols.

    By default, character ``c`` is recognized as valid for initial
    letter of symbols, if ``c.isalnum() or c in '._@' or ord(c) > 127``.

    ``symletters`` is the set of valid characters for non-initial
    letters of symbols.

    By default, character ``c`` is recognized as valid for non-initial
    letters of symbols, if ``c.isalnum() or c in '-._/@' or ord(c) > 127``.

    Check that @ is a valid unquoted token character (issue3686):
    >>> list(tokenize("@::"))
    [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]

    '''
    if syminitletters is None:
        syminitletters = _syminitletters
    if symletters is None:
        symletters = _symletters

    if program and lookup:
        # attempt to parse old-style ranges first to deal with
        # things like old-tag which contain query metacharacters
        parts = program.split(':', 1)
        if all(lookup(sym) for sym in parts if sym):
            if parts[0]:
                yield ('symbol', parts[0], 0)
            if len(parts) > 1:
                s = len(parts[0])
                yield (':', None, s)
                if parts[1]:
                    yield ('symbol', parts[1], s + 1)
            yield ('end', None, len(program))
            return

    pos, l = 0, len(program)
    while pos < l:
        c = program[pos]
        if c.isspace(): # skip inter-token whitespace
            pass
        elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
            yield ('::', None, pos)
            pos += 1 # skip ahead
        elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
            yield ('..', None, pos)
            pos += 1 # skip ahead
        elif c == '#' and program[pos:pos + 2] == '##': # look ahead carefully
            yield ('##', None, pos)
            pos += 1 # skip ahead
        elif c in "():=,-|&+!~^%": # handle simple operators
            yield (c, None, pos)
        elif (c in '"\'' or c == 'r' and
              program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
            if c == 'r':
                pos += 1
                c = program[pos]
                decode = lambda x: x
            else:
                decode = parser.unescapestr
            pos += 1
            s = pos
            while pos < l: # find closing quote
                d = program[pos]
                if d == '\\': # skip over escaped characters
                    pos += 2
                    continue
                if d == c:
                    yield ('string', decode(program[s:pos]), s)
                    break
                pos += 1
            else:
                raise error.ParseError(_("unterminated string"), s)
        # gather up a symbol/keyword
        elif c in syminitletters:
            s = pos
            pos += 1
            while pos < l: # find end of symbol
                d = program[pos]
                if d not in symletters:
                    break
                if d == '.' and program[pos - 1] == '.': # special case for ..
                    pos -= 1
                    break
                pos += 1
            sym = program[s:pos]
            if sym in keywords: # operator keywords
                yield (sym, None, s)
            elif '-' in sym:
                # some jerk gave us foo-bar-baz, try to check if it's a symbol
                if lookup and lookup(sym):
                    # looks like a real symbol
                    yield ('symbol', sym, s)
                else:
                    # looks like an expression
                    parts = sym.split('-')
                    for p in parts[:-1]:
                        if p: # possible consecutive -
                            yield ('symbol', p, s)
                        s += len(p)
                        yield ('-', None, pos)
                        s += 1
                    if parts[-1]: # possible trailing -
                        yield ('symbol', parts[-1], s)
            else:
                yield ('symbol', sym, s)
            pos -= 1
        else:
            raise error.ParseError(_("syntax error in revset '%s'") %
                                   program, pos)
        pos += 1
    yield ('end', None, pos)
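# For illustration: list(tokenize("tip()")) yields
# [('symbol', 'tip', 0), ('(', None, 3), (')', None, 4), ('end', None, 5)].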

# helpers

def getstring(x, err):
    if x and (x[0] == 'string' or x[0] == 'symbol'):
        return x[1]
    raise error.ParseError(err)

def getlist(x):
    if not x:
        return []
    if x[0] == 'list':
        return list(x[1:])
    return [x]

def getargs(x, min, max, err):
    l = getlist(x)
    if len(l) < min or (max >= 0 and len(l) > max):
        raise error.ParseError(err)
    return l

def getargsdict(x, funcname, keys):
    return parser.buildargsdict(getlist(x), funcname, keys.split(),
                                keyvaluenode='keyvalue', keynode='symbol')

def getset(repo, subset, x):
    if not x:
        raise error.ParseError(_("missing argument"))
    s = methods[x[0]](repo, subset, *x[1:])
    if util.safehasattr(s, 'isascending'):
        return s
    # else case should not happen, because all non-func are internal,
    # ignoring for now.
    if x[0] == 'func' and x[1][0] == 'symbol' and x[1][1] in symbols:
        repo.ui.deprecwarn('revset "%s" uses list instead of smartset'
                           % x[1][1],
                           '3.9')
    return baseset(s)

def _getrevsource(repo, r):
    extra = repo[r].extra()
    for label in ('source', 'transplant_source', 'rebase_source'):
        if label in extra:
            try:
                return repo[extra[label]].rev()
            except error.RepoLookupError:
                pass
    return None

# operator methods

def stringset(repo, subset, x):
    x = repo[x].rev()
    if (x in subset
        or x == node.nullrev and isinstance(subset, fullreposet)):
        return baseset([x])
    return baseset()

def rangeset(repo, subset, x, y):
    m = getset(repo, fullreposet(repo), x)
    n = getset(repo, fullreposet(repo), y)

    if not m or not n:
        return baseset()
    m, n = m.first(), n.last()

    if m == n:
        r = baseset([m])
    elif n == node.wdirrev:
        r = spanset(repo, m, len(repo)) + baseset([n])
    elif m == node.wdirrev:
        r = baseset([m]) + spanset(repo, len(repo) - 1, n - 1)
    elif m < n:
        r = spanset(repo, m, n + 1)
    else:
        r = spanset(repo, m, n - 1)
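    # when m > n, the spanset above runs from m down to n, so a reversed
    # range such as '3:1' enumerates revisions 3, 2, 1 in that order.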
    # XXX We should combine with subset first: 'subset & baseset(...)'. This is
    # necessary to ensure we preserve the order in subset.
    #
    # This has performance implication, carrying the sorting over when possible
    # would be more efficient.
    return r & subset

def dagrange(repo, subset, x, y):
    r = fullreposet(repo)
    xs = reachableroots(repo, getset(repo, r, x), getset(repo, r, y),
                        includepath=True)
    return subset & xs

def andset(repo, subset, x, y):
    return getset(repo, getset(repo, subset, x), y)

def differenceset(repo, subset, x, y):
    return getset(repo, subset, x) - getset(repo, subset, y)

def orset(repo, subset, *xs):
    assert xs
    if len(xs) == 1:
        return getset(repo, subset, xs[0])
    p = len(xs) // 2
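    # splitting the operands in half keeps the recursion depth logarithmic
    # in the number of or-ed expressions.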
    a = orset(repo, subset, *xs[:p])
    b = orset(repo, subset, *xs[p:])
    return a + b

def notset(repo, subset, x):
    return subset - getset(repo, subset, x)

def listset(repo, subset, *xs):
    raise error.ParseError(_("can't use a list in this context"),
                           hint=_('see hg help "revsets.x or y"'))

def keyvaluepair(repo, subset, k, v):
    raise error.ParseError(_("can't use a key-value pair in this context"))

def func(repo, subset, a, b):
    if a[0] == 'symbol' and a[1] in symbols:
        return symbols[a[1]](repo, subset, b)

    keep = lambda fn: getattr(fn, '__doc__', None) is not None

    syms = [s for (s, fn) in symbols.items() if keep(fn)]
    raise error.UnknownIdentifier(a[1], syms)

# functions

# symbols are callables like:
#   fn(repo, subset, x)
# with:
#   repo - current repository instance
#   subset - of revisions to be examined
#   x - argument in tree form
symbols = {}

# symbols which can't be used for a DoS attack for any given input
# (e.g. those which accept regexes as plain strings shouldn't be included)
# functions that just return a lot of changesets (like all) don't count here
safesymbols = set()

predicate = registrar.revsetpredicate()

@predicate('_destupdate')
def _destupdate(repo, subset, x):
    # experimental revset for update destination
    args = getargsdict(x, 'limit', 'clean check')
    return subset & baseset([destutil.destupdate(repo, **args)[0]])

@predicate('_destmerge')
def _destmerge(repo, subset, x):
    # experimental revset for merge destination
    sourceset = None
    if x is not None:
        sourceset = getset(repo, fullreposet(repo), x)
    return subset & baseset([destutil.destmerge(repo, sourceset=sourceset)])

@predicate('adds(pattern)', safe=True)
def adds(repo, subset, x):
    """Changesets that add a file matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "adds" is a keyword
    pat = getstring(x, _("adds requires a pattern"))
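    # field 1 selects the "added" list from the repo.status() call in
    # checkstatus() (index 0 is modified, 2 is removed).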
    return checkstatus(repo, subset, pat, 1)

@predicate('ancestor(*changeset)', safe=True)
def ancestor(repo, subset, x):
    """A greatest common ancestor of the changesets.

    Accepts 0 or more changesets.
    Will return empty list when passed no args.
    Greatest common ancestor of a single changeset is that changeset.
    """
    # i18n: "ancestor" is a keyword
    l = getlist(x)
    rl = fullreposet(repo)
    anc = None

    # (getset(repo, rl, i) for i in l) generates a list of lists
    for revs in (getset(repo, rl, i) for i in l):
        for r in revs:
            if anc is None:
                anc = repo[r]
            else:
                anc = anc.ancestor(repo[r])

    if anc is not None and anc.rev() in subset:
        return baseset([anc.rev()])
    return baseset()

def _ancestors(repo, subset, x, followfirst=False):
    heads = getset(repo, fullreposet(repo), x)
    if not heads:
        return baseset()
    s = _revancestors(repo, heads, followfirst)
    return subset & s

@predicate('ancestors(set)', safe=True)
def ancestors(repo, subset, x):
    """Changesets that are ancestors of a changeset in set.
    """
    return _ancestors(repo, subset, x)

@predicate('_firstancestors', safe=True)
def _firstancestors(repo, subset, x):
    # ``_firstancestors(set)``
    # Like ``ancestors(set)`` but follows only the first parents.
    return _ancestors(repo, subset, x, followfirst=True)

def ancestorspec(repo, subset, x, n):
    """``set~n``
    Changesets that are the Nth ancestor (first parents only) of a changeset
    in set.
    """
    try:
        n = int(n[1])
    except (TypeError, ValueError):
        raise error.ParseError(_("~ expects a number"))
    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        for i in range(n):
            r = cl.parentrevs(r)[0]
        ps.add(r)
    return subset & ps

@predicate('author(string)', safe=True)
def author(repo, subset, x):
    """Alias for ``user(string)``.
    """
    # i18n: "author" is a keyword
    n = encoding.lower(getstring(x, _("author requires a string")))
    kind, pattern, matcher = _substringmatcher(n)
    return subset.filter(lambda x: matcher(encoding.lower(repo[x].user())),
                         condrepr=('<user %r>', n))

@predicate('bisect(string)', safe=True)
def bisect(repo, subset, x):
    """Changesets marked in the specified bisect status:

    - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
    - ``goods``, ``bads`` : csets topologically good/bad
    - ``range`` : csets taking part in the bisection
    - ``pruned`` : csets that are goods, bads or skipped
    - ``untested`` : csets whose fate is yet unknown
    - ``ignored`` : csets ignored due to DAG topology
    - ``current`` : the cset currently being bisected
    """
    # i18n: "bisect" is a keyword
    status = getstring(x, _("bisect requires a string")).lower()
    state = set(hbisect.get(repo, status))
    return subset & state

# Backward-compatibility
# - no help entry so that we do not advertise it any more
@predicate('bisected', safe=True)
def bisected(repo, subset, x):
    return bisect(repo, subset, x)

@predicate('bookmark([name])', safe=True)
def bookmark(repo, subset, x):
    """The named bookmark or all bookmarks.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a bookmark that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "bookmark" is a keyword
    args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
    if args:
        bm = getstring(args[0],
                       # i18n: "bookmark" is a keyword
                       _('the argument to bookmark must be a string'))
        kind, pattern, matcher = util.stringmatcher(bm)
        bms = set()
        if kind == 'literal':
            bmrev = repo._bookmarks.get(pattern, None)
            if not bmrev:
                raise error.RepoLookupError(_("bookmark '%s' does not exist")
                                            % pattern)
            bms.add(repo[bmrev].rev())
        else:
            matchrevs = set()
            for name, bmrev in repo._bookmarks.iteritems():
                if matcher(name):
                    matchrevs.add(bmrev)
            if not matchrevs:
                raise error.RepoLookupError(_("no bookmarks exist"
                                              " that match '%s'") % pattern)
            for bmrev in matchrevs:
                bms.add(repo[bmrev].rev())
    else:
        bms = set([repo[r].rev()
                   for r in repo._bookmarks.values()])
    bms -= set([node.nullrev])
    return subset & bms

@predicate('branch(string or set)', safe=True)
def branch(repo, subset, x):
    """
    All changesets belonging to the given branch or the branches of the given
    changesets.

    If `string` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a branch that actually starts with `re:`,
    use the prefix `literal:`.
    """
    getbi = repo.revbranchcache().branchinfo

    try:
        b = getstring(x, '')
    except error.ParseError:
        # not a string, but another revspec, e.g. tip()
        pass
    else:
        kind, pattern, matcher = util.stringmatcher(b)
        if kind == 'literal':
            # note: falls through to the revspec case if no branch with
            # this name exists and pattern kind is not specified explicitly
            if pattern in repo.branchmap():
                return subset.filter(lambda r: matcher(getbi(r)[0]),
                                     condrepr=('<branch %r>', b))
            if b.startswith('literal:'):
                raise error.RepoLookupError(_("branch '%s' does not exist")
                                            % pattern)
        else:
            return subset.filter(lambda r: matcher(getbi(r)[0]),
                                 condrepr=('<branch %r>', b))

    s = getset(repo, fullreposet(repo), x)
    b = set()
    for r in s:
        b.add(getbi(r)[0])
    c = s.__contains__
    return subset.filter(lambda r: c(r) or getbi(r)[0] in b,
                         condrepr=lambda: '<branch %r>' % sorted(b))

@predicate('bumped()', safe=True)
def bumped(repo, subset, x):
    """Mutable changesets marked as successors of public changesets.

    Only non-public and non-obsolete changesets can be `bumped`.
    """
    # i18n: "bumped" is a keyword
    getargs(x, 0, 0, _("bumped takes no arguments"))
    bumped = obsmod.getrevs(repo, 'bumped')
    return subset & bumped

@predicate('bundle()', safe=True)
def bundle(repo, subset, x):
    """Changesets in the bundle.

    Bundle must be specified by the -R option."""

    try:
        bundlerevs = repo.changelog.bundlerevs
    except AttributeError:
        raise error.Abort(_("no bundle provided - specify with -R"))
    return subset & bundlerevs

def checkstatus(repo, subset, pat, field):
    hasset = matchmod.patkind(pat) == 'set'

    mcache = [None]
    def matches(x):
        c = repo[x]
        if not mcache[0] or hasset:
            mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
        m = mcache[0]
        fname = None
        if not m.anypats() and len(m.files()) == 1:
            fname = m.files()[0]
        if fname is not None:
            if fname not in c.files():
                return False
        else:
            for f in c.files():
                if m(f):
                    break
            else:
                return False
        files = repo.status(c.p1().node(), c.node())[field]
        if fname is not None:
            if fname in files:
                return True
        else:
            for f in files:
                if m(f):
                    return True

    return subset.filter(matches, condrepr=('<status[%r] %r>', field, pat))

def _children(repo, narrow, parentset):
    if not parentset:
        return baseset()
    cs = set()
    pr = repo.changelog.parentrevs
    minrev = parentset.min()
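    # a child always has a higher revision number than its parents, so
    # anything at or below the smallest parent can be skipped.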
    for r in narrow:
        if r <= minrev:
            continue
        for p in pr(r):
            if p in parentset:
                cs.add(r)
    # XXX using a set to feed the baseset is wrong. Sets are not ordered.
    # This does not break because of other fullreposet misbehavior.
    return baseset(cs)

@predicate('children(set)', safe=True)
def children(repo, subset, x):
    """Child changesets of changesets in set.
    """
    s = getset(repo, fullreposet(repo), x)
    cs = _children(repo, subset, s)
    return subset & cs

@predicate('closed()', safe=True)
def closed(repo, subset, x):
    """Changeset is closed.
    """
    # i18n: "closed" is a keyword
    getargs(x, 0, 0, _("closed takes no arguments"))
    return subset.filter(lambda r: repo[r].closesbranch(),
                         condrepr='<branch closed>')

@predicate('contains(pattern)')
def contains(repo, subset, x):
    """The revision's manifest contains a file matching pattern (but might not
    modify it). See :hg:`help patterns` for information about file patterns.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.
    """
    # i18n: "contains" is a keyword
    pat = getstring(x, _("contains requires a pattern"))

    def matches(x):
        if not matchmod.patkind(pat):
            pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
            if pats in repo[x]:
                return True
        else:
            c = repo[x]
            m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
            for f in c.manifest():
                if m(f):
                    return True
        return False

    return subset.filter(matches, condrepr=('<contains %r>', pat))

@predicate('converted([id])', safe=True)
def converted(repo, subset, x):
    """Changesets converted from the given identifier in the old repository if
    present, or all converted changesets if no identifier is specified.
    """

    # There is exactly no chance of resolving the revision, so do a simple
    # string compare and hope for the best

    rev = None
    # i18n: "converted" is a keyword
    l = getargs(x, 0, 1, _('converted takes one or no arguments'))
    if l:
        # i18n: "converted" is a keyword
        rev = getstring(l[0], _('converted requires a revision'))

    def _matchvalue(r):
        source = repo[r].extra().get('convert_revision', None)
        return source is not None and (rev is None or source.startswith(rev))

    return subset.filter(lambda r: _matchvalue(r),
                         condrepr=('<converted %r>', rev))

@predicate('date(interval)', safe=True)
def date(repo, subset, x):
    """Changesets within the interval, see :hg:`help dates`.
    """
    # i18n: "date" is a keyword
    ds = getstring(x, _("date requires a string"))
    dm = util.matchdate(ds)
    return subset.filter(lambda x: dm(repo[x].date()[0]),
                         condrepr=('<date %r>', ds))

@predicate('desc(string)', safe=True)
def desc(repo, subset, x):
    """Search commit message for string. The match is case-insensitive.
    """
    # i18n: "desc" is a keyword
    ds = encoding.lower(getstring(x, _("desc requires a string")))

    def matches(x):
        c = repo[x]
        return ds in encoding.lower(c.description())

    return subset.filter(matches, condrepr=('<desc %r>', ds))

def _descendants(repo, subset, x, followfirst=False):
    roots = getset(repo, fullreposet(repo), x)
    if not roots:
        return baseset()
    s = _revdescendants(repo, roots, followfirst)

    # Both sets need to be ascending in order to lazily return the union
    # in the correct order.
    base = subset & roots
    desc = subset & s
    result = base + desc
    if subset.isascending():
        result.sort()
    elif subset.isdescending():
        result.sort(reverse=True)
    else:
        result = subset & result
    return result

@predicate('descendants(set)', safe=True)
def descendants(repo, subset, x):
    """Changesets which are descendants of changesets in set.
    """
    return _descendants(repo, subset, x)

826 @predicate('_firstdescendants', safe=True)
826 @predicate('_firstdescendants', safe=True)
827 def _firstdescendants(repo, subset, x):
827 def _firstdescendants(repo, subset, x):
828 # ``_firstdescendants(set)``
828 # ``_firstdescendants(set)``
829 # Like ``descendants(set)`` but follows only the first parents.
829 # Like ``descendants(set)`` but follows only the first parents.
830 return _descendants(repo, subset, x, followfirst=True)
830 return _descendants(repo, subset, x, followfirst=True)
831
831
832 @predicate('destination([set])', safe=True)
832 @predicate('destination([set])', safe=True)
833 def destination(repo, subset, x):
833 def destination(repo, subset, x):
834 """Changesets that were created by a graft, transplant or rebase operation,
834 """Changesets that were created by a graft, transplant or rebase operation,
835 with the given revisions specified as the source. Omitting the optional set
835 with the given revisions specified as the source. Omitting the optional set
836 is the same as passing all().
836 is the same as passing all().
837 """
837 """
838 if x is not None:
838 if x is not None:
839 sources = getset(repo, fullreposet(repo), x)
839 sources = getset(repo, fullreposet(repo), x)
840 else:
840 else:
841 sources = fullreposet(repo)
841 sources = fullreposet(repo)
842
842
843 dests = set()
843 dests = set()
844
844
845 # subset contains all of the possible destinations that can be returned, so
845 # subset contains all of the possible destinations that can be returned, so
846 # iterate over them and see if their source(s) were provided in the arg set.
846 # iterate over them and see if their source(s) were provided in the arg set.
847 # Even if the immediate src of r is not in the arg set, src's source (or
847 # Even if the immediate src of r is not in the arg set, src's source (or
848 # further back) may be. Scanning back further than the immediate src allows
848 # further back) may be. Scanning back further than the immediate src allows
849 # transitive transplants and rebases to yield the same results as transitive
849 # transitive transplants and rebases to yield the same results as transitive
850 # grafts.
850 # grafts.
851 for r in subset:
851 for r in subset:
852 src = _getrevsource(repo, r)
852 src = _getrevsource(repo, r)
853 lineage = None
853 lineage = None
854
854
855 while src is not None:
855 while src is not None:
856 if lineage is None:
856 if lineage is None:
857 lineage = list()
857 lineage = list()
858
858
859 lineage.append(r)
859 lineage.append(r)
860
860
861 # The visited lineage is a match if the current source is in the arg
861 # The visited lineage is a match if the current source is in the arg
862 # set. Since every candidate dest is visited by way of iterating
862 # set. Since every candidate dest is visited by way of iterating
863 # subset, any dests further back in the lineage will be tested by a
863 # subset, any dests further back in the lineage will be tested by a
864 # different iteration over subset. Likewise, if the src was already
864 # different iteration over subset. Likewise, if the src was already
865 # selected, the current lineage can be selected without going back
865 # selected, the current lineage can be selected without going back
866 # further.
866 # further.
867 if src in sources or src in dests:
867 if src in sources or src in dests:
868 dests.update(lineage)
868 dests.update(lineage)
869 break
869 break
870
870
871 r = src
871 r = src
872 src = _getrevsource(repo, r)
872 src = _getrevsource(repo, r)
873
873
874 return subset.filter(dests.__contains__,
874 return subset.filter(dests.__contains__,
875 condrepr=lambda: '<destination %r>' % sorted(dests))
875 condrepr=lambda: '<destination %r>' % sorted(dests))

@predicate('divergent()', safe=True)
def divergent(repo, subset, x):
    """
    Final successors of changesets with an alternative set of final successors.
    """
    # i18n: "divergent" is a keyword
    getargs(x, 0, 0, _("divergent takes no arguments"))
    divergent = obsmod.getrevs(repo, 'divergent')
    return subset & divergent

@predicate('extinct()', safe=True)
def extinct(repo, subset, x):
    """Obsolete changesets with obsolete descendants only.
    """
    # i18n: "extinct" is a keyword
    getargs(x, 0, 0, _("extinct takes no arguments"))
    extincts = obsmod.getrevs(repo, 'extinct')
    return subset & extincts

@predicate('extra(label, [value])', safe=True)
def extra(repo, subset, x):
    """Changesets with the given label in the extra metadata, with the given
    optional value.

    If `value` starts with `re:`, the remainder of the value is treated as
    a regular expression. To match a value that actually starts with `re:`,
    use the prefix `literal:`.
    """
    args = getargsdict(x, 'extra', 'label value')
    if 'label' not in args:
        # i18n: "extra" is a keyword
        raise error.ParseError(_('extra takes at least 1 argument'))
    # i18n: "extra" is a keyword
    label = getstring(args['label'], _('first argument to extra must be '
                                       'a string'))
    value = None

    if 'value' in args:
        # i18n: "extra" is a keyword
        value = getstring(args['value'], _('second argument to extra must be '
                                           'a string'))
        kind, value, matcher = util.stringmatcher(value)

    def _matchvalue(r):
        extra = repo[r].extra()
        return label in extra and (value is None or matcher(extra[label]))

    return subset.filter(lambda r: _matchvalue(r),
                         condrepr=('<extra[%r] %r>', label, value))
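
# Illustrative usage (assumption, not from the original source): on the command
# line the predicate above could be exercised as
#   hg log -r "extra('source')"                 (changesets carrying a 'source' key)
#   hg log -r "extra('source', 're:^[0-9a-f]+')"  (value constrained by a regex)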

@predicate('filelog(pattern)', safe=True)
def filelog(repo, subset, x):
    """Changesets connected to the specified filelog.

    For performance reasons, visits only revisions mentioned in the file-level
    filelog, rather than filtering through all changesets (much faster, but
    doesn't include deletes or duplicate changes). For a slower, more accurate
    result, use ``file()``.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.

    If some linkrev points to revisions filtered by the current repoview, we'll
    work around it to return a non-filtered value.
    """

    # i18n: "filelog" is a keyword
    pat = getstring(x, _("filelog requires a pattern"))
    s = set()
    cl = repo.changelog

    if not matchmod.patkind(pat):
        f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
        files = [f]
    else:
        m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
        files = (f for f in repo[None] if m(f))

    for f in files:
        fl = repo.file(f)
        known = {}
        scanpos = 0
        for fr in list(fl):
            fn = fl.node(fr)
            if fn in known:
                s.add(known[fn])
                continue

            lr = fl.linkrev(fr)
            if lr in cl:
                s.add(lr)
            elif scanpos is not None:
                # lowest matching changeset is filtered, scan further
                # ahead in changelog
                start = max(lr, scanpos) + 1
                scanpos = None
                for r in cl.revs(start):
                    # minimize parsing of non-matching entries
                    if f in cl.revision(r) and f in cl.readfiles(r):
                        try:
                            # try to use manifest delta fastpath
                            n = repo[r].filenode(f)
                            if n not in known:
                                if n == fn:
                                    s.add(r)
                                    scanpos = r
                                    break
                                else:
                                    known[n] = r
                        except error.ManifestLookupError:
                            # deletion in changelog
                            continue

    return subset & s
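
# Illustrative comparison (assumption): "filelog(mercurial/revset.py)" walks only
# the file-level revlog and is therefore fast, while "file(mercurial/revset.py)"
# scans changesets and also catches deletions, as the docstring above explains.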

@predicate('first(set, [n])', safe=True)
def first(repo, subset, x):
    """An alias for limit().
    """
    return limit(repo, subset, x)

def _follow(repo, subset, x, name, followfirst=False):
    l = getargs(x, 0, 1, _("%s takes no arguments or a pattern") % name)
    c = repo['.']
    if l:
        x = getstring(l[0], _("%s expected a pattern") % name)
        matcher = matchmod.match(repo.root, repo.getcwd(), [x],
                                 ctx=repo[None], default='path')

        files = c.manifest().walk(matcher)

        s = set()
        for fname in files:
            fctx = c[fname]
            s = s.union(set(c.rev() for c in fctx.ancestors(followfirst)))
            # include the revision responsible for the most recent version
            s.add(fctx.introrev())
    else:
        s = _revancestors(repo, baseset([c.rev()]), followfirst)

    return subset & s

@predicate('follow([pattern])', safe=True)
def follow(repo, subset, x):
    """
    An alias for ``::.`` (ancestors of the working directory's first parent).
    If pattern is specified, the histories of files matching the given
    pattern are followed, including copies.
    """
    return _follow(repo, subset, x, 'follow')
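
# Illustrative usage (assumption): "hg log -r 'follow(README)'" restricts the log
# to ancestors of the working directory's parent that touch README, following
# copies as described above, while a bare "follow()" is simply "::.".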

@predicate('_followfirst', safe=True)
def _followfirst(repo, subset, x):
    # ``followfirst([pattern])``
    # Like ``follow([pattern])`` but follows only the first parent of
    # every revision or file revision.
    return _follow(repo, subset, x, '_followfirst', followfirst=True)

@predicate('all()', safe=True)
def getall(repo, subset, x):
    """All changesets, the same as ``0:tip``.
    """
    # i18n: "all" is a keyword
    getargs(x, 0, 0, _("all takes no arguments"))
    return subset & spanset(repo) # drop "null" if any

@predicate('grep(regex)')
def grep(repo, subset, x):
    """Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
    to ensure special escape characters are handled correctly. Unlike
    ``keyword(string)``, the match is case-sensitive.
    """
    try:
        # i18n: "grep" is a keyword
        gr = re.compile(getstring(x, _("grep requires a string")))
    except re.error as e:
        raise error.ParseError(_('invalid match pattern: %s') % e)

    def matches(x):
        c = repo[x]
        for e in c.files() + [c.user(), c.description()]:
            if gr.search(e):
                return True
        return False

    return subset.filter(matches, condrepr=('<grep %r>', gr.pattern))

@predicate('_matchfiles', safe=True)
def _matchfiles(repo, subset, x):
    # _matchfiles takes a revset list of prefixed arguments:
    #
    #   [p:foo, i:bar, x:baz]
    #
    # builds a match object from them and filters subset. Allowed
    # prefixes are 'p:' for regular patterns, 'i:' for include
    # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
    # a revision identifier, or the empty string to reference the
    # working directory, from which the match object is
    # initialized. Use 'd:' to set the default matching mode, default
    # to 'glob'. At most one 'r:' and 'd:' argument can be passed.

    l = getargs(x, 1, -1, "_matchfiles requires at least one argument")
    pats, inc, exc = [], [], []
    rev, default = None, None
    for arg in l:
        s = getstring(arg, "_matchfiles requires string arguments")
        prefix, value = s[:2], s[2:]
        if prefix == 'p:':
            pats.append(value)
        elif prefix == 'i:':
            inc.append(value)
        elif prefix == 'x:':
            exc.append(value)
        elif prefix == 'r:':
            if rev is not None:
                raise error.ParseError('_matchfiles expected at most one '
                                       'revision')
            if value != '': # empty means working directory; leave rev as None
                rev = value
        elif prefix == 'd:':
            if default is not None:
                raise error.ParseError('_matchfiles expected at most one '
                                       'default mode')
            default = value
        else:
            raise error.ParseError('invalid _matchfiles prefix: %s' % prefix)
    if not default:
        default = 'glob'

    m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
                       exclude=exc, ctx=repo[rev], default=default)

    # This directly reads the changelog data, as creating a changectx for all
    # revisions is quite expensive.
    getfiles = repo.changelog.readfiles
    wdirrev = node.wdirrev
    def matches(x):
        if x == wdirrev:
            files = repo[x].files()
        else:
            files = getfiles(x)
        for f in files:
            if m(f):
                return True
        return False

    return subset.filter(matches,
                         condrepr=('<matchfiles patterns=%r, include=%r '
                                   'exclude=%r, default=%r, rev=%r>',
                                   pats, inc, exc, default, rev))
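
# Illustrative call (assumption, not from the original source): the expression
#   _matchfiles('p:src/**.py', 'x:src/vendor/**', 'd:glob')
# would select changesets touching Python files under src/ while skipping the
# vendored tree, following the prefix convention documented at the top of the
# function above.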

@predicate('file(pattern)', safe=True)
def hasfile(repo, subset, x):
    """Changesets affecting files matched by pattern.

    For a faster but less accurate result, consider using ``filelog()``
    instead.

    This predicate uses ``glob:`` as the default kind of pattern.
    """
    # i18n: "file" is a keyword
    pat = getstring(x, _("file requires a pattern"))
    return _matchfiles(repo, subset, ('string', 'p:' + pat))

@predicate('head()', safe=True)
def head(repo, subset, x):
    """Changeset is a named branch head.
    """
    # i18n: "head" is a keyword
    getargs(x, 0, 0, _("head takes no arguments"))
    hs = set()
    cl = repo.changelog
    for b, ls in repo.branchmap().iteritems():
        hs.update(cl.rev(h) for h in ls)
    # XXX using a set to feed the baseset is wrong. Sets are not ordered.
    # This does not break because of other fullreposet misbehavior.
    # XXX We should combine with subset first: 'subset & baseset(...)'. This is
    # necessary to ensure we preserve the order in subset.
    return baseset(hs) & subset

@predicate('heads(set)', safe=True)
def heads(repo, subset, x):
    """Members of set with no children in set.
    """
    s = getset(repo, subset, x)
    ps = parents(repo, subset, x)
    return s - ps
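
# Illustrative usage (assumption): "heads(branch(default))" selects the changesets
# on the default branch that have no children on that branch, per the definition
# of heads(set) above.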

@predicate('hidden()', safe=True)
def hidden(repo, subset, x):
    """Hidden changesets.
    """
    # i18n: "hidden" is a keyword
    getargs(x, 0, 0, _("hidden takes no arguments"))
    hiddenrevs = repoview.filterrevs(repo, 'visible')
    return subset & hiddenrevs

@predicate('keyword(string)', safe=True)
def keyword(repo, subset, x):
    """Search commit message, user name, and names of changed files for
    string. The match is case-insensitive.
    """
    # i18n: "keyword" is a keyword
    kw = encoding.lower(getstring(x, _("keyword requires a string")))

    def matches(r):
        c = repo[r]
        return any(kw in encoding.lower(t)
                   for t in c.files() + [c.user(), c.description()])

    return subset.filter(matches, condrepr=('<keyword %r>', kw))
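
# Illustrative usage (assumption): "keyword(bug)" matches "bug", "BUG" or "Bugfix"
# in the commit message, user name or changed file names, since both sides of the
# comparison above are lower-cased.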

@predicate('limit(set[, n[, offset]])', safe=True)
def limit(repo, subset, x):
    """First n members of set, defaulting to 1, starting from offset.
    """
    args = getargsdict(x, 'limit', 'set n offset')
    if 'set' not in args:
        # i18n: "limit" is a keyword
        raise error.ParseError(_("limit requires one to three arguments"))
    try:
        lim, ofs = 1, 0
        if 'n' in args:
            # i18n: "limit" is a keyword
            lim = int(getstring(args['n'], _("limit requires a number")))
        if 'offset' in args:
            # i18n: "limit" is a keyword
            ofs = int(getstring(args['offset'], _("limit requires a number")))
        if ofs < 0:
            raise error.ParseError(_("negative offset"))
    except (TypeError, ValueError):
        # i18n: "limit" is a keyword
        raise error.ParseError(_("limit expects a number"))
    os = getset(repo, fullreposet(repo), args['set'])
    result = []
    it = iter(os)
    for x in xrange(ofs):
        y = next(it, None)
        if y is None:
            break
    for x in xrange(lim):
        y = next(it, None)
        if y is None:
            break
        elif y in subset:
            result.append(y)
    return baseset(result, datarepr=('<limit n=%d, offset=%d, %r, %r>',
                                     lim, ofs, subset, os))
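
# Worked example (assumption): "limit(all(), 3, 10)" skips the first 10 revisions
# of all() and then yields the next 3 that are also in the subset being filtered,
# i.e. normally revisions 10, 11 and 12.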

@predicate('last(set, [n])', safe=True)
def last(repo, subset, x):
    """Last n members of set, defaulting to 1.
    """
    # i18n: "last" is a keyword
    l = getargs(x, 1, 2, _("last requires one or two arguments"))
    try:
        lim = 1
        if len(l) == 2:
            # i18n: "last" is a keyword
            lim = int(getstring(l[1], _("last requires a number")))
    except (TypeError, ValueError):
        # i18n: "last" is a keyword
        raise error.ParseError(_("last expects a number"))
    os = getset(repo, fullreposet(repo), l[0])
    os.reverse()
    result = []
    it = iter(os)
    for x in xrange(lim):
        y = next(it, None)
        if y is None:
            break
        elif y in subset:
            result.append(y)
    return baseset(result, datarepr=('<last n=%d, %r, %r>', lim, subset, os))

@predicate('max(set)', safe=True)
def maxrev(repo, subset, x):
    """Changeset with highest revision number in set.
    """
    os = getset(repo, fullreposet(repo), x)
    try:
        m = os.max()
        if m in subset:
            return baseset([m], datarepr=('<max %r, %r>', subset, os))
    except ValueError:
        # os.max() throws a ValueError when the collection is empty.
        # Same as python's max().
        pass
    return baseset(datarepr=('<max %r, %r>', subset, os))

@predicate('merge()', safe=True)
def merge(repo, subset, x):
    """Changeset is a merge changeset.
    """
    # i18n: "merge" is a keyword
    getargs(x, 0, 0, _("merge takes no arguments"))
    cl = repo.changelog
    return subset.filter(lambda r: cl.parentrevs(r)[1] != -1,
                         condrepr='<merge>')

@predicate('branchpoint()', safe=True)
def branchpoint(repo, subset, x):
    """Changesets with more than one child.
    """
    # i18n: "branchpoint" is a keyword
    getargs(x, 0, 0, _("branchpoint takes no arguments"))
    cl = repo.changelog
    if not subset:
        return baseset()
    # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
    # (and if it is not, it should.)
    baserev = min(subset)
    parentscount = [0]*(len(repo) - baserev)
    for r in cl.revs(start=baserev + 1):
        for p in cl.parentrevs(r):
            if p >= baserev:
                parentscount[p - baserev] += 1
    return subset.filter(lambda r: parentscount[r - baserev] > 1,
                         condrepr='<branchpoint>')

@predicate('min(set)', safe=True)
def minrev(repo, subset, x):
    """Changeset with lowest revision number in set.
    """
    os = getset(repo, fullreposet(repo), x)
    try:
        m = os.min()
        if m in subset:
            return baseset([m], datarepr=('<min %r, %r>', subset, os))
    except ValueError:
        # os.min() throws a ValueError when the collection is empty.
        # Same as python's min().
        pass
    return baseset(datarepr=('<min %r, %r>', subset, os))

@predicate('modifies(pattern)', safe=True)
def modifies(repo, subset, x):
    """Changesets modifying files matched by pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "modifies" is a keyword
    pat = getstring(x, _("modifies requires a pattern"))
    return checkstatus(repo, subset, pat, 0)

@predicate('named(namespace)')
def named(repo, subset, x):
    """The changesets in a given namespace.

    If `namespace` starts with `re:`, the remainder of the string is treated as
    a regular expression. To match a namespace that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "named" is a keyword
    args = getargs(x, 1, 1, _('named requires a namespace argument'))

    ns = getstring(args[0],
                   # i18n: "named" is a keyword
                   _('the argument to named must be a string'))
    kind, pattern, matcher = util.stringmatcher(ns)
    namespaces = set()
    if kind == 'literal':
        if pattern not in repo.names:
            raise error.RepoLookupError(_("namespace '%s' does not exist")
                                        % ns)
        namespaces.add(repo.names[pattern])
    else:
        for name, ns in repo.names.iteritems():
            if matcher(name):
                namespaces.add(ns)
        if not namespaces:
            raise error.RepoLookupError(_("no namespace exists"
                                          " that match '%s'") % pattern)

    names = set()
    for ns in namespaces:
        for name in ns.listnames(repo):
            if name not in ns.deprecated:
                names.update(repo[n].rev() for n in ns.nodes(repo, name))

    names -= set([node.nullrev])
    return subset & names

@predicate('id(string)', safe=True)
def node_(repo, subset, x):
    """Revision non-ambiguously specified by the given hex string prefix.
    """
    # i18n: "id" is a keyword
    l = getargs(x, 1, 1, _("id requires one argument"))
    # i18n: "id" is a keyword
    n = getstring(l[0], _("id requires a string"))
    if len(n) == 40:
        try:
            rn = repo.changelog.rev(node.bin(n))
        except (LookupError, TypeError):
            rn = None
    else:
        rn = None
        pm = repo.changelog._partialmatch(n)
        if pm is not None:
            rn = repo.changelog.rev(pm)

    if rn is None:
        return baseset()
    result = baseset([rn])
    return result & subset

@predicate('obsolete()', safe=True)
def obsolete(repo, subset, x):
    """Mutable changeset with a newer version."""
    # i18n: "obsolete" is a keyword
    getargs(x, 0, 0, _("obsolete takes no arguments"))
    obsoletes = obsmod.getrevs(repo, 'obsolete')
    return subset & obsoletes

@predicate('only(set, [set])', safe=True)
def only(repo, subset, x):
    """Changesets that are ancestors of the first set that are not ancestors
    of any other head in the repo. If a second set is specified, the result
    is ancestors of the first set that are not ancestors of the second set
    (i.e. ::<set1> - ::<set2>).
    """
    cl = repo.changelog
    # i18n: "only" is a keyword
    args = getargs(x, 1, 2, _('only takes one or two arguments'))
    include = getset(repo, fullreposet(repo), args[0])
    if len(args) == 1:
        if not include:
            return baseset()

        descendants = set(_revdescendants(repo, include, False))
        exclude = [rev for rev in cl.headrevs()
                   if not rev in descendants and not rev in include]
    else:
        exclude = getset(repo, fullreposet(repo), args[1])

    results = set(cl.findmissingrevs(common=exclude, heads=include))
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & results
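
# Illustrative usage (assumption): "only(release, default)" is equivalent to
# "::release - ::default", i.e. ancestors of release that are not also ancestors
# of default, per the docstring above.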

@predicate('origin([set])', safe=True)
def origin(repo, subset, x):
    """
    Changesets that were specified as a source for the grafts, transplants or
    rebases that created the given revisions. Omitting the optional set is the
    same as passing all(). If a changeset created by these operations is itself
    specified as a source for one of these operations, only the source changeset
    for the first operation is selected.
    """
    if x is not None:
        dests = getset(repo, fullreposet(repo), x)
    else:
        dests = fullreposet(repo)

    def _firstsrc(rev):
        src = _getrevsource(repo, rev)
        if src is None:
            return None

        while True:
            prev = _getrevsource(repo, src)

            if prev is None:
                return src
            src = prev

    o = set([_firstsrc(r) for r in dests])
    o -= set([None])
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & o

@predicate('outgoing([path])', safe=True)
def outgoing(repo, subset, x):
    """Changesets not found in the specified destination repository, or the
    default push location.
    """
    # Avoid cycles.
    from . import (
        discovery,
        hg,
    )
    # i18n: "outgoing" is a keyword
    l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
    # i18n: "outgoing" is a keyword
    dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
    dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.peer(repo, {}, dest)
    repo.ui.pushbuffer()
    outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
    repo.ui.popbuffer()
    cl = repo.changelog
    o = set([cl.rev(r) for r in outgoing.missing])
    return subset & o

@predicate('p1([set])', safe=True)
def p1(repo, subset, x):
    """First parent of changesets in set, or the working directory.
    """
    if x is None:
        p = repo[x].p1().rev()
        if p >= 0:
            return subset & baseset([p])
        return baseset()

    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        ps.add(cl.parentrevs(r)[0])
    ps -= set([node.nullrev])
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & ps

@predicate('p2([set])', safe=True)
def p2(repo, subset, x):
    """Second parent of changesets in set, or the working directory.
    """
    if x is None:
        ps = repo[x].parents()
        try:
            p = ps[1].rev()
            if p >= 0:
                return subset & baseset([p])
            return baseset()
        except IndexError:
            return baseset()

    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        ps.add(cl.parentrevs(r)[1])
    ps -= set([node.nullrev])
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & ps

@predicate('parents([set])', safe=True)
def parents(repo, subset, x):
    """
    The set of all parents for all changesets in set, or the working directory.
    """
    if x is None:
        ps = set(p.rev() for p in repo[x].parents())
    else:
        ps = set()
        cl = repo.changelog
        up = ps.update
        parentrevs = cl.parentrevs
        for r in getset(repo, fullreposet(repo), x):
            if r == node.wdirrev:
                up(p.rev() for p in repo[r].parents())
            else:
                up(parentrevs(r))
    ps -= set([node.nullrev])
    return subset & ps
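
# Illustrative usage (assumption): "parents(merge())" collects both parents of
# every merge changeset, while a bare "parents()" yields the parent(s) of the
# working directory.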

def _phase(repo, subset, target):
    """helper to select all revs in phase <target>"""
    repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
    if repo._phasecache._phasesets:
        s = repo._phasecache._phasesets[target] - repo.changelog.filteredrevs
        s = baseset(s)
        s.sort() # sets are unordered, so we enforce ascending order
        return subset & s
    else:
        phase = repo._phasecache.phase
        condition = lambda r: phase(repo, r) == target
        return subset.filter(condition, condrepr=('<phase %r>', target),
                             cache=False)

@predicate('draft()', safe=True)
def draft(repo, subset, x):
    """Changeset in draft phase."""
    # i18n: "draft" is a keyword
    getargs(x, 0, 0, _("draft takes no arguments"))
    target = phases.draft
    return _phase(repo, subset, target)

@predicate('secret()', safe=True)
def secret(repo, subset, x):
    """Changeset in secret phase."""
    # i18n: "secret" is a keyword
    getargs(x, 0, 0, _("secret takes no arguments"))
    target = phases.secret
    return _phase(repo, subset, target)

def parentspec(repo, subset, x, n):
    """``set^0``
    The set.
    ``set^1`` (or ``set^``), ``set^2``
    First or second parent, respectively, of all changesets in set.
    """
    try:
        n = int(n[1])
        if n not in (0, 1, 2):
            raise ValueError
    except (TypeError, ValueError):
        raise error.ParseError(_("^ expects a number 0, 1, or 2"))
    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        if n == 0:
            ps.add(r)
        elif n == 1:
            ps.add(cl.parentrevs(r)[0])
        elif n == 2:
            parents = cl.parentrevs(r)
            if len(parents) > 1:
                ps.add(parents[1])
    return subset & ps
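
# Illustrative usage (assumption): "tip^" and "tip^1" select the first parent of
# tip, "tip^2" its second parent (empty for non-merges), and "tip^0" tip itself,
# matching the parentspec() docstring above.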

@predicate('present(set)', safe=True)
def present(repo, subset, x):
    """An empty set, if any revision in set isn't found; otherwise,
    all revisions in set.

    If any of the specified revisions is not present in the local repository,
    the query is normally aborted. But this predicate allows the query
    to continue even in such cases.
    """
    try:
        return getset(repo, subset, x)
    except error.RepoLookupError:
        return baseset()

# for internal use
@predicate('_notpublic', safe=True)
def _notpublic(repo, subset, x):
    getargs(x, 0, 0, "_notpublic takes no arguments")
    repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
    if repo._phasecache._phasesets:
        s = set()
        for u in repo._phasecache._phasesets[1:]:
            s.update(u)
        s = baseset(s - repo.changelog.filteredrevs)
        s.sort()
        return subset & s
    else:
        phase = repo._phasecache.phase
        target = phases.public
        condition = lambda r: phase(repo, r) != target
        return subset.filter(condition, condrepr=('<phase %r>', target),
                             cache=False)

@predicate('public()', safe=True)
def public(repo, subset, x):
    """Changeset in public phase."""
    # i18n: "public" is a keyword
    getargs(x, 0, 0, _("public takes no arguments"))
    phase = repo._phasecache.phase
    target = phases.public
    condition = lambda r: phase(repo, r) == target
    return subset.filter(condition, condrepr=('<phase %r>', target),
                         cache=False)

@predicate('remote([id [,path]])', safe=True)
def remote(repo, subset, x):
    """Local revision that corresponds to the given identifier in a
    remote repository, if present. Here, the '.' identifier is a
    synonym for the current local branch.
    """

    from . import hg # avoid start-up nasties
    # i18n: "remote" is a keyword
    l = getargs(x, 0, 2, _("remote takes zero, one, or two arguments"))

    q = '.'
    if len(l) > 0:
        # i18n: "remote" is a keyword
        q = getstring(l[0], _("remote requires a string id"))
    if q == '.':
        q = repo['.'].branch()

    dest = ''
    if len(l) > 1:
        # i18n: "remote" is a keyword
        dest = getstring(l[1], _("remote requires a repository path"))
    dest = repo.ui.expandpath(dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.peer(repo, {}, dest)
    n = other.lookup(q)
    if n in repo:
        r = repo[n].rev()
        if r in subset:
            return baseset([r])
    return baseset()

@predicate('removes(pattern)', safe=True)
def removes(repo, subset, x):
    """Changesets which remove files matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "removes" is a keyword
    pat = getstring(x, _("removes requires a pattern"))
    return checkstatus(repo, subset, pat, 2)

@predicate('rev(number)', safe=True)
def rev(repo, subset, x):
    """Revision with the given numeric identifier.
    """
    # i18n: "rev" is a keyword
    l = getargs(x, 1, 1, _("rev requires one argument"))
    try:
        # i18n: "rev" is a keyword
        l = int(getstring(l[0], _("rev requires a number")))
    except (TypeError, ValueError):
        # i18n: "rev" is a keyword
        raise error.ParseError(_("rev expects a number"))
    if l not in repo.changelog and l != node.nullrev:
        return baseset()
    return subset & baseset([l])
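
# Illustrative usage (assumption): "rev(0)" selects the root revision when it
# exists; an out-of-range number simply yields an empty set rather than an error.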
1703
1703
1704 @predicate('matching(revision [, field])', safe=True)
1704 @predicate('matching(revision [, field])', safe=True)
1705 def matching(repo, subset, x):
1705 def matching(repo, subset, x):
1706 """Changesets in which a given set of fields match the set of fields in the
1706 """Changesets in which a given set of fields match the set of fields in the
1707 selected revision or set.
1707 selected revision or set.
1708
1708
1709 To match more than one field pass the list of fields to match separated
1709 To match more than one field pass the list of fields to match separated
1710 by spaces (e.g. ``author description``).
1710 by spaces (e.g. ``author description``).
1711
1711
1712 Valid fields are most regular revision fields and some special fields.
1712 Valid fields are most regular revision fields and some special fields.
1713
1713
1714 Regular revision fields are ``description``, ``author``, ``branch``,
1714 Regular revision fields are ``description``, ``author``, ``branch``,
1715 ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
1715 ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
1716 and ``diff``.
1716 and ``diff``.
1717 Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
1717 Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
1718 contents of the revision. Two revisions matching their ``diff`` will
1718 contents of the revision. Two revisions matching their ``diff`` will
1719 also match their ``files``.
1719 also match their ``files``.
1720
1720
1721 Special fields are ``summary`` and ``metadata``:
1721 Special fields are ``summary`` and ``metadata``:
1722 ``summary`` matches the first line of the description.
1722 ``summary`` matches the first line of the description.
1723 ``metadata`` is equivalent to matching ``description user date``
1723 ``metadata`` is equivalent to matching ``description user date``
1724 (i.e. it matches the main metadata fields).
1724 (i.e. it matches the main metadata fields).
1725
1725
1726 ``metadata`` is the default field which is used when no fields are
1726 ``metadata`` is the default field which is used when no fields are
1727 specified. You can match more than one field at a time.
1727 specified. You can match more than one field at a time.
1728 """
1728 """
1729 # i18n: "matching" is a keyword
1729 # i18n: "matching" is a keyword
1730 l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))
1730 l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))
1731
1731
1732 revs = getset(repo, fullreposet(repo), l[0])
1732 revs = getset(repo, fullreposet(repo), l[0])
1733
1733
1734 fieldlist = ['metadata']
1734 fieldlist = ['metadata']
1735 if len(l) > 1:
1735 if len(l) > 1:
1736 fieldlist = getstring(l[1],
1736 fieldlist = getstring(l[1],
1737 # i18n: "matching" is a keyword
1737 # i18n: "matching" is a keyword
1738 _("matching requires a string "
1738 _("matching requires a string "
1739 "as its second argument")).split()
1739 "as its second argument")).split()
1740
1740
1741 # Make sure that there are no repeated fields,
1741 # Make sure that there are no repeated fields,
1742 # expand the 'special' 'metadata' field type
1742 # expand the 'special' 'metadata' field type
1743 # and check the 'files' whenever we check the 'diff'
1743 # and check the 'files' whenever we check the 'diff'
1744 fields = []
1744 fields = []
1745 for field in fieldlist:
1745 for field in fieldlist:
1746 if field == 'metadata':
1746 if field == 'metadata':
1747 fields += ['user', 'description', 'date']
1747 fields += ['user', 'description', 'date']
1748 elif field == 'diff':
1748 elif field == 'diff':
1749 # a revision matching the diff must also match the files
1749 # a revision matching the diff must also match the files
1750 # since matching the diff is very costly, make sure to
1750 # since matching the diff is very costly, make sure to
1751 # also match the files first
1751 # also match the files first
1752 fields += ['files', 'diff']
1752 fields += ['files', 'diff']
1753 else:
1753 else:
1754 if field == 'author':
1754 if field == 'author':
1755 field = 'user'
1755 field = 'user'
1756 fields.append(field)
1756 fields.append(field)
1757 fields = set(fields)
1757 fields = set(fields)
1758 if 'summary' in fields and 'description' in fields:
1758 if 'summary' in fields and 'description' in fields:
1759 # If a revision matches its description it also matches its summary
1759 # If a revision matches its description it also matches its summary
1760 fields.discard('summary')
1760 fields.discard('summary')
1761
1761
1762 # We may want to match more than one field
1762 # We may want to match more than one field
1763 # Not all fields take the same amount of time to be matched
1763 # Not all fields take the same amount of time to be matched
1764 # Sort the selected fields in order of increasing matching cost
1764 # Sort the selected fields in order of increasing matching cost
1765 fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
1765 fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
1766 'files', 'description', 'substate', 'diff']
1766 'files', 'description', 'substate', 'diff']
1767 def fieldkeyfunc(f):
1767 def fieldkeyfunc(f):
1768 try:
1768 try:
1769 return fieldorder.index(f)
1769 return fieldorder.index(f)
1770 except ValueError:
1770 except ValueError:
1771 # assume an unknown field is very costly
1771 # assume an unknown field is very costly
1772 return len(fieldorder)
1772 return len(fieldorder)
1773 fields = list(fields)
1773 fields = list(fields)
1774 fields.sort(key=fieldkeyfunc)
1774 fields.sort(key=fieldkeyfunc)
1775
1775
1776 # Each field will be matched with its own "getfield" function
1776 # Each field will be matched with its own "getfield" function
1777 # which will be added to the getfieldfuncs array of functions
1777 # which will be added to the getfieldfuncs array of functions
1778 getfieldfuncs = []
1778 getfieldfuncs = []
1779 _funcs = {
1779 _funcs = {
1780 'user': lambda r: repo[r].user(),
1780 'user': lambda r: repo[r].user(),
1781 'branch': lambda r: repo[r].branch(),
1781 'branch': lambda r: repo[r].branch(),
1782 'date': lambda r: repo[r].date(),
1782 'date': lambda r: repo[r].date(),
1783 'description': lambda r: repo[r].description(),
1783 'description': lambda r: repo[r].description(),
1784 'files': lambda r: repo[r].files(),
1784 'files': lambda r: repo[r].files(),
1785 'parents': lambda r: repo[r].parents(),
1785 'parents': lambda r: repo[r].parents(),
1786 'phase': lambda r: repo[r].phase(),
1786 'phase': lambda r: repo[r].phase(),
1787 'substate': lambda r: repo[r].substate,
1787 'substate': lambda r: repo[r].substate,
1788 'summary': lambda r: repo[r].description().splitlines()[0],
1788 'summary': lambda r: repo[r].description().splitlines()[0],
1789 'diff': lambda r: list(repo[r].diff(git=True)),
1790 }
1790 }
1791 for info in fields:
1791 for info in fields:
1792 getfield = _funcs.get(info, None)
1792 getfield = _funcs.get(info, None)
1793 if getfield is None:
1793 if getfield is None:
1794 raise error.ParseError(
1794 raise error.ParseError(
1795 # i18n: "matching" is a keyword
1795 # i18n: "matching" is a keyword
1796 _("unexpected field name passed to matching: %s") % info)
1796 _("unexpected field name passed to matching: %s") % info)
1797 getfieldfuncs.append(getfield)
1797 getfieldfuncs.append(getfield)
1798 # convert the getfieldfuncs list of functions into a "getinfo" function
1799 # which returns a list of field values for a revision (one value per
1800 # selected field, in the sorted field order computed above)
1801 getinfo = lambda r: [f(r) for f in getfieldfuncs]
1801 getinfo = lambda r: [f(r) for f in getfieldfuncs]
1802
1802
1803 def matches(x):
1803 def matches(x):
1804 for rev in revs:
1804 for rev in revs:
1805 target = getinfo(rev)
1805 target = getinfo(rev)
1806 match = True
1806 match = True
1807 for n, f in enumerate(getfieldfuncs):
1807 for n, f in enumerate(getfieldfuncs):
1808 if target[n] != f(x):
1808 if target[n] != f(x):
1809 match = False
1809 match = False
1810 if match:
1810 if match:
1811 return True
1811 return True
1812 return False
1812 return False
1813
1813
1814 return subset.filter(matches, condrepr=('<matching%r %r>', fields, revs))
1814 return subset.filter(matches, condrepr=('<matching%r %r>', fields, revs))
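A usage sketch (not part of the original source; the revision numbers are hypothetical):
# hg log -r "matching(42)"                  # same user, description and date as rev 42
# hg log -r "matching(42, 'author date')"   # restrict the comparison to author and date
# hg log -r "matching(1:5, 'diff')"         # same diff as any of revs 1..5 (and therefore same files)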
1815
1815
1816 @predicate('reverse(set)', safe=True)
1816 @predicate('reverse(set)', safe=True)
1817 def reverse(repo, subset, x):
1817 def reverse(repo, subset, x):
1818 """Reverse order of set.
1818 """Reverse order of set.
1819 """
1819 """
1820 l = getset(repo, subset, x)
1820 l = getset(repo, subset, x)
1821 l.reverse()
1821 l.reverse()
1822 return l
1822 return l
1823
1823
1824 @predicate('roots(set)', safe=True)
1824 @predicate('roots(set)', safe=True)
1825 def roots(repo, subset, x):
1825 def roots(repo, subset, x):
1826 """Changesets in set with no parent changeset in set.
1826 """Changesets in set with no parent changeset in set.
1827 """
1827 """
1828 s = getset(repo, fullreposet(repo), x)
1828 s = getset(repo, fullreposet(repo), x)
1829 parents = repo.changelog.parentrevs
1829 parents = repo.changelog.parentrevs
1830 def filter(r):
1830 def filter(r):
1831 for p in parents(r):
1831 for p in parents(r):
1832 if 0 <= p and p in s:
1832 if 0 <= p and p in s:
1833 return False
1833 return False
1834 return True
1834 return True
1835 return subset & s.filter(filter, condrepr='<roots>')
1835 return subset & s.filter(filter, condrepr='<roots>')
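For illustration (a hedged sketch, not from the original file; X and Y are placeholder revisions): roots() keeps a revision only when neither of its parents is in the queried set.
# hg log -r "roots(branch(default))"   # revisions where the default branch starts or restarts
# hg log -r "roots(X::Y)"              # typically just X, unless the range has several entry points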
1836
1836
1837 _sortkeyfuncs = {
1837 _sortkeyfuncs = {
1838 'rev': lambda c: c.rev(),
1838 'rev': lambda c: c.rev(),
1839 'branch': lambda c: c.branch(),
1839 'branch': lambda c: c.branch(),
1840 'desc': lambda c: c.description(),
1840 'desc': lambda c: c.description(),
1841 'user': lambda c: c.user(),
1841 'user': lambda c: c.user(),
1842 'author': lambda c: c.user(),
1842 'author': lambda c: c.user(),
1843 'date': lambda c: c.date()[0],
1843 'date': lambda c: c.date()[0],
1844 }
1844 }
1845
1845
1846 @predicate('sort(set[, [-]key... [, ...]])', safe=True)
1846 @predicate('sort(set[, [-]key... [, ...]])', safe=True)
1847 def sort(repo, subset, x):
1847 def sort(repo, subset, x):
1848 """Sort set by keys. The default sort order is ascending, specify a key
1848 """Sort set by keys. The default sort order is ascending, specify a key
1849 as ``-key`` to sort in descending order.
1849 as ``-key`` to sort in descending order.
1850
1850
1851 The keys can be:
1851 The keys can be:
1852
1852
1853 - ``rev`` for the revision number,
1853 - ``rev`` for the revision number,
1854 - ``branch`` for the branch name,
1854 - ``branch`` for the branch name,
1855 - ``desc`` for the commit message (description),
1855 - ``desc`` for the commit message (description),
1856 - ``user`` for user name (``author`` can be used as an alias),
1856 - ``user`` for user name (``author`` can be used as an alias),
1857 - ``date`` for the commit date
1857 - ``date`` for the commit date
1858 - ``topo`` for a reverse topological sort
1859
1859
1860 The ``topo`` sort order cannot be combined with other sort keys. This sort
1861 takes one optional argument, ``topo.firstbranch``, which accepts a revset
1862 that specifies which topological branches to prioritize in the sort.
1863
1863
1864 """
1864 """
1865 args = getargsdict(x, 'sort', 'set keys topo.firstbranch')
1865 args = getargsdict(x, 'sort', 'set keys topo.firstbranch')
1866 if 'set' not in args:
1866 if 'set' not in args:
1867 # i18n: "sort" is a keyword
1867 # i18n: "sort" is a keyword
1868 raise error.ParseError(_('sort requires one or two arguments'))
1868 raise error.ParseError(_('sort requires one or two arguments'))
1869 keys = "rev"
1869 keys = "rev"
1870 if 'keys' in args:
1870 if 'keys' in args:
1871 # i18n: "sort" is a keyword
1871 # i18n: "sort" is a keyword
1872 keys = getstring(args['keys'], _("sort spec must be a string"))
1872 keys = getstring(args['keys'], _("sort spec must be a string"))
1873
1873
1874 keyflags = []
1874 keyflags = []
1875 for k in keys.split():
1875 for k in keys.split():
1876 fk = k
1876 fk = k
1877 reverse = (k[0] == '-')
1877 reverse = (k[0] == '-')
1878 if reverse:
1878 if reverse:
1879 k = k[1:]
1879 k = k[1:]
1880 if k not in _sortkeyfuncs and k != 'topo':
1880 if k not in _sortkeyfuncs and k != 'topo':
1881 raise error.ParseError(_("unknown sort key %r") % fk)
1881 raise error.ParseError(_("unknown sort key %r") % fk)
1882 keyflags.append((k, reverse))
1882 keyflags.append((k, reverse))
1883
1883
- 1884           s = args['set']
- 1885           revs = getset(repo, subset, s)
- 1886
  1887 1884      if len(keyflags) > 1 and any(k == 'topo' for k, reverse in keyflags):
  1888 1885          # i18n: "topo" is a keyword
  1889 1886          raise error.ParseError(_(
  1890 1887              'topo sort order cannot be combined with other sort keys'))
  1891 1888
- 1892           firstbranch = ()
+      1889      opts = {}
  1893 1890      if 'topo.firstbranch' in args:
  1894 1891          if any(k == 'topo' for k, reverse in keyflags):
- 1895                   firstbranch = getset(repo, subset, args['topo.firstbranch'])
+      1892              opts['topo.firstbranch'] = args['topo.firstbranch']
  1896 1893          else:
  1897 1894              # i18n: "topo" and "topo.firstbranch" are keywords
  1898 1895              raise error.ParseError(_(
  1899 1896                  'topo.firstbranch can only be used when using the topo sort '
  1900 1897                  'key'))
  1901 1898
+      1899      s = args['set']
+      1900      revs = getset(repo, subset, s)
+      1901
  1902 1902      if not keyflags:
  1903 1903          return revs
  1904 1904      if len(keyflags) == 1 and keyflags[0][0] == "rev":
  1905 1905          revs.sort(reverse=keyflags[0][1])
  1906 1906          return revs
  1907 1907      elif keyflags[0][0] == "topo":
+      1908          firstbranch = ()
+      1909          if 'topo.firstbranch' in opts:
+      1910              firstbranch = getset(repo, subset, opts['topo.firstbranch'])
  1908 1911          revs = baseset(_toposort(revs, repo.changelog.parentrevs, firstbranch),
  1909 1912                         istopo=True)
  1910 1913          if keyflags[0][1]:
  1911 1914              revs.reverse()
  1912 1915          return revs
1913
1916
1914 # sort() is guaranteed to be stable
1917 # sort() is guaranteed to be stable
1915 ctxs = [repo[r] for r in revs]
1918 ctxs = [repo[r] for r in revs]
1916 for k, reverse in reversed(keyflags):
1919 for k, reverse in reversed(keyflags):
1917 ctxs.sort(key=_sortkeyfuncs[k], reverse=reverse)
1920 ctxs.sort(key=_sortkeyfuncs[k], reverse=reverse)
1918 return baseset([c.rev() for c in ctxs])
1921 return baseset([c.rev() for c in ctxs])
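Usage sketches (illustrative only; the keyword-argument form is assumed from the getargsdict('set keys topo.firstbranch') declaration above):
# hg log -r "sort(all(), '-date user')"                        # primary key: date descending; ties broken by user ascending
# hg log -r "sort(all(), 'topo')"                              # one topological branch at a time, as produced by _toposort below
# hg log -r "sort(all(), 'topo', topo.firstbranch=default)"    # emit the branch containing 'default' first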
1919
1922
1920 def _toposort(revs, parentsfunc, firstbranch=()):
1923 def _toposort(revs, parentsfunc, firstbranch=()):
1921 """Yield revisions from heads to roots one (topo) branch at a time.
1924 """Yield revisions from heads to roots one (topo) branch at a time.
1922
1925
1923 This function aims to be used by a graph generator that wishes to minimize
1926 This function aims to be used by a graph generator that wishes to minimize
1924 the number of parallel branches and their interleaving.
1927 the number of parallel branches and their interleaving.
1925
1928
1926 Example iteration order (numbers show the "true" order in a changelog):
1929 Example iteration order (numbers show the "true" order in a changelog):
1927
1930
1931 o 4
1932 |
1933 o 1
1934 |
1935 | o 3
1936 | |
1937 | o 2
1938 |/
1939 o 0
1937
1940
1938 Note that the ancestors of merges are understood by the current
1941 Note that the ancestors of merges are understood by the current
1939 algorithm to be on the same branch. This means no reordering will
1942 algorithm to be on the same branch. This means no reordering will
1940 occur behind a merge.
1943 occur behind a merge.
1941 """
1944 """
1942
1945
1943 ### Quick summary of the algorithm
1946 ### Quick summary of the algorithm
1944 #
1947 #
1945 # This function is based around a "retention" principle. We keep revisions
1948 # This function is based around a "retention" principle. We keep revisions
1946 # in memory until we are ready to emit a whole branch that immediately
1949 # in memory until we are ready to emit a whole branch that immediately
1947 # "merges" into an existing one. This reduces the number of parallel
1950 # "merges" into an existing one. This reduces the number of parallel
1948 # branches with interleaved revisions.
1951 # branches with interleaved revisions.
1949 #
1952 #
1950 # During iteration revs are split into two groups:
1953 # During iteration revs are split into two groups:
1954 # A) revisions already emitted
1955 # B) revisions in "retention"; they are stored as separate subgroups.
1953 #
1956 #
1954 # for each REV, we do the following logic:
1957 # for each REV, we do the following logic:
1955 #
1958 #
1956 # 1) if REV is a parent of (A), we will emit it. If there is a
1959 # 1) if REV is a parent of (A), we will emit it. If there is a
1957 # retention group ((B) above) that is blocked on REV being
1960 # retention group ((B) above) that is blocked on REV being
1958 # available, we emit all the revisions out of that retention
1961 # available, we emit all the revisions out of that retention
1959 # group first.
1962 # group first.
1960 #
1963 #
1964 # 2) else, we search for a subgroup in (B) waiting for REV to become
1965 # available; if such a subgroup exists, we add REV to it and the subgroup
1966 # now waits for REV.parents() to become available.
1964 #
1967 #
1968 # 3) finally, if no such subgroup exists in (B), we create a new one.
1966 #
1969 #
1967 #
1970 #
1968 # To bootstrap the algorithm, we emit the tipmost revision (which
1971 # To bootstrap the algorithm, we emit the tipmost revision (which
1969 # puts it in group (A) from above).
1972 # puts it in group (A) from above).
1970
1973
1971 revs.sort(reverse=True)
1974 revs.sort(reverse=True)
1972
1975
1976 # Set of parents of revisions that have been emitted. They can be considered
1977 # unblocked as the graph generator is already aware of them, so there is no
1978 # need to delay the revisions that reference them.
1976 #
1979 #
1977 # If someone wants to prioritize a branch over the others, pre-filling this
1980 # If someone wants to prioritize a branch over the others, pre-filling this
1978 # set will force all other branches to wait until this branch is ready to be
1981 # set will force all other branches to wait until this branch is ready to be
1979 # emitted.
1982 # emitted.
1980 unblocked = set(firstbranch)
1983 unblocked = set(firstbranch)
1981
1984
1982 # list of groups waiting to be displayed, each group is defined by:
1985 # list of groups waiting to be displayed, each group is defined by:
1983 #
1986 #
1987 # (revs: list of revs waiting to be displayed,
1988 #  blocked: set of revs that cannot be displayed before those in 'revs')
1986 #
1989 #
1990 # The second value ('blocked') corresponds to the parents of any revision in
1991 # the group ('revs') that are not themselves contained in the group. The main
1992 # idea of this algorithm is to delay the emission of any revision as much as
1993 # possible: the revs in a group are only displayed at the moment we are about
1994 # to display their parents.
1992 #
1995 #
1993 # This first implementation is smart until it encounters a merge: it will
1996 # This first implementation is smart until it encounters a merge: it will
1994 # emit revs as soon as any parent is about to be emitted and can grow an
1997 # emit revs as soon as any parent is about to be emitted and can grow an
1998 # arbitrary number of revs in 'blocked'. In practice this means we properly
1999 # retain new branches but give up on any special ordering for ancestors
2000 # of merges. The implementation can be improved to handle this better.
1998 #
2001 #
2002 # The first subgroup is special. It corresponds to all the revisions that
2003 # were already emitted. Its 'revs' list is expected to be empty and its
2004 # 'blocked' set contains the parent revisions of already emitted revisions.
2002 #
2005 #
2006 # You could pre-seed the <parents> set of groups[0] with specific
2007 # changesets to select what the first emitted branch should be.
2005 groups = [([], unblocked)]
2008 groups = [([], unblocked)]
2006 pendingheap = []
2009 pendingheap = []
2007 pendingset = set()
2010 pendingset = set()
2008
2011
2009 heapq.heapify(pendingheap)
2012 heapq.heapify(pendingheap)
2010 heappop = heapq.heappop
2013 heappop = heapq.heappop
2011 heappush = heapq.heappush
2014 heappush = heapq.heappush
2012 for currentrev in revs:
2015 for currentrev in revs:
2013 # Heap works with smallest element, we want highest so we invert
2016 # Heap works with smallest element, we want highest so we invert
2014 if currentrev not in pendingset:
2017 if currentrev not in pendingset:
2015 heappush(pendingheap, -currentrev)
2018 heappush(pendingheap, -currentrev)
2016 pendingset.add(currentrev)
2019 pendingset.add(currentrev)
2020 # iterate on pending revs until the current rev has been
2021 # processed.
2019 rev = None
2022 rev = None
2020 while rev != currentrev:
2023 while rev != currentrev:
2021 rev = -heappop(pendingheap)
2024 rev = -heappop(pendingheap)
2022 pendingset.remove(rev)
2025 pendingset.remove(rev)
2023
2026
2027 # Look for subgroups that are blocked on (waiting for) the current revision.
2025 matching = [i for i, g in enumerate(groups) if rev in g[1]]
2028 matching = [i for i, g in enumerate(groups) if rev in g[1]]
2026
2029
2027 if matching:
2030 if matching:
2028 # The main idea is to gather together all sets that are blocked
2031 # The main idea is to gather together all sets that are blocked
2029 # on the same revision.
2032 # on the same revision.
2030 #
2033 #
2031 # Groups are merged when a common blocking ancestor is
2034 # Groups are merged when a common blocking ancestor is
2032 # observed. For example, given two groups:
2035 # observed. For example, given two groups:
2033 #
2036 #
2034 # revs [5, 4] waiting for 1
2037 # revs [5, 4] waiting for 1
2035 # revs [3, 2] waiting for 1
2038 # revs [3, 2] waiting for 1
2036 #
2039 #
2037 # These two groups will be merged when we process
2040 # These two groups will be merged when we process
2038 # 1. In theory, we could have merged the groups when
2041 # 1. In theory, we could have merged the groups when
2039 # we added 2 to the group it is now in (we could have
2042 # we added 2 to the group it is now in (we could have
2040 # noticed the groups were both blocked on 1 then), but
2043 # noticed the groups were both blocked on 1 then), but
2041 # the way it works now makes the algorithm simpler.
2044 # the way it works now makes the algorithm simpler.
2042 #
2045 #
2043 # We also always keep the oldest subgroup first. We can
2046 # We also always keep the oldest subgroup first. We can
2044 # probably improve the behavior by having the longest set
2047 # probably improve the behavior by having the longest set
2048 # first. That way, graph algorithms could minimise the length
2049 # of parallel lines in their drawing. This is currently not done.
2047 targetidx = matching.pop(0)
2050 targetidx = matching.pop(0)
2048 trevs, tparents = groups[targetidx]
2051 trevs, tparents = groups[targetidx]
2049 for i in matching:
2052 for i in matching:
2050 gr = groups[i]
2053 gr = groups[i]
2051 trevs.extend(gr[0])
2054 trevs.extend(gr[0])
2052 tparents |= gr[1]
2055 tparents |= gr[1]
2053 # delete all merged subgroups (except the one we kept)
2056 # delete all merged subgroups (except the one we kept)
2054 # (starting from the last subgroup for performance and
2057 # (starting from the last subgroup for performance and
2055 # sanity reasons)
2058 # sanity reasons)
2056 for i in reversed(matching):
2059 for i in reversed(matching):
2057 del groups[i]
2060 del groups[i]
2058 else:
2061 else:
2059 # This is a new head. We create a new subgroup for it.
2062 # This is a new head. We create a new subgroup for it.
2060 targetidx = len(groups)
2063 targetidx = len(groups)
2061 groups.append(([], set([rev])))
2064 groups.append(([], set([rev])))
2062
2065
2063 gr = groups[targetidx]
2066 gr = groups[targetidx]
2064
2067
2068 # We now add the current node to this subgroup. This is done
2066 # after the subgroup merging because all elements from a subgroup
2069 # after the subgroup merging because all elements from a subgroup
2067 # that relied on this rev must precede it.
2070 # that relied on this rev must precede it.
2068 #
2071 #
2069 # we also update the <parents> set to include the parents of the
2072 # we also update the <parents> set to include the parents of the
2070 # new nodes.
2073 # new nodes.
2071 if rev == currentrev: # only display stuff in rev
2074 if rev == currentrev: # only display stuff in rev
2072 gr[0].append(rev)
2075 gr[0].append(rev)
2073 gr[1].remove(rev)
2076 gr[1].remove(rev)
2074 parents = [p for p in parentsfunc(rev) if p > node.nullrev]
2077 parents = [p for p in parentsfunc(rev) if p > node.nullrev]
2075 gr[1].update(parents)
2078 gr[1].update(parents)
2076 for p in parents:
2079 for p in parents:
2077 if p not in pendingset:
2080 if p not in pendingset:
2078 pendingset.add(p)
2081 pendingset.add(p)
2079 heappush(pendingheap, -p)
2082 heappush(pendingheap, -p)
2080
2083
2081 # Look for a subgroup to display
2084 # Look for a subgroup to display
2082 #
2085 #
2086 # When unblocked is empty (if clause), we are not waiting for any
2087 # revision: either no priority was given for the first iteration, or
2088 # we just emitted a whole disconnected set of the graph (reached a
2089 # root). In that case we arbitrarily take the oldest known
2090 # subgroup. The heuristic could probably be better.
2088 #
2091 #
2089 # Otherwise (elif clause) if the subgroup is blocked on
2092 # Otherwise (elif clause) if the subgroup is blocked on
2090 # a revision we just emitted, we can safely emit it as
2093 # a revision we just emitted, we can safely emit it as
2091 # well.
2094 # well.
2092 if not unblocked:
2095 if not unblocked:
2093 if len(groups) > 1: # display other subset
2096 if len(groups) > 1: # display other subset
2094 targetidx = 1
2097 targetidx = 1
2095 gr = groups[1]
2098 gr = groups[1]
2096 elif not gr[1] & unblocked:
2099 elif not gr[1] & unblocked:
2097 gr = None
2100 gr = None
2098
2101
2099 if gr is not None:
2102 if gr is not None:
2100 # update the set of awaited revisions with the one from the
2103 # update the set of awaited revisions with the one from the
2101 # subgroup
2104 # subgroup
2102 unblocked |= gr[1]
2105 unblocked |= gr[1]
2103 # output all revisions in the subgroup
2106 # output all revisions in the subgroup
2104 for r in gr[0]:
2107 for r in gr[0]:
2105 yield r
2108 yield r
2106 # delete the subgroup that you just output
2109 # delete the subgroup that you just output
2107 # unless it is groups[0] in which case you just empty it.
2110 # unless it is groups[0] in which case you just empty it.
2108 if targetidx:
2111 if targetidx:
2109 del groups[targetidx]
2112 del groups[targetidx]
2110 else:
2113 else:
2111 gr[0][:] = []
2114 gr[0][:] = []
2112 # Check if we have some subgroup waiting for revisions we are not going to
2115 # Check if we have some subgroup waiting for revisions we are not going to
2113 # iterate over
2116 # iterate over
2114 for g in groups:
2117 for g in groups:
2115 for r in g[0]:
2118 for r in g[0]:
2116 yield r
2119 yield r
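A minimal worked example (not from the original module), using the five-revision graph drawn in the docstring above and parentrevs-style tuples where -1 stands for the null revision:
# parents = {0: (-1, -1), 1: (0, -1), 2: (0, -1), 3: (2, -1), 4: (1, -1)}
# list(_toposort([0, 1, 2, 3, 4], lambda r: parents[r]))
# -> [4, 1, 3, 2, 0]   # the 4-1 branch is emitted in full before the 3-2 branch, then the shared root 0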
2117
2120
2118 @predicate('subrepo([pattern])')
2121 @predicate('subrepo([pattern])')
2119 def subrepo(repo, subset, x):
2122 def subrepo(repo, subset, x):
2120 """Changesets that add, modify or remove the given subrepo. If no subrepo
2123 """Changesets that add, modify or remove the given subrepo. If no subrepo
2121 pattern is named, any subrepo changes are returned.
2124 pattern is named, any subrepo changes are returned.
2122 """
2125 """
2123 # i18n: "subrepo" is a keyword
2126 # i18n: "subrepo" is a keyword
2124 args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
2127 args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
2125 pat = None
2128 pat = None
2126 if len(args) != 0:
2129 if len(args) != 0:
2127 pat = getstring(args[0], _("subrepo requires a pattern"))
2130 pat = getstring(args[0], _("subrepo requires a pattern"))
2128
2131
2129 m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])
2132 m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])
2130
2133
2131 def submatches(names):
2134 def submatches(names):
2132 k, p, m = util.stringmatcher(pat)
2135 k, p, m = util.stringmatcher(pat)
2133 for name in names:
2136 for name in names:
2134 if m(name):
2137 if m(name):
2135 yield name
2138 yield name
2136
2139
2137 def matches(x):
2140 def matches(x):
2138 c = repo[x]
2141 c = repo[x]
2139 s = repo.status(c.p1().node(), c.node(), match=m)
2142 s = repo.status(c.p1().node(), c.node(), match=m)
2140
2143
2141 if pat is None:
2144 if pat is None:
2142 return s.added or s.modified or s.removed
2145 return s.added or s.modified or s.removed
2143
2146
2144 if s.added:
2147 if s.added:
2145 return any(submatches(c.substate.keys()))
2148 return any(submatches(c.substate.keys()))
2146
2149
2147 if s.modified:
2150 if s.modified:
2148 subs = set(c.p1().substate.keys())
2151 subs = set(c.p1().substate.keys())
2149 subs.update(c.substate.keys())
2152 subs.update(c.substate.keys())
2150
2153
2151 for path in submatches(subs):
2154 for path in submatches(subs):
2152 if c.p1().substate.get(path) != c.substate.get(path):
2155 if c.p1().substate.get(path) != c.substate.get(path):
2153 return True
2156 return True
2154
2157
2155 if s.removed:
2158 if s.removed:
2156 return any(submatches(c.p1().substate.keys()))
2159 return any(submatches(c.p1().substate.keys()))
2157
2160
2158 return False
2161 return False
2159
2162
2160 return subset.filter(matches, condrepr=('<subrepo %r>', pat))
2163 return subset.filter(matches, condrepr=('<subrepo %r>', pat))
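Usage sketch (the subrepo paths are hypothetical):
# hg log -r "subrepo()"                 # any changeset touching any subrepo
# hg log -r "subrepo('vendor/lib')"     # changesets that add, modify or remove that subrepo
# hg log -r "subrepo('re:^vendor/')"    # same, with a regular expression pattern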
2161
2164
2162 def _substringmatcher(pattern):
2165 def _substringmatcher(pattern):
2163 kind, pattern, matcher = util.stringmatcher(pattern)
2166 kind, pattern, matcher = util.stringmatcher(pattern)
2164 if kind == 'literal':
2167 if kind == 'literal':
2165 matcher = lambda s: pattern in s
2168 matcher = lambda s: pattern in s
2166 return kind, pattern, matcher
2169 return kind, pattern, matcher
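A small sketch of the 'literal' branch added here (assuming util.stringmatcher's usual (kind, pattern, matcher) return contract):
# kind, pattern, m = _substringmatcher('bob')
# kind                         ->  'literal'
# m('bob <bob@example.org>')   ->  True     # plain substring containment, not exact equality
# m('alice')                   ->  False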
2167
2170
2168 @predicate('tag([name])', safe=True)
2171 @predicate('tag([name])', safe=True)
2169 def tag(repo, subset, x):
2172 def tag(repo, subset, x):
2170 """The specified tag by name, or all tagged revisions if no name is given.
2173 """The specified tag by name, or all tagged revisions if no name is given.
2171
2174
2172 If `name` starts with `re:`, the remainder of the name is treated as
2175 If `name` starts with `re:`, the remainder of the name is treated as
2173 a regular expression. To match a tag that actually starts with `re:`,
2176 a regular expression. To match a tag that actually starts with `re:`,
2174 use the prefix `literal:`.
2177 use the prefix `literal:`.
2175 """
2178 """
2176 # i18n: "tag" is a keyword
2179 # i18n: "tag" is a keyword
2177 args = getargs(x, 0, 1, _("tag takes one or no arguments"))
2180 args = getargs(x, 0, 1, _("tag takes one or no arguments"))
2178 cl = repo.changelog
2181 cl = repo.changelog
2179 if args:
2182 if args:
2180 pattern = getstring(args[0],
2183 pattern = getstring(args[0],
2181 # i18n: "tag" is a keyword
2184 # i18n: "tag" is a keyword
2182 _('the argument to tag must be a string'))
2185 _('the argument to tag must be a string'))
2183 kind, pattern, matcher = util.stringmatcher(pattern)
2186 kind, pattern, matcher = util.stringmatcher(pattern)
2184 if kind == 'literal':
2187 if kind == 'literal':
2185 # avoid resolving all tags
2188 # avoid resolving all tags
2186 tn = repo._tagscache.tags.get(pattern, None)
2189 tn = repo._tagscache.tags.get(pattern, None)
2187 if tn is None:
2190 if tn is None:
2188 raise error.RepoLookupError(_("tag '%s' does not exist")
2191 raise error.RepoLookupError(_("tag '%s' does not exist")
2189 % pattern)
2192 % pattern)
2190 s = set([repo[tn].rev()])
2193 s = set([repo[tn].rev()])
2191 else:
2194 else:
2192 s = set([cl.rev(n) for t, n in repo.tagslist() if matcher(t)])
2195 s = set([cl.rev(n) for t, n in repo.tagslist() if matcher(t)])
2193 else:
2196 else:
2194 s = set([cl.rev(n) for t, n in repo.tagslist() if t != 'tip'])
2197 s = set([cl.rev(n) for t, n in repo.tagslist() if t != 'tip'])
2195 return subset & s
2198 return subset & s
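Usage sketch (tag names are hypothetical):
# hg log -r "tag()"            # every tagged revision except 'tip'
# hg log -r "tag('1.0')"       # the revision carrying tag 1.0 (raises RepoLookupError if absent)
# hg log -r "tag('re:^v\d')"   # tag names matched as a regular expression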
2196
2199
2197 @predicate('tagged', safe=True)
2200 @predicate('tagged', safe=True)
2198 def tagged(repo, subset, x):
2201 def tagged(repo, subset, x):
2199 return tag(repo, subset, x)
2202 return tag(repo, subset, x)
2200
2203
2201 @predicate('unstable()', safe=True)
2204 @predicate('unstable()', safe=True)
2202 def unstable(repo, subset, x):
2205 def unstable(repo, subset, x):
2203 """Non-obsolete changesets with obsolete ancestors.
2206 """Non-obsolete changesets with obsolete ancestors.
2204 """
2207 """
2205 # i18n: "unstable" is a keyword
2208 # i18n: "unstable" is a keyword
2206 getargs(x, 0, 0, _("unstable takes no arguments"))
2209 getargs(x, 0, 0, _("unstable takes no arguments"))
2207 unstables = obsmod.getrevs(repo, 'unstable')
2210 unstables = obsmod.getrevs(repo, 'unstable')
2208 return subset & unstables
2211 return subset & unstables
2209
2212
2210
2213
2211 @predicate('user(string)', safe=True)
2214 @predicate('user(string)', safe=True)
2212 def user(repo, subset, x):
2215 def user(repo, subset, x):
2213 """User name contains string. The match is case-insensitive.
2216 """User name contains string. The match is case-insensitive.
2214
2217
2215 If `string` starts with `re:`, the remainder of the string is treated as
2218 If `string` starts with `re:`, the remainder of the string is treated as
2216 a regular expression. To match a user that actually contains `re:`, use
2219 a regular expression. To match a user that actually contains `re:`, use
2217 the prefix `literal:`.
2220 the prefix `literal:`.
2218 """
2221 """
2219 return author(repo, subset, x)
2222 return author(repo, subset, x)
2220
2223
2221 # experimental
2224 # experimental
2222 @predicate('wdir', safe=True)
2225 @predicate('wdir', safe=True)
2223 def wdir(repo, subset, x):
2226 def wdir(repo, subset, x):
2224 # i18n: "wdir" is a keyword
2227 # i18n: "wdir" is a keyword
2225 getargs(x, 0, 0, _("wdir takes no arguments"))
2228 getargs(x, 0, 0, _("wdir takes no arguments"))
2226 if node.wdirrev in subset or isinstance(subset, fullreposet):
2229 if node.wdirrev in subset or isinstance(subset, fullreposet):
2227 return baseset([node.wdirrev])
2230 return baseset([node.wdirrev])
2228 return baseset()
2231 return baseset()
2229
2232
2230 # for internal use
2233 # for internal use
2231 @predicate('_list', safe=True)
2234 @predicate('_list', safe=True)
2232 def _list(repo, subset, x):
2235 def _list(repo, subset, x):
2233 s = getstring(x, "internal error")
2236 s = getstring(x, "internal error")
2234 if not s:
2237 if not s:
2235 return baseset()
2238 return baseset()
2236 # remove duplicates here. it's difficult for caller to deduplicate sets
2239 # remove duplicates here. it's difficult for caller to deduplicate sets
2237 # because different symbols can point to the same rev.
2240 # because different symbols can point to the same rev.
2238 cl = repo.changelog
2241 cl = repo.changelog
2239 ls = []
2242 ls = []
2240 seen = set()
2243 seen = set()
2241 for t in s.split('\0'):
2244 for t in s.split('\0'):
2242 try:
2245 try:
2243 # fast path for integer revision
2246 # fast path for integer revision
2244 r = int(t)
2247 r = int(t)
2245 if str(r) != t or r not in cl:
2248 if str(r) != t or r not in cl:
2246 raise ValueError
2249 raise ValueError
2247 revs = [r]
2250 revs = [r]
2248 except ValueError:
2251 except ValueError:
2249 revs = stringset(repo, subset, t)
2252 revs = stringset(repo, subset, t)
2250
2253
2251 for r in revs:
2254 for r in revs:
2252 if r in seen:
2255 if r in seen:
2253 continue
2256 continue
2254 if (r in subset
2257 if (r in subset
2255 or r == node.nullrev and isinstance(subset, fullreposet)):
2258 or r == node.nullrev and isinstance(subset, fullreposet)):
2256 ls.append(r)
2259 ls.append(r)
2257 seen.add(r)
2260 seen.add(r)
2258 return baseset(ls)
2261 return baseset(ls)
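_list is normally emitted by formatspec() further down rather than written by hand; a sketch (the symbols are hypothetical):
# formatspec('%ls', ['a1b2c3', 'tip'])   ->   "_list('a1b2c3\x00tip')"
# evaluating that revset resolves each NUL-separated entry against the repo,
# dropping duplicates and anything outside the current subset, as coded above.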
2259
2262
2260 # for internal use
2263 # for internal use
2261 @predicate('_intlist', safe=True)
2264 @predicate('_intlist', safe=True)
2262 def _intlist(repo, subset, x):
2265 def _intlist(repo, subset, x):
2263 s = getstring(x, "internal error")
2266 s = getstring(x, "internal error")
2264 if not s:
2267 if not s:
2265 return baseset()
2268 return baseset()
2266 ls = [int(r) for r in s.split('\0')]
2269 ls = [int(r) for r in s.split('\0')]
2267 s = subset
2270 s = subset
2268 return baseset([r for r in ls if r in s])
2271 return baseset([r for r in ls if r in s])
2269
2272
2270 # for internal use
2273 # for internal use
2271 @predicate('_hexlist', safe=True)
2274 @predicate('_hexlist', safe=True)
2272 def _hexlist(repo, subset, x):
2275 def _hexlist(repo, subset, x):
2273 s = getstring(x, "internal error")
2276 s = getstring(x, "internal error")
2274 if not s:
2277 if not s:
2275 return baseset()
2278 return baseset()
2276 cl = repo.changelog
2279 cl = repo.changelog
2277 ls = [cl.rev(node.bin(r)) for r in s.split('\0')]
2280 ls = [cl.rev(node.bin(r)) for r in s.split('\0')]
2278 s = subset
2281 s = subset
2279 return baseset([r for r in ls if r in s])
2282 return baseset([r for r in ls if r in s])
2280
2283
2281 methods = {
2284 methods = {
2282 "range": rangeset,
2285 "range": rangeset,
2283 "dagrange": dagrange,
2286 "dagrange": dagrange,
2284 "string": stringset,
2287 "string": stringset,
2285 "symbol": stringset,
2288 "symbol": stringset,
2286 "and": andset,
2289 "and": andset,
2287 "or": orset,
2290 "or": orset,
2288 "not": notset,
2291 "not": notset,
2289 "difference": differenceset,
2292 "difference": differenceset,
2290 "list": listset,
2293 "list": listset,
2291 "keyvalue": keyvaluepair,
2294 "keyvalue": keyvaluepair,
2292 "func": func,
2295 "func": func,
2293 "ancestor": ancestorspec,
2296 "ancestor": ancestorspec,
2294 "parent": parentspec,
2297 "parent": parentspec,
2295 "parentpost": p1,
2298 "parentpost": p1,
2296 }
2299 }
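An illustration of how this table is used (tree shape as in the parser doctests below; dispatch is performed by getset(), defined earlier in this file):
# parse('heads(default)')  ->  ('func', ('symbol', 'heads'), ('symbol', 'default'))
# getset() looks up methods[tree[0]] -- here methods['func'] -- which in turn
# resolves the 'heads' predicate and evaluates its argument the same way.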
2297
2300
2298 def _matchonly(revs, bases):
2301 def _matchonly(revs, bases):
2299 """
2302 """
2300 >>> f = lambda *args: _matchonly(*map(parse, args))
2303 >>> f = lambda *args: _matchonly(*map(parse, args))
2301 >>> f('ancestors(A)', 'not ancestors(B)')
2304 >>> f('ancestors(A)', 'not ancestors(B)')
2302 ('list', ('symbol', 'A'), ('symbol', 'B'))
2305 ('list', ('symbol', 'A'), ('symbol', 'B'))
2303 """
2306 """
2304 if (revs is not None
2307 if (revs is not None
2305 and revs[0] == 'func'
2308 and revs[0] == 'func'
2306 and getstring(revs[1], _('not a symbol')) == 'ancestors'
2309 and getstring(revs[1], _('not a symbol')) == 'ancestors'
2307 and bases is not None
2310 and bases is not None
2308 and bases[0] == 'not'
2311 and bases[0] == 'not'
2309 and bases[1][0] == 'func'
2312 and bases[1][0] == 'func'
2310 and getstring(bases[1][1], _('not a symbol')) == 'ancestors'):
2313 and getstring(bases[1][1], _('not a symbol')) == 'ancestors'):
2311 return ('list', revs[2], bases[1][2])
2314 return ('list', revs[2], bases[1][2])
2312
2315
2313 def _optimize(x, small):
2316 def _optimize(x, small):
2314 if x is None:
2317 if x is None:
2315 return 0, x
2318 return 0, x
2316
2319
2317 smallbonus = 1
2320 smallbonus = 1
2318 if small:
2321 if small:
2319 smallbonus = .5
2322 smallbonus = .5
2320
2323
2321 op = x[0]
2324 op = x[0]
2322 if op == 'minus':
2325 if op == 'minus':
2323 return _optimize(('and', x[1], ('not', x[2])), small)
2326 return _optimize(('and', x[1], ('not', x[2])), small)
2324 elif op == 'only':
2327 elif op == 'only':
2325 t = ('func', ('symbol', 'only'), ('list', x[1], x[2]))
2328 t = ('func', ('symbol', 'only'), ('list', x[1], x[2]))
2326 return _optimize(t, small)
2329 return _optimize(t, small)
2327 elif op == 'onlypost':
2330 elif op == 'onlypost':
2328 return _optimize(('func', ('symbol', 'only'), x[1]), small)
2331 return _optimize(('func', ('symbol', 'only'), x[1]), small)
2329 elif op == 'dagrangepre':
2332 elif op == 'dagrangepre':
2330 return _optimize(('func', ('symbol', 'ancestors'), x[1]), small)
2333 return _optimize(('func', ('symbol', 'ancestors'), x[1]), small)
2331 elif op == 'dagrangepost':
2334 elif op == 'dagrangepost':
2332 return _optimize(('func', ('symbol', 'descendants'), x[1]), small)
2335 return _optimize(('func', ('symbol', 'descendants'), x[1]), small)
2333 elif op == 'rangeall':
2336 elif op == 'rangeall':
2334 return _optimize(('range', ('string', '0'), ('string', 'tip')), small)
2337 return _optimize(('range', ('string', '0'), ('string', 'tip')), small)
2335 elif op == 'rangepre':
2338 elif op == 'rangepre':
2336 return _optimize(('range', ('string', '0'), x[1]), small)
2339 return _optimize(('range', ('string', '0'), x[1]), small)
2337 elif op == 'rangepost':
2340 elif op == 'rangepost':
2338 return _optimize(('range', x[1], ('string', 'tip')), small)
2341 return _optimize(('range', x[1], ('string', 'tip')), small)
2339 elif op == 'negate':
2342 elif op == 'negate':
2340 s = getstring(x[1], _("can't negate that"))
2343 s = getstring(x[1], _("can't negate that"))
2341 return _optimize(('string', '-' + s), small)
2344 return _optimize(('string', '-' + s), small)
2342 elif op in 'string symbol negate':
2345 elif op in 'string symbol negate':
2343 return smallbonus, x # single revisions are small
2346 return smallbonus, x # single revisions are small
2344 elif op == 'and':
2347 elif op == 'and':
2345 wa, ta = _optimize(x[1], True)
2348 wa, ta = _optimize(x[1], True)
2346 wb, tb = _optimize(x[2], True)
2349 wb, tb = _optimize(x[2], True)
2347 w = min(wa, wb)
2350 w = min(wa, wb)
2348
2351
2349 # (::x and not ::y)/(not ::y and ::x) have a fast path
2352 # (::x and not ::y)/(not ::y and ::x) have a fast path
2350 tm = _matchonly(ta, tb) or _matchonly(tb, ta)
2353 tm = _matchonly(ta, tb) or _matchonly(tb, ta)
2351 if tm:
2354 if tm:
2352 return w, ('func', ('symbol', 'only'), tm)
2355 return w, ('func', ('symbol', 'only'), tm)
2353
2356
2354 if tb is not None and tb[0] == 'not':
2357 if tb is not None and tb[0] == 'not':
2355 return wa, ('difference', ta, tb[1])
2358 return wa, ('difference', ta, tb[1])
2356
2359
2357 if wa > wb:
2360 if wa > wb:
2358 return w, (op, tb, ta)
2361 return w, (op, tb, ta)
2359 return w, (op, ta, tb)
2362 return w, (op, ta, tb)
2360 elif op == 'or':
2363 elif op == 'or':
2364 # fast path for machine-generated expressions, which are likely to have
2365 # lots of trivial revisions: 'a + b + c()' to '_list(a b) + c()'
2363 ws, ts, ss = [], [], []
2366 ws, ts, ss = [], [], []
2364 def flushss():
2367 def flushss():
2365 if not ss:
2368 if not ss:
2366 return
2369 return
2367 if len(ss) == 1:
2370 if len(ss) == 1:
2368 w, t = ss[0]
2371 w, t = ss[0]
2369 else:
2372 else:
2370 s = '\0'.join(t[1] for w, t in ss)
2373 s = '\0'.join(t[1] for w, t in ss)
2371 y = ('func', ('symbol', '_list'), ('string', s))
2374 y = ('func', ('symbol', '_list'), ('string', s))
2372 w, t = _optimize(y, False)
2375 w, t = _optimize(y, False)
2373 ws.append(w)
2376 ws.append(w)
2374 ts.append(t)
2377 ts.append(t)
2375 del ss[:]
2378 del ss[:]
2376 for y in x[1:]:
2379 for y in x[1:]:
2377 w, t = _optimize(y, False)
2380 w, t = _optimize(y, False)
2378 if t is not None and (t[0] == 'string' or t[0] == 'symbol'):
2381 if t is not None and (t[0] == 'string' or t[0] == 'symbol'):
2379 ss.append((w, t))
2382 ss.append((w, t))
2380 continue
2383 continue
2381 flushss()
2384 flushss()
2382 ws.append(w)
2385 ws.append(w)
2383 ts.append(t)
2386 ts.append(t)
2384 flushss()
2387 flushss()
2385 if len(ts) == 1:
2388 if len(ts) == 1:
2386 return ws[0], ts[0] # 'or' operation is fully optimized out
2389 return ws[0], ts[0] # 'or' operation is fully optimized out
2387 # we can't reorder trees by weight because it would change the order.
2390 # we can't reorder trees by weight because it would change the order.
2388 # ("sort(a + b)" == "sort(b + a)", but "a + b" != "b + a")
2391 # ("sort(a + b)" == "sort(b + a)", but "a + b" != "b + a")
2389 # ts = tuple(t for w, t in sorted(zip(ws, ts), key=lambda wt: wt[0]))
2392 # ts = tuple(t for w, t in sorted(zip(ws, ts), key=lambda wt: wt[0]))
2390 return max(ws), (op,) + tuple(ts)
2393 return max(ws), (op,) + tuple(ts)
2391 elif op == 'not':
2394 elif op == 'not':
2392 # Optimize not public() to _notpublic() because we have a fast version
2395 # Optimize not public() to _notpublic() because we have a fast version
2393 if x[1] == ('func', ('symbol', 'public'), None):
2396 if x[1] == ('func', ('symbol', 'public'), None):
2394 newsym = ('func', ('symbol', '_notpublic'), None)
2397 newsym = ('func', ('symbol', '_notpublic'), None)
2395 o = _optimize(newsym, not small)
2398 o = _optimize(newsym, not small)
2396 return o[0], o[1]
2399 return o[0], o[1]
2397 else:
2400 else:
2398 o = _optimize(x[1], not small)
2401 o = _optimize(x[1], not small)
2399 return o[0], (op, o[1])
2402 return o[0], (op, o[1])
2400 elif op == 'parentpost':
2403 elif op == 'parentpost':
2401 o = _optimize(x[1], small)
2404 o = _optimize(x[1], small)
2402 return o[0], (op, o[1])
2405 return o[0], (op, o[1])
2403 elif op == 'group':
2406 elif op == 'group':
2404 return _optimize(x[1], small)
2407 return _optimize(x[1], small)
2405 elif op in 'dagrange range parent ancestorspec':
2408 elif op in 'dagrange range parent ancestorspec':
2406 if op == 'parent':
2409 if op == 'parent':
2407 # x^:y means (x^) : y, not x ^ (:y)
2410 # x^:y means (x^) : y, not x ^ (:y)
2408 post = ('parentpost', x[1])
2411 post = ('parentpost', x[1])
2409 if x[2][0] == 'dagrangepre':
2412 if x[2][0] == 'dagrangepre':
2410 return _optimize(('dagrange', post, x[2][1]), small)
2413 return _optimize(('dagrange', post, x[2][1]), small)
2411 elif x[2][0] == 'rangepre':
2414 elif x[2][0] == 'rangepre':
2412 return _optimize(('range', post, x[2][1]), small)
2415 return _optimize(('range', post, x[2][1]), small)
2413
2416
2414 wa, ta = _optimize(x[1], small)
2417 wa, ta = _optimize(x[1], small)
2415 wb, tb = _optimize(x[2], small)
2418 wb, tb = _optimize(x[2], small)
2416 return wa + wb, (op, ta, tb)
2419 return wa + wb, (op, ta, tb)
2417 elif op == 'list':
2420 elif op == 'list':
2418 ws, ts = zip(*(_optimize(y, small) for y in x[1:]))
2421 ws, ts = zip(*(_optimize(y, small) for y in x[1:]))
2419 return sum(ws), (op,) + ts
2422 return sum(ws), (op,) + ts
2420 elif op == 'func':
2423 elif op == 'func':
2421 f = getstring(x[1], _("not a symbol"))
2424 f = getstring(x[1], _("not a symbol"))
2422 wa, ta = _optimize(x[2], small)
2425 wa, ta = _optimize(x[2], small)
2423 if f in ("author branch closed date desc file grep keyword "
2426 if f in ("author branch closed date desc file grep keyword "
2424 "outgoing user"):
2427 "outgoing user"):
2425 w = 10 # slow
2428 w = 10 # slow
2426 elif f in "modifies adds removes":
2429 elif f in "modifies adds removes":
2427 w = 30 # slower
2430 w = 30 # slower
2428 elif f == "contains":
2431 elif f == "contains":
2429 w = 100 # very slow
2432 w = 100 # very slow
2430 elif f == "ancestor":
2433 elif f == "ancestor":
2431 w = 1 * smallbonus
2434 w = 1 * smallbonus
2432 elif f in "reverse limit first _intlist":
2435 elif f in "reverse limit first _intlist":
2433 w = 0
2436 w = 0
2434 elif f in "sort":
2437 elif f in "sort":
2435 w = 10 # assume most sorts look at changelog
2438 w = 10 # assume most sorts look at changelog
2436 else:
2439 else:
2437 w = 1
2440 w = 1
2438 return w + wa, (op, x[1], ta)
2441 return w + wa, (op, x[1], ta)
2439 return 1, x
2442 return 1, x
2440
2443
2441 def optimize(tree):
2444 def optimize(tree):
2442 _weight, newtree = _optimize(tree, small=True)
2445 _weight, newtree = _optimize(tree, small=True)
2443 return newtree
2446 return newtree
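Rough illustrations of the rewrites above (a sketch; revision numbers hypothetical and tuple shapes approximate):
# optimize(parse('10 + 11 + head()'))
#   -> ('or', ('func', ('symbol', '_list'), ('string', '10\x0011')),
#             ('func', ('symbol', 'head'), None))
# optimize(parse('head() and not closed()'))
#   -> ('difference', ('func', ('symbol', 'head'), None),
#                     ('func', ('symbol', 'closed'), None))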
2444
2447
2445 # the set of valid characters for the initial letter of symbols in
2448 # the set of valid characters for the initial letter of symbols in
2446 # alias declarations and definitions
2449 # alias declarations and definitions
2447 _aliassyminitletters = set(c for c in [chr(i) for i in xrange(256)]
2450 _aliassyminitletters = set(c for c in [chr(i) for i in xrange(256)]
2448 if c.isalnum() or c in '._@$' or ord(c) > 127)
2451 if c.isalnum() or c in '._@$' or ord(c) > 127)
2449
2452
2450 def _parsewith(spec, lookup=None, syminitletters=None):
2453 def _parsewith(spec, lookup=None, syminitletters=None):
2451 """Generate a parse tree of given spec with given tokenizing options
2454 """Generate a parse tree of given spec with given tokenizing options
2452
2455
2453 >>> _parsewith('foo($1)', syminitletters=_aliassyminitletters)
2456 >>> _parsewith('foo($1)', syminitletters=_aliassyminitletters)
2454 ('func', ('symbol', 'foo'), ('symbol', '$1'))
2457 ('func', ('symbol', 'foo'), ('symbol', '$1'))
2455 >>> _parsewith('$1')
2458 >>> _parsewith('$1')
2456 Traceback (most recent call last):
2459 Traceback (most recent call last):
2457 ...
2460 ...
2458 ParseError: ("syntax error in revset '$1'", 0)
2461 ParseError: ("syntax error in revset '$1'", 0)
2459 >>> _parsewith('foo bar')
2462 >>> _parsewith('foo bar')
2460 Traceback (most recent call last):
2463 Traceback (most recent call last):
2461 ...
2464 ...
2462 ParseError: ('invalid token', 4)
2465 ParseError: ('invalid token', 4)
2463 """
2466 """
2464 p = parser.parser(elements)
2467 p = parser.parser(elements)
2465 tree, pos = p.parse(tokenize(spec, lookup=lookup,
2468 tree, pos = p.parse(tokenize(spec, lookup=lookup,
2466 syminitletters=syminitletters))
2469 syminitletters=syminitletters))
2467 if pos != len(spec):
2470 if pos != len(spec):
2468 raise error.ParseError(_('invalid token'), pos)
2471 raise error.ParseError(_('invalid token'), pos)
2469 return parser.simplifyinfixops(tree, ('list', 'or'))
2472 return parser.simplifyinfixops(tree, ('list', 'or'))
2470
2473
2471 class _aliasrules(parser.basealiasrules):
2474 class _aliasrules(parser.basealiasrules):
2472 """Parsing and expansion rule set of revset aliases"""
2475 """Parsing and expansion rule set of revset aliases"""
2473 _section = _('revset alias')
2476 _section = _('revset alias')
2474
2477
2475 @staticmethod
2478 @staticmethod
2476 def _parse(spec):
2479 def _parse(spec):
2477 """Parse alias declaration/definition ``spec``
2480 """Parse alias declaration/definition ``spec``
2478
2481
2482 This also allows symbol names to use ``$`` as an initial letter
2483 (for backward compatibility); callers of this function should
2484 examine whether ``$`` is also used for unexpected symbols.
2482 """
2485 """
2483 return _parsewith(spec, syminitletters=_aliassyminitletters)
2486 return _parsewith(spec, syminitletters=_aliassyminitletters)
2484
2487
2485 @staticmethod
2488 @staticmethod
2486 def _trygetfunc(tree):
2489 def _trygetfunc(tree):
2487 if tree[0] == 'func' and tree[1][0] == 'symbol':
2490 if tree[0] == 'func' and tree[1][0] == 'symbol':
2488 return tree[1][1], getlist(tree[2])
2491 return tree[1][1], getlist(tree[2])
2489
2492
2490 def expandaliases(ui, tree, showwarning=None):
2493 def expandaliases(ui, tree, showwarning=None):
2491 aliases = _aliasrules.buildmap(ui.configitems('revsetalias'))
2494 aliases = _aliasrules.buildmap(ui.configitems('revsetalias'))
2492 tree = _aliasrules.expand(aliases, tree)
2495 tree = _aliasrules.expand(aliases, tree)
2493 if showwarning:
2496 if showwarning:
2497 # warn about problematic (but not referenced) aliases
2495 for name, alias in sorted(aliases.iteritems()):
2498 for name, alias in sorted(aliases.iteritems()):
2496 if alias.error and not alias.warned:
2499 if alias.error and not alias.warned:
2497 showwarning(_('warning: %s\n') % (alias.error))
2500 showwarning(_('warning: %s\n') % (alias.error))
2498 alias.warned = True
2501 alias.warned = True
2499 return tree
2502 return tree
2500
2503
2501 def foldconcat(tree):
2504 def foldconcat(tree):
2502 """Fold elements to be concatenated by `##`
2505 """Fold elements to be concatenated by `##`
2503 """
2506 """
2504 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2507 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2505 return tree
2508 return tree
2506 if tree[0] == '_concat':
2509 if tree[0] == '_concat':
2507 pending = [tree]
2510 pending = [tree]
2508 l = []
2511 l = []
2509 while pending:
2512 while pending:
2510 e = pending.pop()
2513 e = pending.pop()
2511 if e[0] == '_concat':
2514 if e[0] == '_concat':
2512 pending.extend(reversed(e[1:]))
2515 pending.extend(reversed(e[1:]))
2513 elif e[0] in ('string', 'symbol'):
2516 elif e[0] in ('string', 'symbol'):
2514 l.append(e[1])
2517 l.append(e[1])
2515 else:
2518 else:
2516 msg = _("\"##\" can't concatenate \"%s\" element") % (e[0])
2519 msg = _("\"##\" can't concatenate \"%s\" element") % (e[0])
2517 raise error.ParseError(msg)
2520 raise error.ParseError(msg)
2518 return ('string', ''.join(l))
2521 return ('string', ''.join(l))
2519 else:
2522 else:
2520 return tuple(foldconcat(t) for t in tree)
2523 return tuple(foldconcat(t) for t in tree)
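A small sketch of the fold (the tree below is roughly what the parser produces for an expression like 'release-' ## 1.0; treat the exact input syntax as an assumption):
# foldconcat(('_concat', ('string', 'release-'), ('symbol', '1.0')))
#   -> ('string', 'release-1.0')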
2521
2524
2522 def parse(spec, lookup=None):
2525 def parse(spec, lookup=None):
2523 return _parsewith(spec, lookup=lookup)
2526 return _parsewith(spec, lookup=lookup)
2524
2527
2525 def posttreebuilthook(tree, repo):
2528 def posttreebuilthook(tree, repo):
2526 # hook for extensions to execute code on the optimized tree
2529 # hook for extensions to execute code on the optimized tree
2527 pass
2530 pass
2528
2531
2529 def match(ui, spec, repo=None):
2532 def match(ui, spec, repo=None):
2530 if not spec:
2533 if not spec:
2531 raise error.ParseError(_("empty query"))
2534 raise error.ParseError(_("empty query"))
2532 lookup = None
2535 lookup = None
2533 if repo:
2536 if repo:
2534 lookup = repo.__contains__
2537 lookup = repo.__contains__
2535 tree = parse(spec, lookup)
2538 tree = parse(spec, lookup)
2536 return _makematcher(ui, tree, repo)
2539 return _makematcher(ui, tree, repo)
2537
2540
2538 def matchany(ui, specs, repo=None):
2541 def matchany(ui, specs, repo=None):
2539 """Create a matcher that will include any revisions matching one of the
2542 """Create a matcher that will include any revisions matching one of the
2540 given specs"""
2543 given specs"""
2541 if not specs:
2544 if not specs:
2542 def mfunc(repo, subset=None):
2545 def mfunc(repo, subset=None):
2543 return baseset()
2546 return baseset()
2544 return mfunc
2547 return mfunc
2545 if not all(specs):
2548 if not all(specs):
2546 raise error.ParseError(_("empty query"))
2549 raise error.ParseError(_("empty query"))
2547 lookup = None
2550 lookup = None
2548 if repo:
2551 if repo:
2549 lookup = repo.__contains__
2552 lookup = repo.__contains__
2550 if len(specs) == 1:
2553 if len(specs) == 1:
2551 tree = parse(specs[0], lookup)
2554 tree = parse(specs[0], lookup)
2552 else:
2555 else:
2553 tree = ('or',) + tuple(parse(s, lookup) for s in specs)
2556 tree = ('or',) + tuple(parse(s, lookup) for s in specs)
2554 return _makematcher(ui, tree, repo)
2557 return _makematcher(ui, tree, repo)
2555
2558
2556 def _makematcher(ui, tree, repo):
2559 def _makematcher(ui, tree, repo):
2557 if ui:
2560 if ui:
2558 tree = expandaliases(ui, tree, showwarning=ui.warn)
2561 tree = expandaliases(ui, tree, showwarning=ui.warn)
2559 tree = foldconcat(tree)
2562 tree = foldconcat(tree)
2560 tree = optimize(tree)
2563 tree = optimize(tree)
2561 posttreebuilthook(tree, repo)
2564 posttreebuilthook(tree, repo)
2562 def mfunc(repo, subset=None):
2565 def mfunc(repo, subset=None):
2563 if subset is None:
2566 if subset is None:
2564 subset = fullreposet(repo)
2567 subset = fullreposet(repo)
2565 if util.safehasattr(subset, 'isascending'):
2568 if util.safehasattr(subset, 'isascending'):
2566 result = getset(repo, subset, tree)
2569 result = getset(repo, subset, tree)
2567 else:
2570 else:
2568 result = getset(repo, baseset(subset), tree)
2571 result = getset(repo, baseset(subset), tree)
2569 return result
2572 return result
2570 return mfunc
2573 return mfunc
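A usage sketch (hypothetical ui/repo objects) showing how the returned matcher is meant to be called:
# m = match(ui, 'head() and not closed()', repo)
# revs = m(repo)                    # evaluated against fullreposet(repo)
# revs = m(repo, subset=[0, 1, 2])  # a plain list is wrapped in a baseset first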
2571
2574
2572 def formatspec(expr, *args):
2575 def formatspec(expr, *args):
2573 '''
2576 '''
2574 This is a convenience function for using revsets internally, and
2577 This is a convenience function for using revsets internally, and
2575 escapes arguments appropriately. Aliases are intentionally ignored
2578 escapes arguments appropriately. Aliases are intentionally ignored
2576 so that intended expression behavior isn't accidentally subverted.
2579 so that intended expression behavior isn't accidentally subverted.
2577
2580
2578 Supported arguments:
2581 Supported arguments:
2579
2582
2580 %r = revset expression, parenthesized
2583 %r = revset expression, parenthesized
2581 %d = int(arg), no quoting
2584 %d = int(arg), no quoting
2582 %s = string(arg), escaped and single-quoted
2585 %s = string(arg), escaped and single-quoted
2583 %b = arg.branch(), escaped and single-quoted
2586 %b = arg.branch(), escaped and single-quoted
2584 %n = hex(arg), single-quoted
2587 %n = hex(arg), single-quoted
2585 %% = a literal '%'
2588 %% = a literal '%'
2586
2589
2587 Prefixing the type with 'l' specifies a parenthesized list of that type.
2590 Prefixing the type with 'l' specifies a parenthesized list of that type.
2588
2591
2589 >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
2592 >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
2590 '(10 or 11):: and ((this()) or (that()))'
2593 '(10 or 11):: and ((this()) or (that()))'
2591 >>> formatspec('%d:: and not %d::', 10, 20)
2594 >>> formatspec('%d:: and not %d::', 10, 20)
2592 '10:: and not 20::'
2595 '10:: and not 20::'
2593 >>> formatspec('%ld or %ld', [], [1])
2596 >>> formatspec('%ld or %ld', [], [1])
2594 "_list('') or 1"
2597 "_list('') or 1"
2595 >>> formatspec('keyword(%s)', 'foo\\xe9')
2598 >>> formatspec('keyword(%s)', 'foo\\xe9')
2596 "keyword('foo\\\\xe9')"
2599 "keyword('foo\\\\xe9')"
2597 >>> b = lambda: 'default'
2600 >>> b = lambda: 'default'
2598 >>> b.branch = b
2601 >>> b.branch = b
2599 >>> formatspec('branch(%b)', b)
2602 >>> formatspec('branch(%b)', b)
2600 "branch('default')"
2603 "branch('default')"
2601 >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
2604 >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
2602 "root(_list('a\\x00b\\x00c\\x00d'))"
2605 "root(_list('a\\x00b\\x00c\\x00d'))"
2603 '''
2606 '''
2604
2607
2605 def quote(s):
2608 def quote(s):
2606 return repr(str(s))
2609 return repr(str(s))
2607
2610
2608 def argtype(c, arg):
2611 def argtype(c, arg):
2609 if c == 'd':
2612 if c == 'd':
2610 return str(int(arg))
2613 return str(int(arg))
2611 elif c == 's':
2614 elif c == 's':
2612 return quote(arg)
2615 return quote(arg)
2613 elif c == 'r':
2616 elif c == 'r':
2614 parse(arg) # make sure syntax errors are confined
2617 parse(arg) # make sure syntax errors are confined
2615 return '(%s)' % arg
2618 return '(%s)' % arg
2616 elif c == 'n':
2619 elif c == 'n':
2617 return quote(node.hex(arg))
2620 return quote(node.hex(arg))
2618 elif c == 'b':
2621 elif c == 'b':
2619 return quote(arg.branch())
2622 return quote(arg.branch())
2620
2623
2621 def listexp(s, t):
2624 def listexp(s, t):
2622 l = len(s)
2625 l = len(s)
2623 if l == 0:
2626 if l == 0:
2624 return "_list('')"
2627 return "_list('')"
2625 elif l == 1:
2628 elif l == 1:
2626 return argtype(t, s[0])
2629 return argtype(t, s[0])
2627 elif t == 'd':
2630 elif t == 'd':
2628 return "_intlist('%s')" % "\0".join(str(int(a)) for a in s)
2631 return "_intlist('%s')" % "\0".join(str(int(a)) for a in s)
2629 elif t == 's':
2632 elif t == 's':
2630 return "_list('%s')" % "\0".join(s)
2633 return "_list('%s')" % "\0".join(s)
2631 elif t == 'n':
2634 elif t == 'n':
2632 return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
2635 return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
2633 elif t == 'b':
2636 elif t == 'b':
2634 return "_list('%s')" % "\0".join(a.branch() for a in s)
2637 return "_list('%s')" % "\0".join(a.branch() for a in s)
2635
2638
2636 m = l // 2
2639 m = l // 2
2637 return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))
2640 return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))
2638
2641
2639 ret = ''
2642 ret = ''
2640 pos = 0
2643 pos = 0
2641 arg = 0
2644 arg = 0
2642 while pos < len(expr):
2645 while pos < len(expr):
2643 c = expr[pos]
2646 c = expr[pos]
2644 if c == '%':
2647 if c == '%':
2645 pos += 1
2648 pos += 1
2646 d = expr[pos]
2649 d = expr[pos]
2647 if d == '%':
2650 if d == '%':
2648 ret += d
2651 ret += d
2649 elif d in 'dsnbr':
2652 elif d in 'dsnbr':
2650 ret += argtype(d, args[arg])
2653 ret += argtype(d, args[arg])
2651 arg += 1
2654 arg += 1
2652 elif d == 'l':
2655 elif d == 'l':
2653 # a list of some type
2656 # a list of some type
2654 pos += 1
2657 pos += 1
2655 d = expr[pos]
2658 d = expr[pos]
2656 ret += listexp(list(args[arg]), d)
2659 ret += listexp(list(args[arg]), d)
2657 arg += 1
2660 arg += 1
2658 else:
2661 else:
2659 raise error.Abort('unexpected revspec format character %s' % d)
2662 raise error.Abort('unexpected revspec format character %s' % d)
2660 else:
2663 else:
2661 ret += c
2664 ret += c
2662 pos += 1
2665 pos += 1
2663
2666
2664 return ret
2667 return ret
2665
2668
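# Illustration (a rough sketch): besides the doctests above, note how
# listexp() handles '%lr'. Revset expressions cannot be packed into _list(),
# so the list is split recursively into nested 'or' clauses:
#
#     formatspec('%lr', ['a', 'b', 'c'])   # -> '((a) or ((b) or (c)))'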
2666 def prettyformat(tree):
2669 def prettyformat(tree):
2667 return parser.prettyformat(tree, ('string', 'symbol'))
2670 return parser.prettyformat(tree, ('string', 'symbol'))
2668
2671
2669 def depth(tree):
2672 def depth(tree):
2670 if isinstance(tree, tuple):
2673 if isinstance(tree, tuple):
2671 return max(map(depth, tree)) + 1
2674 return max(map(depth, tree)) + 1
2672 else:
2675 else:
2673 return 0
2676 return 0
2674
2677
2675 def funcsused(tree):
2678 def funcsused(tree):
2676 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2679 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2677 return set()
2680 return set()
2678 else:
2681 else:
2679 funcs = set()
2682 funcs = set()
2680 for s in tree[1:]:
2683 for s in tree[1:]:
2681 funcs |= funcsused(s)
2684 funcs |= funcsused(s)
2682 if tree[0] == 'func':
2685 if tree[0] == 'func':
2683 funcs.add(tree[1][1])
2686 funcs.add(tree[1][1])
2684 return funcs
2687 return funcs
2685
2688
2686 def _formatsetrepr(r):
2689 def _formatsetrepr(r):
2687 """Format an optional printable representation of a set
2690 """Format an optional printable representation of a set
2688
2691
2689 ======== =================================
2692 ======== =================================
2690 type(r) example
2693 type(r) example
2691 ======== =================================
2694 ======== =================================
2692 tuple ('<not %r>', other)
2695 tuple ('<not %r>', other)
2693 str '<branch closed>'
2696 str '<branch closed>'
2694 callable lambda: '<branch %r>' % sorted(b)
2697 callable lambda: '<branch %r>' % sorted(b)
2695 object other
2698 object other
2696 ======== =================================
2699 ======== =================================
2697 """
2700 """
2698 if r is None:
2701 if r is None:
2699 return ''
2702 return ''
2700 elif isinstance(r, tuple):
2703 elif isinstance(r, tuple):
2701 return r[0] % r[1:]
2704 return r[0] % r[1:]
2702 elif isinstance(r, str):
2705 elif isinstance(r, str):
2703 return r
2706 return r
2704 elif callable(r):
2707 elif callable(r):
2705 return r()
2708 return r()
2706 else:
2709 else:
2707 return repr(r)
2710 return repr(r)
2708
2711
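# Illustration (a rough sketch): _formatsetrepr() mirrors the table in its
# docstring, one branch per accepted type:
#
#     _formatsetrepr(None)                   # -> ''
#     _formatsetrepr(('<not %r>', [1, 2]))   # -> '<not [1, 2]>'
#     _formatsetrepr('<branch closed>')      # -> '<branch closed>'
#     _formatsetrepr(lambda: '<lazy>')       # -> '<lazy>'
#     _formatsetrepr(42)                     # -> '42'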
2709 class abstractsmartset(object):
2712 class abstractsmartset(object):
2710
2713
2711 def __nonzero__(self):
2714 def __nonzero__(self):
2712 """True if the smartset is not empty"""
2715 """True if the smartset is not empty"""
2713 raise NotImplementedError()
2716 raise NotImplementedError()
2714
2717
2715 def __contains__(self, rev):
2718 def __contains__(self, rev):
2716 """provide fast membership testing"""
2719 """provide fast membership testing"""
2717 raise NotImplementedError()
2720 raise NotImplementedError()
2718
2721
2719 def __iter__(self):
2722 def __iter__(self):
2720 """iterate the set in the order it is supposed to be iterated"""
2723 """iterate the set in the order it is supposed to be iterated"""
2721 raise NotImplementedError()
2724 raise NotImplementedError()
2722
2725
2723 # Attributes containing a function to perform a fast iteration in a given
2726 # Attributes containing a function to perform a fast iteration in a given
2724 # direction. A smartset can have none, one, or both defined.
2727 # direction. A smartset can have none, one, or both defined.
2725 #
2728 #
2726 # Default value is None instead of a function returning None to avoid
2729 # Default value is None instead of a function returning None to avoid
2727 # initializing an iterator just for testing if a fast method exists.
2730 # initializing an iterator just for testing if a fast method exists.
2728 fastasc = None
2731 fastasc = None
2729 fastdesc = None
2732 fastdesc = None
2730
2733
2731 def isascending(self):
2734 def isascending(self):
2732 """True if the set will iterate in ascending order"""
2735 """True if the set will iterate in ascending order"""
2733 raise NotImplementedError()
2736 raise NotImplementedError()
2734
2737
2735 def isdescending(self):
2738 def isdescending(self):
2736 """True if the set will iterate in descending order"""
2739 """True if the set will iterate in descending order"""
2737 raise NotImplementedError()
2740 raise NotImplementedError()
2738
2741
2739 def istopo(self):
2742 def istopo(self):
2740 """True if the set will iterate in topographical order"""
2743 """True if the set will iterate in topographical order"""
2741 raise NotImplementedError()
2744 raise NotImplementedError()
2742
2745
2743 @util.cachefunc
2746 @util.cachefunc
2744 def min(self):
2747 def min(self):
2745 """return the minimum element in the set"""
2748 """return the minimum element in the set"""
2746 if self.fastasc is not None:
2749 if self.fastasc is not None:
2747 for r in self.fastasc():
2750 for r in self.fastasc():
2748 return r
2751 return r
2749 raise ValueError('arg is an empty sequence')
2752 raise ValueError('arg is an empty sequence')
2750 return min(self)
2753 return min(self)
2751
2754
2752 @util.cachefunc
2755 @util.cachefunc
2753 def max(self):
2756 def max(self):
2754 """return the maximum element in the set"""
2757 """return the maximum element in the set"""
2755 if self.fastdesc is not None:
2758 if self.fastdesc is not None:
2756 for r in self.fastdesc():
2759 for r in self.fastdesc():
2757 return r
2760 return r
2758 raise ValueError('arg is an empty sequence')
2761 raise ValueError('arg is an empty sequence')
2759 return max(self)
2762 return max(self)
2760
2763
2761 def first(self):
2764 def first(self):
2762 """return the first element in the set (user iteration perspective)
2765 """return the first element in the set (user iteration perspective)
2763
2766
2764 Return None if the set is empty"""
2767 Return None if the set is empty"""
2765 raise NotImplementedError()
2768 raise NotImplementedError()
2766
2769
2767 def last(self):
2770 def last(self):
2768 """return the last element in the set (user iteration perspective)
2771 """return the last element in the set (user iteration perspective)
2769
2772
2770 Return None if the set is empty"""
2773 Return None if the set is empty"""
2771 raise NotImplementedError()
2774 raise NotImplementedError()
2772
2775
2773 def __len__(self):
2776 def __len__(self):
2774 """return the length of the smartsets
2777 """return the length of the smartsets
2775
2778
2776 This can be expensive on smartset that could be lazy otherwise."""
2779 This can be expensive on smartset that could be lazy otherwise."""
2777 raise NotImplementedError()
2780 raise NotImplementedError()
2778
2781
2779 def reverse(self):
2782 def reverse(self):
2780 """reverse the expected iteration order"""
2783 """reverse the expected iteration order"""
2781 raise NotImplementedError()
2784 raise NotImplementedError()
2782
2785
2783 def sort(self, reverse=True):
2786 def sort(self, reverse=True):
2784 """get the set to iterate in an ascending or descending order"""
2787 """get the set to iterate in an ascending or descending order"""
2785 raise NotImplementedError()
2788 raise NotImplementedError()
2786
2789
2787 def __and__(self, other):
2790 def __and__(self, other):
2788 """Returns a new object with the intersection of the two collections.
2791 """Returns a new object with the intersection of the two collections.
2789
2792
2790 This is part of the mandatory API for smartset."""
2793 This is part of the mandatory API for smartset."""
2791 if isinstance(other, fullreposet):
2794 if isinstance(other, fullreposet):
2792 return self
2795 return self
2793 return self.filter(other.__contains__, condrepr=other, cache=False)
2796 return self.filter(other.__contains__, condrepr=other, cache=False)
2794
2797
2795 def __add__(self, other):
2798 def __add__(self, other):
2796 """Returns a new object with the union of the two collections.
2799 """Returns a new object with the union of the two collections.
2797
2800
2798 This is part of the mandatory API for smartset."""
2801 This is part of the mandatory API for smartset."""
2799 return addset(self, other)
2802 return addset(self, other)
2800
2803
2801 def __sub__(self, other):
2804 def __sub__(self, other):
2802 """Returns a new object with the substraction of the two collections.
2805 """Returns a new object with the substraction of the two collections.
2803
2806
2804 This is part of the mandatory API for smartset."""
2807 This is part of the mandatory API for smartset."""
2805 c = other.__contains__
2808 c = other.__contains__
2806 return self.filter(lambda r: not c(r), condrepr=('<not %r>', other),
2809 return self.filter(lambda r: not c(r), condrepr=('<not %r>', other),
2807 cache=False)
2810 cache=False)
2808
2811
2809 def filter(self, condition, condrepr=None, cache=True):
2812 def filter(self, condition, condrepr=None, cache=True):
2810 """Returns this smartset filtered by condition as a new smartset.
2813 """Returns this smartset filtered by condition as a new smartset.
2811
2814
2812 `condition` is a callable which takes a revision number and returns a
2815 `condition` is a callable which takes a revision number and returns a
2813 boolean. Optional `condrepr` provides a printable representation of
2816 boolean. Optional `condrepr` provides a printable representation of
2814 the given `condition`.
2817 the given `condition`.
2815
2818
2816 This is part of the mandatory API for smartset."""
2819 This is part of the mandatory API for smartset."""
2817 # builtins cannot be cached, but they do not need to be
2820 # builtins cannot be cached, but they do not need to be
2818 if cache and util.safehasattr(condition, 'func_code'):
2821 if cache and util.safehasattr(condition, 'func_code'):
2819 condition = util.cachefunc(condition)
2822 condition = util.cachefunc(condition)
2820 return filteredset(self, condition, condrepr)
2823 return filteredset(self, condition, condrepr)
2821
2824
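# Illustration (a rough sketch): how the operators above behave on the
# concrete smartsets defined below, using small basesets:
#
#     xs = baseset([0, 2, 4])
#     ys = baseset([2, 3])
#     list(xs & ys)   # -> [2]          (filteredset keyed on ys.__contains__)
#     list(xs + ys)   # -> [0, 2, 4, 3] (addset; duplicates yielded once)
#     list(xs - ys)   # -> [0, 4]       (filteredset with a negated condition)
#     xs.min(), xs.max(), xs.first(), xs.last()   # -> (0, 4, 0, 4)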
2822 class baseset(abstractsmartset):
2825 class baseset(abstractsmartset):
2823 """Basic data structure that represents a revset and contains the basic
2826 """Basic data structure that represents a revset and contains the basic
2824 operations that it should be able to perform.
2827 operations that it should be able to perform.
2825
2828
2826 Every method in this class should be implemented by any smartset class.
2829 Every method in this class should be implemented by any smartset class.
2827 """
2830 """
2828 def __init__(self, data=(), datarepr=None, istopo=False):
2831 def __init__(self, data=(), datarepr=None, istopo=False):
2829 """
2832 """
2830 datarepr: a tuple of (format, obj, ...), a function or an object that
2833 datarepr: a tuple of (format, obj, ...), a function or an object that
2831 provides a printable representation of the given data.
2834 provides a printable representation of the given data.
2832 """
2835 """
2833 self._ascending = None
2836 self._ascending = None
2834 self._istopo = istopo
2837 self._istopo = istopo
2835 if not isinstance(data, list):
2838 if not isinstance(data, list):
2836 if isinstance(data, set):
2839 if isinstance(data, set):
2837 self._set = data
2840 self._set = data
2838 # a set has no order; we pick one for stability purposes
2841 # a set has no order; we pick one for stability purposes
2839 self._ascending = True
2842 self._ascending = True
2840 data = list(data)
2843 data = list(data)
2841 self._list = data
2844 self._list = data
2842 self._datarepr = datarepr
2845 self._datarepr = datarepr
2843
2846
2844 @util.propertycache
2847 @util.propertycache
2845 def _set(self):
2848 def _set(self):
2846 return set(self._list)
2849 return set(self._list)
2847
2850
2848 @util.propertycache
2851 @util.propertycache
2849 def _asclist(self):
2852 def _asclist(self):
2850 asclist = self._list[:]
2853 asclist = self._list[:]
2851 asclist.sort()
2854 asclist.sort()
2852 return asclist
2855 return asclist
2853
2856
2854 def __iter__(self):
2857 def __iter__(self):
2855 if self._ascending is None:
2858 if self._ascending is None:
2856 return iter(self._list)
2859 return iter(self._list)
2857 elif self._ascending:
2860 elif self._ascending:
2858 return iter(self._asclist)
2861 return iter(self._asclist)
2859 else:
2862 else:
2860 return reversed(self._asclist)
2863 return reversed(self._asclist)
2861
2864
2862 def fastasc(self):
2865 def fastasc(self):
2863 return iter(self._asclist)
2866 return iter(self._asclist)
2864
2867
2865 def fastdesc(self):
2868 def fastdesc(self):
2866 return reversed(self._asclist)
2869 return reversed(self._asclist)
2867
2870
2868 @util.propertycache
2871 @util.propertycache
2869 def __contains__(self):
2872 def __contains__(self):
2870 return self._set.__contains__
2873 return self._set.__contains__
2871
2874
2872 def __nonzero__(self):
2875 def __nonzero__(self):
2873 return bool(self._list)
2876 return bool(self._list)
2874
2877
2875 def sort(self, reverse=False):
2878 def sort(self, reverse=False):
2876 self._ascending = not bool(reverse)
2879 self._ascending = not bool(reverse)
2877 self._istopo = False
2880 self._istopo = False
2878
2881
2879 def reverse(self):
2882 def reverse(self):
2880 if self._ascending is None:
2883 if self._ascending is None:
2881 self._list.reverse()
2884 self._list.reverse()
2882 else:
2885 else:
2883 self._ascending = not self._ascending
2886 self._ascending = not self._ascending
2884 self._istopo = False
2887 self._istopo = False
2885
2888
2886 def __len__(self):
2889 def __len__(self):
2887 return len(self._list)
2890 return len(self._list)
2888
2891
2889 def isascending(self):
2892 def isascending(self):
2890 """Returns True if the collection is ascending order, False if not.
2893 """Returns True if the collection is ascending order, False if not.
2891
2894
2892 This is part of the mandatory API for smartset."""
2895 This is part of the mandatory API for smartset."""
2893 if len(self) <= 1:
2896 if len(self) <= 1:
2894 return True
2897 return True
2895 return self._ascending is not None and self._ascending
2898 return self._ascending is not None and self._ascending
2896
2899
2897 def isdescending(self):
2900 def isdescending(self):
2898 """Returns True if the collection is descending order, False if not.
2901 """Returns True if the collection is descending order, False if not.
2899
2902
2900 This is part of the mandatory API for smartset."""
2903 This is part of the mandatory API for smartset."""
2901 if len(self) <= 1:
2904 if len(self) <= 1:
2902 return True
2905 return True
2903 return self._ascending is not None and not self._ascending
2906 return self._ascending is not None and not self._ascending
2904
2907
2905 def istopo(self):
2908 def istopo(self):
2906 """Is the collection is in topographical order or not.
2909 """Is the collection is in topographical order or not.
2907
2910
2908 This is part of the mandatory API for smartset."""
2911 This is part of the mandatory API for smartset."""
2909 if len(self) <= 1:
2912 if len(self) <= 1:
2910 return True
2913 return True
2911 return self._istopo
2914 return self._istopo
2912
2915
2913 def first(self):
2916 def first(self):
2914 if self:
2917 if self:
2915 if self._ascending is None:
2918 if self._ascending is None:
2916 return self._list[0]
2919 return self._list[0]
2917 elif self._ascending:
2920 elif self._ascending:
2918 return self._asclist[0]
2921 return self._asclist[0]
2919 else:
2922 else:
2920 return self._asclist[-1]
2923 return self._asclist[-1]
2921 return None
2924 return None
2922
2925
2923 def last(self):
2926 def last(self):
2924 if self:
2927 if self:
2925 if self._ascending is None:
2928 if self._ascending is None:
2926 return self._list[-1]
2929 return self._list[-1]
2927 elif self._ascending:
2930 elif self._ascending:
2928 return self._asclist[-1]
2931 return self._asclist[-1]
2929 else:
2932 else:
2930 return self._asclist[0]
2933 return self._asclist[0]
2931 return None
2934 return None
2932
2935
2933 def __repr__(self):
2936 def __repr__(self):
2934 d = {None: '', False: '-', True: '+'}[self._ascending]
2937 d = {None: '', False: '-', True: '+'}[self._ascending]
2935 s = _formatsetrepr(self._datarepr)
2938 s = _formatsetrepr(self._datarepr)
2936 if not s:
2939 if not s:
2937 l = self._list
2940 l = self._list
2938 # if _list has been built from a set, it might have a different
2941 # if _list has been built from a set, it might have a different
2939 # order from one python implementation to another.
2942 # order from one python implementation to another.
2940 # We fall back to the sorted version for a stable output.
2943 # We fall back to the sorted version for a stable output.
2941 if self._ascending is not None:
2944 if self._ascending is not None:
2942 l = self._asclist
2945 l = self._asclist
2943 s = repr(l)
2946 s = repr(l)
2944 return '<%s%s %s>' % (type(self).__name__, d, s)
2947 return '<%s%s %s>' % (type(self).__name__, d, s)
2945
2948
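# Illustration (a rough sketch): baseset keeps the construction order until
# it is explicitly sorted or reversed:
#
#     bs = baseset([3, 1, 2])
#     list(bs)     # -> [3, 1, 2]   (no order imposed yet)
#     bs.sort()
#     list(bs)     # -> [1, 2, 3]
#     bs.reverse()
#     list(bs)     # -> [3, 2, 1]
#     repr(bs)     # -> '<baseset- [1, 2, 3]>'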
2946 class filteredset(abstractsmartset):
2949 class filteredset(abstractsmartset):
2947 """Duck type for baseset class which iterates lazily over the revisions in
2950 """Duck type for baseset class which iterates lazily over the revisions in
2948 the subset and contains a function which tests for membership in the
2951 the subset and contains a function which tests for membership in the
2949 revset
2952 revset
2950 """
2953 """
2951 def __init__(self, subset, condition=lambda x: True, condrepr=None):
2954 def __init__(self, subset, condition=lambda x: True, condrepr=None):
2952 """
2955 """
2953 condition: a function that decides whether a revision in the subset
2956 condition: a function that decides whether a revision in the subset
2954 belongs to the revset or not.
2957 belongs to the revset or not.
2955 condrepr: a tuple of (format, obj, ...), a function or an object that
2958 condrepr: a tuple of (format, obj, ...), a function or an object that
2956 provides a printable representation of the given condition.
2959 provides a printable representation of the given condition.
2957 """
2960 """
2958 self._subset = subset
2961 self._subset = subset
2959 self._condition = condition
2962 self._condition = condition
2960 self._condrepr = condrepr
2963 self._condrepr = condrepr
2961
2964
2962 def __contains__(self, x):
2965 def __contains__(self, x):
2963 return x in self._subset and self._condition(x)
2966 return x in self._subset and self._condition(x)
2964
2967
2965 def __iter__(self):
2968 def __iter__(self):
2966 return self._iterfilter(self._subset)
2969 return self._iterfilter(self._subset)
2967
2970
2968 def _iterfilter(self, it):
2971 def _iterfilter(self, it):
2969 cond = self._condition
2972 cond = self._condition
2970 for x in it:
2973 for x in it:
2971 if cond(x):
2974 if cond(x):
2972 yield x
2975 yield x
2973
2976
2974 @property
2977 @property
2975 def fastasc(self):
2978 def fastasc(self):
2976 it = self._subset.fastasc
2979 it = self._subset.fastasc
2977 if it is None:
2980 if it is None:
2978 return None
2981 return None
2979 return lambda: self._iterfilter(it())
2982 return lambda: self._iterfilter(it())
2980
2983
2981 @property
2984 @property
2982 def fastdesc(self):
2985 def fastdesc(self):
2983 it = self._subset.fastdesc
2986 it = self._subset.fastdesc
2984 if it is None:
2987 if it is None:
2985 return None
2988 return None
2986 return lambda: self._iterfilter(it())
2989 return lambda: self._iterfilter(it())
2987
2990
2988 def __nonzero__(self):
2991 def __nonzero__(self):
2989 fast = None
2992 fast = None
2990 candidates = [self.fastasc if self.isascending() else None,
2993 candidates = [self.fastasc if self.isascending() else None,
2991 self.fastdesc if self.isdescending() else None,
2994 self.fastdesc if self.isdescending() else None,
2992 self.fastasc,
2995 self.fastasc,
2993 self.fastdesc]
2996 self.fastdesc]
2994 for candidate in candidates:
2997 for candidate in candidates:
2995 if candidate is not None:
2998 if candidate is not None:
2996 fast = candidate
2999 fast = candidate
2997 break
3000 break
2998
3001
2999 if fast is not None:
3002 if fast is not None:
3000 it = fast()
3003 it = fast()
3001 else:
3004 else:
3002 it = self
3005 it = self
3003
3006
3004 for r in it:
3007 for r in it:
3005 return True
3008 return True
3006 return False
3009 return False
3007
3010
3008 def __len__(self):
3011 def __len__(self):
3009 # Basic implementation to be changed in future patches.
3012 # Basic implementation to be changed in future patches.
3010 # Until this gets improved, we use a generator expression
3013 # Until this gets improved, we use a generator expression
3011 # here, since a list comprehension is free to call __len__ again,
3014 # here, since a list comprehension is free to call __len__ again,
3012 # causing infinite recursion.
3015 # causing infinite recursion.
3013 l = baseset(r for r in self)
3016 l = baseset(r for r in self)
3014 return len(l)
3017 return len(l)
3015
3018
3016 def sort(self, reverse=False):
3019 def sort(self, reverse=False):
3017 self._subset.sort(reverse=reverse)
3020 self._subset.sort(reverse=reverse)
3018
3021
3019 def reverse(self):
3022 def reverse(self):
3020 self._subset.reverse()
3023 self._subset.reverse()
3021
3024
3022 def isascending(self):
3025 def isascending(self):
3023 return self._subset.isascending()
3026 return self._subset.isascending()
3024
3027
3025 def isdescending(self):
3028 def isdescending(self):
3026 return self._subset.isdescending()
3029 return self._subset.isdescending()
3027
3030
3028 def istopo(self):
3031 def istopo(self):
3029 return self._subset.istopo()
3032 return self._subset.istopo()
3030
3033
3031 def first(self):
3034 def first(self):
3032 for x in self:
3035 for x in self:
3033 return x
3036 return x
3034 return None
3037 return None
3035
3038
3036 def last(self):
3039 def last(self):
3037 it = None
3040 it = None
3038 if self.isascending():
3041 if self.isascending():
3039 it = self.fastdesc
3042 it = self.fastdesc
3040 elif self.isdescending():
3043 elif self.isdescending():
3041 it = self.fastasc
3044 it = self.fastasc
3042 if it is not None:
3045 if it is not None:
3043 for x in it():
3046 for x in it():
3044 return x
3047 return x
3045 return None #empty case
3048 return None #empty case
3046 else:
3049 else:
3047 x = None
3050 x = None
3048 for x in self:
3051 for x in self:
3049 pass
3052 pass
3050 return x
3053 return x
3051
3054
3052 def __repr__(self):
3055 def __repr__(self):
3053 xs = [repr(self._subset)]
3056 xs = [repr(self._subset)]
3054 s = _formatsetrepr(self._condrepr)
3057 s = _formatsetrepr(self._condrepr)
3055 if s:
3058 if s:
3056 xs.append(s)
3059 xs.append(s)
3057 return '<%s %s>' % (type(self).__name__, ', '.join(xs))
3060 return '<%s %s>' % (type(self).__name__, ', '.join(xs))
3058
3061
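# Illustration (a rough sketch): filteredset applies its condition lazily,
# both on iteration and on membership tests:
#
#     fs = filteredset(baseset([1, 2, 3, 4]), lambda r: r % 2 == 0,
#                      condrepr='<even>')
#     3 in fs      # -> False (in the subset, but the condition fails)
#     list(fs)     # -> [2, 4]
#     fs.first()   # -> 2
#     repr(fs)     # -> '<filteredset <baseset [1, 2, 3, 4]>, <even>>'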
3059 def _iterordered(ascending, iter1, iter2):
3062 def _iterordered(ascending, iter1, iter2):
3060 """produce an ordered iteration from two iterators with the same order
3063 """produce an ordered iteration from two iterators with the same order
3061
3064
3062 The `ascending` flag indicates the iteration direction.
3065 The `ascending` flag indicates the iteration direction.
3063 """
3066 """
3064 choice = max
3067 choice = max
3065 if ascending:
3068 if ascending:
3066 choice = min
3069 choice = min
3067
3070
3068 val1 = None
3071 val1 = None
3069 val2 = None
3072 val2 = None
3070 try:
3073 try:
3071 # Consume both iterators in an ordered way until one is empty
3074 # Consume both iterators in an ordered way until one is empty
3072 while True:
3075 while True:
3073 if val1 is None:
3076 if val1 is None:
3074 val1 = next(iter1)
3077 val1 = next(iter1)
3075 if val2 is None:
3078 if val2 is None:
3076 val2 = next(iter2)
3079 val2 = next(iter2)
3077 n = choice(val1, val2)
3080 n = choice(val1, val2)
3078 yield n
3081 yield n
3079 if val1 == n:
3082 if val1 == n:
3080 val1 = None
3083 val1 = None
3081 if val2 == n:
3084 if val2 == n:
3082 val2 = None
3085 val2 = None
3083 except StopIteration:
3086 except StopIteration:
3084 # Flush any remaining values and consume the other one
3087 # Flush any remaining values and consume the other one
3085 it = iter2
3088 it = iter2
3086 if val1 is not None:
3089 if val1 is not None:
3087 yield val1
3090 yield val1
3088 it = iter1
3091 it = iter1
3089 elif val2 is not None:
3092 elif val2 is not None:
3090 # might have been equality and both are empty
3093 # might have been equality and both are empty
3091 yield val2
3094 yield val2
3092 for val in it:
3095 for val in it:
3093 yield val
3096 yield val
3094
3097
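# Illustration (a rough sketch): _iterordered() merges two already-sorted
# iterators, yielding values present in both only once:
#
#     list(_iterordered(True, iter([1, 3, 5]), iter([2, 3, 4])))
#     # -> [1, 2, 3, 4, 5]
#     list(_iterordered(False, iter([5, 3]), iter([4, 3, 1])))
#     # -> [5, 4, 3, 1]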
3095 class addset(abstractsmartset):
3098 class addset(abstractsmartset):
3096 """Represent the addition of two sets
3099 """Represent the addition of two sets
3097
3100
3098 Wrapper structure for lazily adding two structures without losing much
3101 Wrapper structure for lazily adding two structures without losing much
3099 performance on the __contains__ method
3102 performance on the __contains__ method
3100
3103
3101 If the ascending attribute is set, that means the two structures are
3104 If the ascending attribute is set, that means the two structures are
3102 ordered in either an ascending or descending way. Therefore, we can add
3105 ordered in either an ascending or descending way. Therefore, we can add
3103 them maintaining the order by iterating over both at the same time
3106 them maintaining the order by iterating over both at the same time
3104
3107
3105 >>> xs = baseset([0, 3, 2])
3108 >>> xs = baseset([0, 3, 2])
3106 >>> ys = baseset([5, 2, 4])
3109 >>> ys = baseset([5, 2, 4])
3107
3110
3108 >>> rs = addset(xs, ys)
3111 >>> rs = addset(xs, ys)
3109 >>> bool(rs), 0 in rs, 1 in rs, 5 in rs, rs.first(), rs.last()
3112 >>> bool(rs), 0 in rs, 1 in rs, 5 in rs, rs.first(), rs.last()
3110 (True, True, False, True, 0, 4)
3113 (True, True, False, True, 0, 4)
3111 >>> rs = addset(xs, baseset([]))
3114 >>> rs = addset(xs, baseset([]))
3112 >>> bool(rs), 0 in rs, 1 in rs, rs.first(), rs.last()
3115 >>> bool(rs), 0 in rs, 1 in rs, rs.first(), rs.last()
3113 (True, True, False, 0, 2)
3116 (True, True, False, 0, 2)
3114 >>> rs = addset(baseset([]), baseset([]))
3117 >>> rs = addset(baseset([]), baseset([]))
3115 >>> bool(rs), 0 in rs, rs.first(), rs.last()
3118 >>> bool(rs), 0 in rs, rs.first(), rs.last()
3116 (False, False, None, None)
3119 (False, False, None, None)
3117
3120
3118 iterate unsorted:
3121 iterate unsorted:
3119 >>> rs = addset(xs, ys)
3122 >>> rs = addset(xs, ys)
3120 >>> # (use generator because pypy could call len())
3123 >>> # (use generator because pypy could call len())
3121 >>> list(x for x in rs) # without _genlist
3124 >>> list(x for x in rs) # without _genlist
3122 [0, 3, 2, 5, 4]
3125 [0, 3, 2, 5, 4]
3123 >>> assert not rs._genlist
3126 >>> assert not rs._genlist
3124 >>> len(rs)
3127 >>> len(rs)
3125 5
3128 5
3126 >>> [x for x in rs] # with _genlist
3129 >>> [x for x in rs] # with _genlist
3127 [0, 3, 2, 5, 4]
3130 [0, 3, 2, 5, 4]
3128 >>> assert rs._genlist
3131 >>> assert rs._genlist
3129
3132
3130 iterate ascending:
3133 iterate ascending:
3131 >>> rs = addset(xs, ys, ascending=True)
3134 >>> rs = addset(xs, ys, ascending=True)
3132 >>> # (use generator because pypy could call len())
3135 >>> # (use generator because pypy could call len())
3133 >>> list(x for x in rs), list(x for x in rs.fastasc()) # without _asclist
3136 >>> list(x for x in rs), list(x for x in rs.fastasc()) # without _asclist
3134 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
3137 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
3135 >>> assert not rs._asclist
3138 >>> assert not rs._asclist
3136 >>> len(rs)
3139 >>> len(rs)
3137 5
3140 5
3138 >>> [x for x in rs], [x for x in rs.fastasc()]
3141 >>> [x for x in rs], [x for x in rs.fastasc()]
3139 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
3142 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
3140 >>> assert rs._asclist
3143 >>> assert rs._asclist
3141
3144
3142 iterate descending:
3145 iterate descending:
3143 >>> rs = addset(xs, ys, ascending=False)
3146 >>> rs = addset(xs, ys, ascending=False)
3144 >>> # (use generator because pypy could call len())
3147 >>> # (use generator because pypy could call len())
3145 >>> list(x for x in rs), list(x for x in rs.fastdesc()) # without _asclist
3148 >>> list(x for x in rs), list(x for x in rs.fastdesc()) # without _asclist
3146 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
3149 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
3147 >>> assert not rs._asclist
3150 >>> assert not rs._asclist
3148 >>> len(rs)
3151 >>> len(rs)
3149 5
3152 5
3150 >>> [x for x in rs], [x for x in rs.fastdesc()]
3153 >>> [x for x in rs], [x for x in rs.fastdesc()]
3151 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
3154 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
3152 >>> assert rs._asclist
3155 >>> assert rs._asclist
3153
3156
3154 iterate ascending without fastasc:
3157 iterate ascending without fastasc:
3155 >>> rs = addset(xs, generatorset(ys), ascending=True)
3158 >>> rs = addset(xs, generatorset(ys), ascending=True)
3156 >>> assert rs.fastasc is None
3159 >>> assert rs.fastasc is None
3157 >>> [x for x in rs]
3160 >>> [x for x in rs]
3158 [0, 2, 3, 4, 5]
3161 [0, 2, 3, 4, 5]
3159
3162
3160 iterate descending without fastdesc:
3163 iterate descending without fastdesc:
3161 >>> rs = addset(generatorset(xs), ys, ascending=False)
3164 >>> rs = addset(generatorset(xs), ys, ascending=False)
3162 >>> assert rs.fastdesc is None
3165 >>> assert rs.fastdesc is None
3163 >>> [x for x in rs]
3166 >>> [x for x in rs]
3164 [5, 4, 3, 2, 0]
3167 [5, 4, 3, 2, 0]
3165 """
3168 """
3166 def __init__(self, revs1, revs2, ascending=None):
3169 def __init__(self, revs1, revs2, ascending=None):
3167 self._r1 = revs1
3170 self._r1 = revs1
3168 self._r2 = revs2
3171 self._r2 = revs2
3169 self._iter = None
3172 self._iter = None
3170 self._ascending = ascending
3173 self._ascending = ascending
3171 self._genlist = None
3174 self._genlist = None
3172 self._asclist = None
3175 self._asclist = None
3173
3176
3174 def __len__(self):
3177 def __len__(self):
3175 return len(self._list)
3178 return len(self._list)
3176
3179
3177 def __nonzero__(self):
3180 def __nonzero__(self):
3178 return bool(self._r1) or bool(self._r2)
3181 return bool(self._r1) or bool(self._r2)
3179
3182
3180 @util.propertycache
3183 @util.propertycache
3181 def _list(self):
3184 def _list(self):
3182 if not self._genlist:
3185 if not self._genlist:
3183 self._genlist = baseset(iter(self))
3186 self._genlist = baseset(iter(self))
3184 return self._genlist
3187 return self._genlist
3185
3188
3186 def __iter__(self):
3189 def __iter__(self):
3187 """Iterate over both collections without repeating elements
3190 """Iterate over both collections without repeating elements
3188
3191
3189 If the ascending attribute is not set, iterate over the first one and
3192 If the ascending attribute is not set, iterate over the first one and
3190 then over the second one checking for membership on the first one so we
3193 then over the second one checking for membership on the first one so we
3191 don't yield any duplicates.
3194 don't yield any duplicates.
3192
3195
3193 If the ascending attribute is set, iterate over both collections at the
3196 If the ascending attribute is set, iterate over both collections at the
3194 same time, yielding only one value at a time in the given order.
3197 same time, yielding only one value at a time in the given order.
3195 """
3198 """
3196 if self._ascending is None:
3199 if self._ascending is None:
3197 if self._genlist:
3200 if self._genlist:
3198 return iter(self._genlist)
3201 return iter(self._genlist)
3199 def arbitraryordergen():
3202 def arbitraryordergen():
3200 for r in self._r1:
3203 for r in self._r1:
3201 yield r
3204 yield r
3202 inr1 = self._r1.__contains__
3205 inr1 = self._r1.__contains__
3203 for r in self._r2:
3206 for r in self._r2:
3204 if not inr1(r):
3207 if not inr1(r):
3205 yield r
3208 yield r
3206 return arbitraryordergen()
3209 return arbitraryordergen()
3207 # try to use our own fast iterator if it exists
3210 # try to use our own fast iterator if it exists
3208 self._trysetasclist()
3211 self._trysetasclist()
3209 if self._ascending:
3212 if self._ascending:
3210 attr = 'fastasc'
3213 attr = 'fastasc'
3211 else:
3214 else:
3212 attr = 'fastdesc'
3215 attr = 'fastdesc'
3213 it = getattr(self, attr)
3216 it = getattr(self, attr)
3214 if it is not None:
3217 if it is not None:
3215 return it()
3218 return it()
3216 # maybe one of the two components supports fast iteration
3219 # maybe one of the two components supports fast iteration
3217 # get iterator for _r1
3220 # get iterator for _r1
3218 iter1 = getattr(self._r1, attr)
3221 iter1 = getattr(self._r1, attr)
3219 if iter1 is None:
3222 if iter1 is None:
3220 # let's avoid side effect (not sure it matters)
3223 # let's avoid side effect (not sure it matters)
3221 iter1 = iter(sorted(self._r1, reverse=not self._ascending))
3224 iter1 = iter(sorted(self._r1, reverse=not self._ascending))
3222 else:
3225 else:
3223 iter1 = iter1()
3226 iter1 = iter1()
3224 # get iterator for _r2
3227 # get iterator for _r2
3225 iter2 = getattr(self._r2, attr)
3228 iter2 = getattr(self._r2, attr)
3226 if iter2 is None:
3229 if iter2 is None:
3227 # let's avoid side effect (not sure it matters)
3230 # let's avoid side effect (not sure it matters)
3228 iter2 = iter(sorted(self._r2, reverse=not self._ascending))
3231 iter2 = iter(sorted(self._r2, reverse=not self._ascending))
3229 else:
3232 else:
3230 iter2 = iter2()
3233 iter2 = iter2()
3231 return _iterordered(self._ascending, iter1, iter2)
3234 return _iterordered(self._ascending, iter1, iter2)
3232
3235
3233 def _trysetasclist(self):
3236 def _trysetasclist(self):
3234 """populate the _asclist attribute if possible and necessary"""
3237 """populate the _asclist attribute if possible and necessary"""
3235 if self._genlist is not None and self._asclist is None:
3238 if self._genlist is not None and self._asclist is None:
3236 self._asclist = sorted(self._genlist)
3239 self._asclist = sorted(self._genlist)
3237
3240
3238 @property
3241 @property
3239 def fastasc(self):
3242 def fastasc(self):
3240 self._trysetasclist()
3243 self._trysetasclist()
3241 if self._asclist is not None:
3244 if self._asclist is not None:
3242 return self._asclist.__iter__
3245 return self._asclist.__iter__
3243 iter1 = self._r1.fastasc
3246 iter1 = self._r1.fastasc
3244 iter2 = self._r2.fastasc
3247 iter2 = self._r2.fastasc
3245 if None in (iter1, iter2):
3248 if None in (iter1, iter2):
3246 return None
3249 return None
3247 return lambda: _iterordered(True, iter1(), iter2())
3250 return lambda: _iterordered(True, iter1(), iter2())
3248
3251
3249 @property
3252 @property
3250 def fastdesc(self):
3253 def fastdesc(self):
3251 self._trysetasclist()
3254 self._trysetasclist()
3252 if self._asclist is not None:
3255 if self._asclist is not None:
3253 return self._asclist.__reversed__
3256 return self._asclist.__reversed__
3254 iter1 = self._r1.fastdesc
3257 iter1 = self._r1.fastdesc
3255 iter2 = self._r2.fastdesc
3258 iter2 = self._r2.fastdesc
3256 if None in (iter1, iter2):
3259 if None in (iter1, iter2):
3257 return None
3260 return None
3258 return lambda: _iterordered(False, iter1(), iter2())
3261 return lambda: _iterordered(False, iter1(), iter2())
3259
3262
3260 def __contains__(self, x):
3263 def __contains__(self, x):
3261 return x in self._r1 or x in self._r2
3264 return x in self._r1 or x in self._r2
3262
3265
3263 def sort(self, reverse=False):
3266 def sort(self, reverse=False):
3264 """Sort the added set
3267 """Sort the added set
3265
3268
3266 For this we use the cached list with all the generated values and if we
3269 For this we use the cached list with all the generated values and if we
3267 know they are ascending or descending we can sort them in a smart way.
3270 know they are ascending or descending we can sort them in a smart way.
3268 """
3271 """
3269 self._ascending = not reverse
3272 self._ascending = not reverse
3270
3273
3271 def isascending(self):
3274 def isascending(self):
3272 return self._ascending is not None and self._ascending
3275 return self._ascending is not None and self._ascending
3273
3276
3274 def isdescending(self):
3277 def isdescending(self):
3275 return self._ascending is not None and not self._ascending
3278 return self._ascending is not None and not self._ascending
3276
3279
3277 def istopo(self):
3280 def istopo(self):
3278 # not worth the trouble asserting if the two sets combined are still
3281 # not worth the trouble asserting if the two sets combined are still
3279 # in topological order. Use the sort() predicate to explicitly sort
3282 # in topological order. Use the sort() predicate to explicitly sort
3280 # again instead.
3283 # again instead.
3281 return False
3284 return False
3282
3285
3283 def reverse(self):
3286 def reverse(self):
3284 if self._ascending is None:
3287 if self._ascending is None:
3285 self._list.reverse()
3288 self._list.reverse()
3286 else:
3289 else:
3287 self._ascending = not self._ascending
3290 self._ascending = not self._ascending
3288
3291
3289 def first(self):
3292 def first(self):
3290 for x in self:
3293 for x in self:
3291 return x
3294 return x
3292 return None
3295 return None
3293
3296
3294 def last(self):
3297 def last(self):
3295 self.reverse()
3298 self.reverse()
3296 val = self.first()
3299 val = self.first()
3297 self.reverse()
3300 self.reverse()
3298 return val
3301 return val
3299
3302
3300 def __repr__(self):
3303 def __repr__(self):
3301 d = {None: '', False: '-', True: '+'}[self._ascending]
3304 d = {None: '', False: '-', True: '+'}[self._ascending]
3302 return '<%s%s %r, %r>' % (type(self).__name__, d, self._r1, self._r2)
3305 return '<%s%s %r, %r>' % (type(self).__name__, d, self._r1, self._r2)
3303
3306
3304 class generatorset(abstractsmartset):
3307 class generatorset(abstractsmartset):
3305 """Wrap a generator for lazy iteration
3308 """Wrap a generator for lazy iteration
3306
3309
3307 Wrapper structure for generators that provides lazy membership and can
3310 Wrapper structure for generators that provides lazy membership and can
3308 be iterated more than once.
3311 be iterated more than once.
3309 When asked for membership it generates values until either it finds the
3312 When asked for membership it generates values until either it finds the
3310 requested one or has gone through all the elements in the generator
3313 requested one or has gone through all the elements in the generator
3311 """
3314 """
3312 def __init__(self, gen, iterasc=None):
3315 def __init__(self, gen, iterasc=None):
3313 """
3316 """
3314 gen: a generator producing the values for the generatorset.
3317 gen: a generator producing the values for the generatorset.
3315 """
3318 """
3316 self._gen = gen
3319 self._gen = gen
3317 self._asclist = None
3320 self._asclist = None
3318 self._cache = {}
3321 self._cache = {}
3319 self._genlist = []
3322 self._genlist = []
3320 self._finished = False
3323 self._finished = False
3321 self._ascending = True
3324 self._ascending = True
3322 if iterasc is not None:
3325 if iterasc is not None:
3323 if iterasc:
3326 if iterasc:
3324 self.fastasc = self._iterator
3327 self.fastasc = self._iterator
3325 self.__contains__ = self._asccontains
3328 self.__contains__ = self._asccontains
3326 else:
3329 else:
3327 self.fastdesc = self._iterator
3330 self.fastdesc = self._iterator
3328 self.__contains__ = self._desccontains
3331 self.__contains__ = self._desccontains
3329
3332
3330 def __nonzero__(self):
3333 def __nonzero__(self):
3331 # Do not use 'for r in self' because it will enforce the iteration
3334 # Do not use 'for r in self' because it will enforce the iteration
3332 # order (default ascending), possibly unrolling a whole descending
3335 # order (default ascending), possibly unrolling a whole descending
3333 # iterator.
3336 # iterator.
3334 if self._genlist:
3337 if self._genlist:
3335 return True
3338 return True
3336 for r in self._consumegen():
3339 for r in self._consumegen():
3337 return True
3340 return True
3338 return False
3341 return False
3339
3342
3340 def __contains__(self, x):
3343 def __contains__(self, x):
3341 if x in self._cache:
3344 if x in self._cache:
3342 return self._cache[x]
3345 return self._cache[x]
3343
3346
3344 # Use new values only, as existing values would be cached.
3347 # Use new values only, as existing values would be cached.
3345 for l in self._consumegen():
3348 for l in self._consumegen():
3346 if l == x:
3349 if l == x:
3347 return True
3350 return True
3348
3351
3349 self._cache[x] = False
3352 self._cache[x] = False
3350 return False
3353 return False
3351
3354
3352 def _asccontains(self, x):
3355 def _asccontains(self, x):
3353 """version of contains optimised for ascending generator"""
3356 """version of contains optimised for ascending generator"""
3354 if x in self._cache:
3357 if x in self._cache:
3355 return self._cache[x]
3358 return self._cache[x]
3356
3359
3357 # Use new values only, as existing values would be cached.
3360 # Use new values only, as existing values would be cached.
3358 for l in self._consumegen():
3361 for l in self._consumegen():
3359 if l == x:
3362 if l == x:
3360 return True
3363 return True
3361 if l > x:
3364 if l > x:
3362 break
3365 break
3363
3366
3364 self._cache[x] = False
3367 self._cache[x] = False
3365 return False
3368 return False
3366
3369
3367 def _desccontains(self, x):
3370 def _desccontains(self, x):
3368 """version of contains optimised for descending generator"""
3371 """version of contains optimised for descending generator"""
3369 if x in self._cache:
3372 if x in self._cache:
3370 return self._cache[x]
3373 return self._cache[x]
3371
3374
3372 # Use new values only, as existing values would be cached.
3375 # Use new values only, as existing values would be cached.
3373 for l in self._consumegen():
3376 for l in self._consumegen():
3374 if l == x:
3377 if l == x:
3375 return True
3378 return True
3376 if l < x:
3379 if l < x:
3377 break
3380 break
3378
3381
3379 self._cache[x] = False
3382 self._cache[x] = False
3380 return False
3383 return False
3381
3384
3382 def __iter__(self):
3385 def __iter__(self):
3383 if self._ascending:
3386 if self._ascending:
3384 it = self.fastasc
3387 it = self.fastasc
3385 else:
3388 else:
3386 it = self.fastdesc
3389 it = self.fastdesc
3387 if it is not None:
3390 if it is not None:
3388 return it()
3391 return it()
3389 # we need to consume the iterator
3392 # we need to consume the iterator
3390 for x in self._consumegen():
3393 for x in self._consumegen():
3391 pass
3394 pass
3392 # now that fastasc/fastdesc are set, re-enter the same code path
3395 # now that fastasc/fastdesc are set, re-enter the same code path
3393 return iter(self)
3396 return iter(self)
3394
3397
3395 def _iterator(self):
3398 def _iterator(self):
3396 if self._finished:
3399 if self._finished:
3397 return iter(self._genlist)
3400 return iter(self._genlist)
3398
3401
3399 # We have to use this complex iteration strategy to allow multiple
3402 # We have to use this complex iteration strategy to allow multiple
3400 # iterations at the same time. We need to be able to catch revisions
3403 # iterations at the same time. We need to be able to catch revisions
3401 # removed from _consumegen and added to genlist by another iterator.
3404 # removed from _consumegen and added to genlist by another iterator.
3402 #
3405 #
3403 # Getting rid of it would provide about a 15% speed up on this
3406 # Getting rid of it would provide about a 15% speed up on this
3404 # iteration.
3407 # iteration.
3405 genlist = self._genlist
3408 genlist = self._genlist
3406 nextrev = self._consumegen().next
3409 nextrev = self._consumegen().next
3407 _len = len # cache global lookup
3410 _len = len # cache global lookup
3408 def gen():
3411 def gen():
3409 i = 0
3412 i = 0
3410 while True:
3413 while True:
3411 if i < _len(genlist):
3414 if i < _len(genlist):
3412 yield genlist[i]
3415 yield genlist[i]
3413 else:
3416 else:
3414 yield nextrev()
3417 yield nextrev()
3415 i += 1
3418 i += 1
3416 return gen()
3419 return gen()
3417
3420
3418 def _consumegen(self):
3421 def _consumegen(self):
3419 cache = self._cache
3422 cache = self._cache
3420 genlist = self._genlist.append
3423 genlist = self._genlist.append
3421 for item in self._gen:
3424 for item in self._gen:
3422 cache[item] = True
3425 cache[item] = True
3423 genlist(item)
3426 genlist(item)
3424 yield item
3427 yield item
3425 if not self._finished:
3428 if not self._finished:
3426 self._finished = True
3429 self._finished = True
3427 asc = self._genlist[:]
3430 asc = self._genlist[:]
3428 asc.sort()
3431 asc.sort()
3429 self._asclist = asc
3432 self._asclist = asc
3430 self.fastasc = asc.__iter__
3433 self.fastasc = asc.__iter__
3431 self.fastdesc = asc.__reversed__
3434 self.fastdesc = asc.__reversed__
3432
3435
3433 def __len__(self):
3436 def __len__(self):
3434 for x in self._consumegen():
3437 for x in self._consumegen():
3435 pass
3438 pass
3436 return len(self._genlist)
3439 return len(self._genlist)
3437
3440
3438 def sort(self, reverse=False):
3441 def sort(self, reverse=False):
3439 self._ascending = not reverse
3442 self._ascending = not reverse
3440
3443
3441 def reverse(self):
3444 def reverse(self):
3442 self._ascending = not self._ascending
3445 self._ascending = not self._ascending
3443
3446
3444 def isascending(self):
3447 def isascending(self):
3445 return self._ascending
3448 return self._ascending
3446
3449
3447 def isdescending(self):
3450 def isdescending(self):
3448 return not self._ascending
3451 return not self._ascending
3449
3452
3450 def istopo(self):
3453 def istopo(self):
3451 # not worth the trouble asserting if the generated values are still
3454 # not worth the trouble asserting if the generated values are still
3452 # in topological order. Use the sort() predicate to explicitly sort
3455 # in topological order. Use the sort() predicate to explicitly sort
3453 # again instead.
3456 # again instead.
3454 return False
3457 return False
3455
3458
3456 def first(self):
3459 def first(self):
3457 if self._ascending:
3460 if self._ascending:
3458 it = self.fastasc
3461 it = self.fastasc
3459 else:
3462 else:
3460 it = self.fastdesc
3463 it = self.fastdesc
3461 if it is None:
3464 if it is None:
3462 # we need to consume all and try again
3465 # we need to consume all and try again
3463 for x in self._consumegen():
3466 for x in self._consumegen():
3464 pass
3467 pass
3465 return self.first()
3468 return self.first()
3466 return next(it(), None)
3469 return next(it(), None)
3467
3470
3468 def last(self):
3471 def last(self):
3469 if self._ascending:
3472 if self._ascending:
3470 it = self.fastdesc
3473 it = self.fastdesc
3471 else:
3474 else:
3472 it = self.fastasc
3475 it = self.fastasc
3473 if it is None:
3476 if it is None:
3474 # we need to consume all and try again
3477 # we need to consume all and try again
3475 for x in self._consumegen():
3478 for x in self._consumegen():
3476 pass
3479 pass
3477 return self.last()
3480 return self.last()
3478 return next(it(), None)
3481 return next(it(), None)
3479
3482
3480 def __repr__(self):
3483 def __repr__(self):
3481 d = {False: '-', True: '+'}[self._ascending]
3484 d = {False: '-', True: '+'}[self._ascending]
3482 return '<%s%s>' % (type(self).__name__, d)
3485 return '<%s%s>' % (type(self).__name__, d)
3483
3486
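# Illustration (a rough sketch): generatorset pulls values from the wrapped
# generator only when it has to, and remembers everything it has seen:
#
#     gs = generatorset(iter([0, 2, 4, 6]), iterasc=True)
#     4 in gs      # -> True; consumes the generator only up to 4
#     list(gs)     # -> [0, 2, 4, 6]; finishes consuming and caches the rest
#     7 in gs      # -> False; the generator is already exhausted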
3484 class spanset(abstractsmartset):
3487 class spanset(abstractsmartset):
3485 """Duck type for baseset class which represents a range of revisions and
3488 """Duck type for baseset class which represents a range of revisions and
3486 can work lazily and without having all the range in memory
3489 can work lazily and without having all the range in memory
3487
3490
3488 Note that spanset(x, y) behaves almost like xrange(x, y) except for two
3491 Note that spanset(x, y) behaves almost like xrange(x, y) except for two
3489 notable points:
3492 notable points:
3490 - when x > y it will be automatically descending,
3493 - when x > y it will be automatically descending,
3491 - revisions filtered by this repoview will be skipped.
3494 - revisions filtered by this repoview will be skipped.
3492
3495
3493 """
3496 """
3494 def __init__(self, repo, start=0, end=None):
3497 def __init__(self, repo, start=0, end=None):
3495 """
3498 """
3496 start: first revision included in the set
3499 start: first revision included in the set
3497 (defaults to 0)
3500 (defaults to 0)
3498 end: first revision excluded (last+1)
3501 end: first revision excluded (last+1)
3499 (defaults to len(repo))
3502 (defaults to len(repo))
3500
3503
3501 Spanset will be descending if `end` < `start`.
3504 Spanset will be descending if `end` < `start`.
3502 """
3505 """
3503 if end is None:
3506 if end is None:
3504 end = len(repo)
3507 end = len(repo)
3505 self._ascending = start <= end
3508 self._ascending = start <= end
3506 if not self._ascending:
3509 if not self._ascending:
3507 start, end = end + 1, start +1
3510 start, end = end + 1, start +1
3508 self._start = start
3511 self._start = start
3509 self._end = end
3512 self._end = end
3510 self._hiddenrevs = repo.changelog.filteredrevs
3513 self._hiddenrevs = repo.changelog.filteredrevs
3511
3514
3512 def sort(self, reverse=False):
3515 def sort(self, reverse=False):
3513 self._ascending = not reverse
3516 self._ascending = not reverse
3514
3517
3515 def reverse(self):
3518 def reverse(self):
3516 self._ascending = not self._ascending
3519 self._ascending = not self._ascending
3517
3520
3518 def istopo(self):
3521 def istopo(self):
3519 # not worth the trouble asserting if the span is still
3522 # not worth the trouble asserting if the span is still
3520 # in topological order. Use the sort() predicate to explicitly sort
3523 # in topological order. Use the sort() predicate to explicitly sort
3521 # again instead.
3524 # again instead.
3522 return False
3525 return False
3523
3526
3524 def _iterfilter(self, iterrange):
3527 def _iterfilter(self, iterrange):
3525 s = self._hiddenrevs
3528 s = self._hiddenrevs
3526 for r in iterrange:
3529 for r in iterrange:
3527 if r not in s:
3530 if r not in s:
3528 yield r
3531 yield r
3529
3532
3530 def __iter__(self):
3533 def __iter__(self):
3531 if self._ascending:
3534 if self._ascending:
3532 return self.fastasc()
3535 return self.fastasc()
3533 else:
3536 else:
3534 return self.fastdesc()
3537 return self.fastdesc()
3535
3538
3536 def fastasc(self):
3539 def fastasc(self):
3537 iterrange = xrange(self._start, self._end)
3540 iterrange = xrange(self._start, self._end)
3538 if self._hiddenrevs:
3541 if self._hiddenrevs:
3539 return self._iterfilter(iterrange)
3542 return self._iterfilter(iterrange)
3540 return iter(iterrange)
3543 return iter(iterrange)
3541
3544
3542 def fastdesc(self):
3545 def fastdesc(self):
3543 iterrange = xrange(self._end - 1, self._start - 1, -1)
3546 iterrange = xrange(self._end - 1, self._start - 1, -1)
3544 if self._hiddenrevs:
3547 if self._hiddenrevs:
3545 return self._iterfilter(iterrange)
3548 return self._iterfilter(iterrange)
3546 return iter(iterrange)
3549 return iter(iterrange)
3547
3550
3548 def __contains__(self, rev):
3551 def __contains__(self, rev):
3549 hidden = self._hiddenrevs
3552 hidden = self._hiddenrevs
3550 return ((self._start <= rev < self._end)
3553 return ((self._start <= rev < self._end)
3551 and not (hidden and rev in hidden))
3554 and not (hidden and rev in hidden))
3552
3555
3553 def __nonzero__(self):
3556 def __nonzero__(self):
3554 for r in self:
3557 for r in self:
3555 return True
3558 return True
3556 return False
3559 return False
3557
3560
3558 def __len__(self):
3561 def __len__(self):
3559 if not self._hiddenrevs:
3562 if not self._hiddenrevs:
3560 return abs(self._end - self._start)
3563 return abs(self._end - self._start)
3561 else:
3564 else:
3562 count = 0
3565 count = 0
3563 start = self._start
3566 start = self._start
3564 end = self._end
3567 end = self._end
3565 for rev in self._hiddenrevs:
3568 for rev in self._hiddenrevs:
3566 if (end < rev <= start) or (start <= rev < end):
3569 if (end < rev <= start) or (start <= rev < end):
3567 count += 1
3570 count += 1
3568 return abs(self._end - self._start) - count
3571 return abs(self._end - self._start) - count
3569
3572
3570 def isascending(self):
3573 def isascending(self):
3571 return self._ascending
3574 return self._ascending
3572
3575
3573 def isdescending(self):
3576 def isdescending(self):
3574 return not self._ascending
3577 return not self._ascending
3575
3578
3576 def first(self):
3579 def first(self):
3577 if self._ascending:
3580 if self._ascending:
3578 it = self.fastasc
3581 it = self.fastasc
3579 else:
3582 else:
3580 it = self.fastdesc
3583 it = self.fastdesc
3581 for x in it():
3584 for x in it():
3582 return x
3585 return x
3583 return None
3586 return None
3584
3587
3585 def last(self):
3588 def last(self):
3586 if self._ascending:
3589 if self._ascending:
3587 it = self.fastdesc
3590 it = self.fastdesc
3588 else:
3591 else:
3589 it = self.fastasc
3592 it = self.fastasc
3590 for x in it():
3593 for x in it():
3591 return x
3594 return x
3592 return None
3595 return None
3593
3596
3594 def __repr__(self):
3597 def __repr__(self):
3595 d = {False: '-', True: '+'}[self._ascending]
3598 d = {False: '-', True: '+'}[self._ascending]
3596 return '<%s%s %d:%d>' % (type(self).__name__, d,
3599 return '<%s%s %d:%d>' % (type(self).__name__, d,
3597 self._start, self._end - 1)
3600 self._start, self._end - 1)
3598
3601
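The spanset shown above is essentially a half-open revision range [start, end) with a direction flag and a hidden-revision filter. As a hedged aside, the following toy sketch (plain Python with invented names, not the real smartset API) mimics the behaviour the methods above implement: iteration in either direction skips hidden revisions, membership checks both the range and the filter, and first() returns the first element of the current iteration order.

# Toy model of spanset's semantics -- illustrative only, not mercurial code.
class toyspan(object):
    def __init__(self, start, end, hidden=()):
        self._start, self._end = start, end    # half-open range [start, end)
        self._hidden = frozenset(hidden)       # stands in for filteredrevs
        self._ascending = True

    def __contains__(self, rev):
        return self._start <= rev < self._end and rev not in self._hidden

    def __iter__(self):
        rng = range(self._start, self._end)
        if not self._ascending:
            rng = reversed(rng)
        return (r for r in rng if r not in self._hidden)

    def first(self):
        for r in self:             # first element in the current order
            return r
        return None

s = toyspan(0, 5, hidden={2})
print(list(s))       # [0, 1, 3, 4] -- the hidden rev 2 is skipped
print(3 in s)        # True
s._ascending = False
print(s.first())     # 4 -- descending order makes first() the highest rev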
3599 class fullreposet(spanset):
3602 class fullreposet(spanset):
3600 """a set containing all revisions in the repo
3603 """a set containing all revisions in the repo
3601
3604
3602 This class exists to host special optimization and magic to handle virtual
3605 This class exists to host special optimization and magic to handle virtual
3603 revisions such as "null".
3606 revisions such as "null".
3604 """
3607 """
3605
3608
3606 def __init__(self, repo):
3609 def __init__(self, repo):
3607 super(fullreposet, self).__init__(repo)
3610 super(fullreposet, self).__init__(repo)
3608
3611
3609 def __and__(self, other):
3612 def __and__(self, other):
3610 """As self contains the whole repo, all of the other set should also be
3613 """As self contains the whole repo, all of the other set should also be
3611 in self. Therefore `self & other = other`.
3614 in self. Therefore `self & other = other`.
3612
3615
3613 This boldly assumes the other contains valid revs only.
3616 This boldly assumes the other contains valid revs only.
3614 """
3617 """
3615 # other is not a smartset, make it so
3618 # other is not a smartset, make it so
3616 if not util.safehasattr(other, 'isascending'):
3619 if not util.safehasattr(other, 'isascending'):
3617 # filter out hidden revisions
3620 # filter out hidden revisions
3618 # (this boldly assumes all smartsets are pure)
3621 # (this boldly assumes all smartsets are pure)
3619 #
3622 #
3620 # `other` was used with "&", let's assume this is a set-like
3623 # `other` was used with "&", let's assume this is a set-like
3621 # object.
3624 # object.
3622 other = baseset(other - self._hiddenrevs)
3625 other = baseset(other - self._hiddenrevs)
3623
3626
3624 # XXX As fullreposet is also used as bootstrap, this is wrong.
3627 # XXX As fullreposet is also used as bootstrap, this is wrong.
3625 #
3628 #
3626 # With a giveme312() revset returning [3,1,2], this makes
3629 # With a giveme312() revset returning [3,1,2], this makes
3627 # 'hg log -r "giveme312()"' -> 1, 2, 3 (wrong)
3630 # 'hg log -r "giveme312()"' -> 1, 2, 3 (wrong)
3628 # We cannot just drop it because other usages still need to sort it:
3631 # We cannot just drop it because other usages still need to sort it:
3629 # 'hg log -r "all() and giveme312()"' -> 1, 2, 3 (right)
3632 # 'hg log -r "all() and giveme312()"' -> 1, 2, 3 (right)
3630 #
3633 #
3631 # There are also some faulty revset implementations that rely on it
3634 # There are also some faulty revset implementations that rely on it
3632 # (eg: children as of its state in e8075329c5fb)
3635 # (eg: children as of its state in e8075329c5fb)
3633 #
3636 #
3634 # When we fix the two points above we can move this into the if clause
3637 # When we fix the two points above we can move this into the if clause
3635 other.sort(reverse=self.isdescending())
3638 other.sort(reverse=self.isdescending())
3636 return other
3639 return other
3637
3640
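Because fullreposet covers every revision, intersecting it with another set reduces to dropping hidden revisions from non-smartset inputs and re-sorting the result. A rough stand-alone illustration of that identity with invented names (plain lists and a set, not the real smartset classes):

# Hedged sketch of "fullreposet & other == other (re-sorted)".
hiddenrevs = {7, 8}       # stand-in for repo.changelog.filteredrevs

def and_with_full(other, descending=False):
    # The real code boldly assumes `other` holds valid revs only; a plain
    # (non-smartset) input is wrapped in a baseset after dropping hidden revs.
    visible = [r for r in other if r not in hiddenrevs]
    # As the XXX comment above notes, the result is unconditionally re-sorted,
    # which loses any deliberate ordering such as [3, 1, 2].
    visible.sort(reverse=descending)
    return visible

print(and_with_full([3, 1, 2]))    # [1, 2, 3]
print(and_with_full([5, 9, 8]))    # [5, 9] -- hidden rev 8 is filtered out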
3638 def prettyformatset(revs):
3641 def prettyformatset(revs):
3639 lines = []
3642 lines = []
3640 rs = repr(revs)
3643 rs = repr(revs)
3641 p = 0
3644 p = 0
3642 while p < len(rs):
3645 while p < len(rs):
3643 q = rs.find('<', p + 1)
3646 q = rs.find('<', p + 1)
3644 if q < 0:
3647 if q < 0:
3645 q = len(rs)
3648 q = len(rs)
3646 l = rs.count('<', 0, p) - rs.count('>', 0, p)
3649 l = rs.count('<', 0, p) - rs.count('>', 0, p)
3647 assert l >= 0
3650 assert l >= 0
3648 lines.append((l, rs[p:q].rstrip()))
3651 lines.append((l, rs[p:q].rstrip()))
3649 p = q
3652 p = q
3650 return '\n'.join(' ' * l + s for l, s in lines)
3653 return '\n'.join(' ' * l + s for l, s in lines)
3651
3654
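prettyformatset only reshapes the repr() of a (possibly nested) smartset: every '<' starts a new line, indented one space per enclosing '<' that is still unclosed at that point. A hedged illustration; the fakeset class and its repr string are made up for the example:

class fakeset(object):
    """Stand-in object whose repr() looks like a nested smartset."""
    def __repr__(self):
        return '<filteredset <spanset+ 0:5>, <not <baseset [2]>>>'

print(prettyformatset(fakeset()))
# <filteredset
#  <spanset+ 0:5>,
#  <not
#   <baseset [2]>>>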
3652 def loadpredicate(ui, extname, registrarobj):
3655 def loadpredicate(ui, extname, registrarobj):
3653 """Load revset predicates from specified registrarobj
3656 """Load revset predicates from specified registrarobj
3654 """
3657 """
3655 for name, func in registrarobj._table.iteritems():
3658 for name, func in registrarobj._table.iteritems():
3656 symbols[name] = func
3659 symbols[name] = func
3657 if func._safe:
3660 if func._safe:
3658 safesymbols.add(name)
3661 safesymbols.add(name)
3659
3662
3660 # load built-in predicates explicitly to set up safesymbols
3663 # load built-in predicates explicitly to set up safesymbols
3661 loadpredicate(None, None, predicate)
3664 loadpredicate(None, None, predicate)
3662
3665
3663 # tell hggettext to extract docstrings from these functions:
3666 # tell hggettext to extract docstrings from these functions:
3664 i18nfunctions = symbols.values()
3667 i18nfunctions = symbols.values()
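loadpredicate is the hook through which extensions contribute revset predicates: an extension exposes a registrar.revsetpredicate table, and when the extension is loaded that table is passed through loadpredicate so its entries land in symbols (and, if declared safe, in safesymbols), mirroring the explicit loadpredicate(None, None, predicate) call above for the built-ins. A hedged sketch of such an extension module follows; the predicate name twoparents() and its body are invented for illustration.

# Hypothetical extension module -- the predicate is made up, the pattern is not.
from mercurial import registrar

revsetpredicate = registrar.revsetpredicate()

@revsetpredicate('twoparents()', safe=True)
def twoparents(repo, subset, x):
    """Changesets with two parents, roughly what the built-in merge() does."""
    cl = repo.changelog
    return subset.filter(lambda r: cl.parentrevs(r)[1] != -1)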