revsets: passing a set to baseset() is not wrong...
Martin von Zweigbergk
r29406:c2193e59 default
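The change below touches _children(): it renames the misleadingly named narrow parameter to subset and drops an XXX comment that claimed feeding a Python set to baseset() is wrong. A minimal sketch of why the unordered input is acceptable, using a simplified stand-in class rather than Mercurial's real baseset (illustration only, not the project's implementation):

# Illustration only: a cut-down stand-in for revset.baseset. The point is
# that the wrapper copies whatever iterable it is given into a list, so an
# unordered set is fine as input; callers that care about order sort later.
class fakebaseset(object):
    def __init__(self, data=()):
        self._list = list(data)          # works for lists, sets, generators

    def sort(self, reverse=False):
        self._list.sort(reverse=reverse)

    def __iter__(self):
        return iter(self._list)

# the pattern used by _children(): accumulate matching revisions in a set,
# then wrap the set once at the end
cs = set()
for r in (5, 2, 9):
    cs.add(r)
children = fakebaseset(cs)
children.sort()
print(list(children))                    # [2, 5, 9]

The real baseset is richer (it exposes the isascending()/sort() interface that getset() relies on further down in this file), but the ordering concern raised by the removed comment does not apply to its construction.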
@@ -1,3672 +1,3668
1 # revset.py - revision set queries for mercurial
1 # revset.py - revision set queries for mercurial
2 #
2 #
3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import heapq
10 import heapq
11 import re
11 import re
12
12
13 from .i18n import _
13 from .i18n import _
14 from . import (
14 from . import (
15 destutil,
15 destutil,
16 encoding,
16 encoding,
17 error,
17 error,
18 hbisect,
18 hbisect,
19 match as matchmod,
19 match as matchmod,
20 node,
20 node,
21 obsolete as obsmod,
21 obsolete as obsmod,
22 parser,
22 parser,
23 pathutil,
23 pathutil,
24 phases,
24 phases,
25 registrar,
25 registrar,
26 repoview,
26 repoview,
27 util,
27 util,
28 )
28 )
29
29
30 def _revancestors(repo, revs, followfirst):
30 def _revancestors(repo, revs, followfirst):
31 """Like revlog.ancestors(), but supports followfirst."""
31 """Like revlog.ancestors(), but supports followfirst."""
32 if followfirst:
32 if followfirst:
33 cut = 1
33 cut = 1
34 else:
34 else:
35 cut = None
35 cut = None
36 cl = repo.changelog
36 cl = repo.changelog
37
37
38 def iterate():
38 def iterate():
39 revs.sort(reverse=True)
39 revs.sort(reverse=True)
40 irevs = iter(revs)
40 irevs = iter(revs)
41 h = []
41 h = []
42
42
43 inputrev = next(irevs, None)
43 inputrev = next(irevs, None)
44 if inputrev is not None:
44 if inputrev is not None:
45 heapq.heappush(h, -inputrev)
45 heapq.heappush(h, -inputrev)
46
46
47 seen = set()
47 seen = set()
48 while h:
48 while h:
49 current = -heapq.heappop(h)
49 current = -heapq.heappop(h)
50 if current == inputrev:
50 if current == inputrev:
51 inputrev = next(irevs, None)
51 inputrev = next(irevs, None)
52 if inputrev is not None:
52 if inputrev is not None:
53 heapq.heappush(h, -inputrev)
53 heapq.heappush(h, -inputrev)
54 if current not in seen:
54 if current not in seen:
55 seen.add(current)
55 seen.add(current)
56 yield current
56 yield current
57 for parent in cl.parentrevs(current)[:cut]:
57 for parent in cl.parentrevs(current)[:cut]:
58 if parent != node.nullrev:
58 if parent != node.nullrev:
59 heapq.heappush(h, -parent)
59 heapq.heappush(h, -parent)
60
60
61 return generatorset(iterate(), iterasc=False)
61 return generatorset(iterate(), iterasc=False)
62
62
63 def _revdescendants(repo, revs, followfirst):
63 def _revdescendants(repo, revs, followfirst):
64 """Like revlog.descendants() but supports followfirst."""
64 """Like revlog.descendants() but supports followfirst."""
65 if followfirst:
65 if followfirst:
66 cut = 1
66 cut = 1
67 else:
67 else:
68 cut = None
68 cut = None
69
69
70 def iterate():
70 def iterate():
71 cl = repo.changelog
71 cl = repo.changelog
72 # XXX this should be 'parentset.min()' assuming 'parentset' is a
72 # XXX this should be 'parentset.min()' assuming 'parentset' is a
73 # smartset (and if it is not, it should.)
73 # smartset (and if it is not, it should.)
74 first = min(revs)
74 first = min(revs)
75 nullrev = node.nullrev
75 nullrev = node.nullrev
76 if first == nullrev:
76 if first == nullrev:
77 # Are there nodes with a null first parent and a non-null
77 # Are there nodes with a null first parent and a non-null
78 # second one? Maybe. Do we care? Probably not.
78 # second one? Maybe. Do we care? Probably not.
79 for i in cl:
79 for i in cl:
80 yield i
80 yield i
81 else:
81 else:
82 seen = set(revs)
82 seen = set(revs)
83 for i in cl.revs(first + 1):
83 for i in cl.revs(first + 1):
84 for x in cl.parentrevs(i)[:cut]:
84 for x in cl.parentrevs(i)[:cut]:
85 if x != nullrev and x in seen:
85 if x != nullrev and x in seen:
86 seen.add(i)
86 seen.add(i)
87 yield i
87 yield i
88 break
88 break
89
89
90 return generatorset(iterate(), iterasc=True)
90 return generatorset(iterate(), iterasc=True)
91
91
92 def _reachablerootspure(repo, minroot, roots, heads, includepath):
92 def _reachablerootspure(repo, minroot, roots, heads, includepath):
93 """return (heads(::<roots> and ::<heads>))
93 """return (heads(::<roots> and ::<heads>))
94
94
95 If includepath is True, return (<roots>::<heads>)."""
95 If includepath is True, return (<roots>::<heads>)."""
96 if not roots:
96 if not roots:
97 return []
97 return []
98 parentrevs = repo.changelog.parentrevs
98 parentrevs = repo.changelog.parentrevs
99 roots = set(roots)
99 roots = set(roots)
100 visit = list(heads)
100 visit = list(heads)
101 reachable = set()
101 reachable = set()
102 seen = {}
102 seen = {}
103 # prefetch all the things! (because python is slow)
103 # prefetch all the things! (because python is slow)
104 reached = reachable.add
104 reached = reachable.add
105 dovisit = visit.append
105 dovisit = visit.append
106 nextvisit = visit.pop
106 nextvisit = visit.pop
107 # open-code the post-order traversal due to the tiny size of
107 # open-code the post-order traversal due to the tiny size of
108 # sys.getrecursionlimit()
108 # sys.getrecursionlimit()
109 while visit:
109 while visit:
110 rev = nextvisit()
110 rev = nextvisit()
111 if rev in roots:
111 if rev in roots:
112 reached(rev)
112 reached(rev)
113 if not includepath:
113 if not includepath:
114 continue
114 continue
115 parents = parentrevs(rev)
115 parents = parentrevs(rev)
116 seen[rev] = parents
116 seen[rev] = parents
117 for parent in parents:
117 for parent in parents:
118 if parent >= minroot and parent not in seen:
118 if parent >= minroot and parent not in seen:
119 dovisit(parent)
119 dovisit(parent)
120 if not reachable:
120 if not reachable:
121 return baseset()
121 return baseset()
122 if not includepath:
122 if not includepath:
123 return reachable
123 return reachable
124 for rev in sorted(seen):
124 for rev in sorted(seen):
125 for parent in seen[rev]:
125 for parent in seen[rev]:
126 if parent in reachable:
126 if parent in reachable:
127 reached(rev)
127 reached(rev)
128 return reachable
128 return reachable
129
129
130 def reachableroots(repo, roots, heads, includepath=False):
130 def reachableroots(repo, roots, heads, includepath=False):
131 """return (heads(::<roots> and ::<heads>))
131 """return (heads(::<roots> and ::<heads>))
132
132
133 If includepath is True, return (<roots>::<heads>)."""
133 If includepath is True, return (<roots>::<heads>)."""
134 if not roots:
134 if not roots:
135 return baseset()
135 return baseset()
136 minroot = roots.min()
136 minroot = roots.min()
137 roots = list(roots)
137 roots = list(roots)
138 heads = list(heads)
138 heads = list(heads)
139 try:
139 try:
140 revs = repo.changelog.reachableroots(minroot, heads, roots, includepath)
140 revs = repo.changelog.reachableroots(minroot, heads, roots, includepath)
141 except AttributeError:
141 except AttributeError:
142 revs = _reachablerootspure(repo, minroot, roots, heads, includepath)
142 revs = _reachablerootspure(repo, minroot, roots, heads, includepath)
143 revs = baseset(revs)
143 revs = baseset(revs)
144 revs.sort()
144 revs.sort()
145 return revs
145 return revs
146
146
147 elements = {
147 elements = {
148 # token-type: binding-strength, primary, prefix, infix, suffix
148 # token-type: binding-strength, primary, prefix, infix, suffix
149 "(": (21, None, ("group", 1, ")"), ("func", 1, ")"), None),
149 "(": (21, None, ("group", 1, ")"), ("func", 1, ")"), None),
150 "##": (20, None, None, ("_concat", 20), None),
150 "##": (20, None, None, ("_concat", 20), None),
151 "~": (18, None, None, ("ancestor", 18), None),
151 "~": (18, None, None, ("ancestor", 18), None),
152 "^": (18, None, None, ("parent", 18), ("parentpost", 18)),
152 "^": (18, None, None, ("parent", 18), ("parentpost", 18)),
153 "-": (5, None, ("negate", 19), ("minus", 5), None),
153 "-": (5, None, ("negate", 19), ("minus", 5), None),
154 "::": (17, None, ("dagrangepre", 17), ("dagrange", 17),
154 "::": (17, None, ("dagrangepre", 17), ("dagrange", 17),
155 ("dagrangepost", 17)),
155 ("dagrangepost", 17)),
156 "..": (17, None, ("dagrangepre", 17), ("dagrange", 17),
156 "..": (17, None, ("dagrangepre", 17), ("dagrange", 17),
157 ("dagrangepost", 17)),
157 ("dagrangepost", 17)),
158 ":": (15, "rangeall", ("rangepre", 15), ("range", 15), ("rangepost", 15)),
158 ":": (15, "rangeall", ("rangepre", 15), ("range", 15), ("rangepost", 15)),
159 "not": (10, None, ("not", 10), None, None),
159 "not": (10, None, ("not", 10), None, None),
160 "!": (10, None, ("not", 10), None, None),
160 "!": (10, None, ("not", 10), None, None),
161 "and": (5, None, None, ("and", 5), None),
161 "and": (5, None, None, ("and", 5), None),
162 "&": (5, None, None, ("and", 5), None),
162 "&": (5, None, None, ("and", 5), None),
163 "%": (5, None, None, ("only", 5), ("onlypost", 5)),
163 "%": (5, None, None, ("only", 5), ("onlypost", 5)),
164 "or": (4, None, None, ("or", 4), None),
164 "or": (4, None, None, ("or", 4), None),
165 "|": (4, None, None, ("or", 4), None),
165 "|": (4, None, None, ("or", 4), None),
166 "+": (4, None, None, ("or", 4), None),
166 "+": (4, None, None, ("or", 4), None),
167 "=": (3, None, None, ("keyvalue", 3), None),
167 "=": (3, None, None, ("keyvalue", 3), None),
168 ",": (2, None, None, ("list", 2), None),
168 ",": (2, None, None, ("list", 2), None),
169 ")": (0, None, None, None, None),
169 ")": (0, None, None, None, None),
170 "symbol": (0, "symbol", None, None, None),
170 "symbol": (0, "symbol", None, None, None),
171 "string": (0, "string", None, None, None),
171 "string": (0, "string", None, None, None),
172 "end": (0, None, None, None, None),
172 "end": (0, None, None, None, None),
173 }
173 }
174
174
175 keywords = set(['and', 'or', 'not'])
175 keywords = set(['and', 'or', 'not'])
176
176
177 # default set of valid characters for the initial letter of symbols
177 # default set of valid characters for the initial letter of symbols
178 _syminitletters = set(c for c in [chr(i) for i in xrange(256)]
178 _syminitletters = set(c for c in [chr(i) for i in xrange(256)]
179 if c.isalnum() or c in '._@' or ord(c) > 127)
179 if c.isalnum() or c in '._@' or ord(c) > 127)
180
180
181 # default set of valid characters for non-initial letters of symbols
181 # default set of valid characters for non-initial letters of symbols
182 _symletters = set(c for c in [chr(i) for i in xrange(256)]
182 _symletters = set(c for c in [chr(i) for i in xrange(256)]
183 if c.isalnum() or c in '-._/@' or ord(c) > 127)
183 if c.isalnum() or c in '-._/@' or ord(c) > 127)
184
184
185 def tokenize(program, lookup=None, syminitletters=None, symletters=None):
185 def tokenize(program, lookup=None, syminitletters=None, symletters=None):
186 '''
186 '''
187 Parse a revset statement into a stream of tokens
187 Parse a revset statement into a stream of tokens
188
188
189 ``syminitletters`` is the set of valid characters for the initial
189 ``syminitletters`` is the set of valid characters for the initial
190 letter of symbols.
190 letter of symbols.
191
191
192 By default, character ``c`` is recognized as valid for initial
192 By default, character ``c`` is recognized as valid for initial
193 letter of symbols, if ``c.isalnum() or c in '._@' or ord(c) > 127``.
193 letter of symbols, if ``c.isalnum() or c in '._@' or ord(c) > 127``.
194
194
195 ``symletters`` is the set of valid characters for non-initial
195 ``symletters`` is the set of valid characters for non-initial
196 letters of symbols.
196 letters of symbols.
197
197
198 By default, character ``c`` is recognized as valid for non-initial
198 By default, character ``c`` is recognized as valid for non-initial
199 letters of symbols, if ``c.isalnum() or c in '-._/@' or ord(c) > 127``.
199 letters of symbols, if ``c.isalnum() or c in '-._/@' or ord(c) > 127``.
200
200
201 Check that @ is a valid unquoted token character (issue3686):
201 Check that @ is a valid unquoted token character (issue3686):
202 >>> list(tokenize("@::"))
202 >>> list(tokenize("@::"))
203 [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]
203 [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]
204
204
205 '''
205 '''
206 if syminitletters is None:
206 if syminitletters is None:
207 syminitletters = _syminitletters
207 syminitletters = _syminitletters
208 if symletters is None:
208 if symletters is None:
209 symletters = _symletters
209 symletters = _symletters
210
210
211 if program and lookup:
211 if program and lookup:
212 # attempt to parse old-style ranges first to deal with
212 # attempt to parse old-style ranges first to deal with
213 # things like old-tag which contain query metacharacters
213 # things like old-tag which contain query metacharacters
214 parts = program.split(':', 1)
214 parts = program.split(':', 1)
215 if all(lookup(sym) for sym in parts if sym):
215 if all(lookup(sym) for sym in parts if sym):
216 if parts[0]:
216 if parts[0]:
217 yield ('symbol', parts[0], 0)
217 yield ('symbol', parts[0], 0)
218 if len(parts) > 1:
218 if len(parts) > 1:
219 s = len(parts[0])
219 s = len(parts[0])
220 yield (':', None, s)
220 yield (':', None, s)
221 if parts[1]:
221 if parts[1]:
222 yield ('symbol', parts[1], s + 1)
222 yield ('symbol', parts[1], s + 1)
223 yield ('end', None, len(program))
223 yield ('end', None, len(program))
224 return
224 return
225
225
226 pos, l = 0, len(program)
226 pos, l = 0, len(program)
227 while pos < l:
227 while pos < l:
228 c = program[pos]
228 c = program[pos]
229 if c.isspace(): # skip inter-token whitespace
229 if c.isspace(): # skip inter-token whitespace
230 pass
230 pass
231 elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
231 elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
232 yield ('::', None, pos)
232 yield ('::', None, pos)
233 pos += 1 # skip ahead
233 pos += 1 # skip ahead
234 elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
234 elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
235 yield ('..', None, pos)
235 yield ('..', None, pos)
236 pos += 1 # skip ahead
236 pos += 1 # skip ahead
237 elif c == '#' and program[pos:pos + 2] == '##': # look ahead carefully
237 elif c == '#' and program[pos:pos + 2] == '##': # look ahead carefully
238 yield ('##', None, pos)
238 yield ('##', None, pos)
239 pos += 1 # skip ahead
239 pos += 1 # skip ahead
240 elif c in "():=,-|&+!~^%": # handle simple operators
240 elif c in "():=,-|&+!~^%": # handle simple operators
241 yield (c, None, pos)
241 yield (c, None, pos)
242 elif (c in '"\'' or c == 'r' and
242 elif (c in '"\'' or c == 'r' and
243 program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
243 program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
244 if c == 'r':
244 if c == 'r':
245 pos += 1
245 pos += 1
246 c = program[pos]
246 c = program[pos]
247 decode = lambda x: x
247 decode = lambda x: x
248 else:
248 else:
249 decode = parser.unescapestr
249 decode = parser.unescapestr
250 pos += 1
250 pos += 1
251 s = pos
251 s = pos
252 while pos < l: # find closing quote
252 while pos < l: # find closing quote
253 d = program[pos]
253 d = program[pos]
254 if d == '\\': # skip over escaped characters
254 if d == '\\': # skip over escaped characters
255 pos += 2
255 pos += 2
256 continue
256 continue
257 if d == c:
257 if d == c:
258 yield ('string', decode(program[s:pos]), s)
258 yield ('string', decode(program[s:pos]), s)
259 break
259 break
260 pos += 1
260 pos += 1
261 else:
261 else:
262 raise error.ParseError(_("unterminated string"), s)
262 raise error.ParseError(_("unterminated string"), s)
263 # gather up a symbol/keyword
263 # gather up a symbol/keyword
264 elif c in syminitletters:
264 elif c in syminitletters:
265 s = pos
265 s = pos
266 pos += 1
266 pos += 1
267 while pos < l: # find end of symbol
267 while pos < l: # find end of symbol
268 d = program[pos]
268 d = program[pos]
269 if d not in symletters:
269 if d not in symletters:
270 break
270 break
271 if d == '.' and program[pos - 1] == '.': # special case for ..
271 if d == '.' and program[pos - 1] == '.': # special case for ..
272 pos -= 1
272 pos -= 1
273 break
273 break
274 pos += 1
274 pos += 1
275 sym = program[s:pos]
275 sym = program[s:pos]
276 if sym in keywords: # operator keywords
276 if sym in keywords: # operator keywords
277 yield (sym, None, s)
277 yield (sym, None, s)
278 elif '-' in sym:
278 elif '-' in sym:
279 # some jerk gave us foo-bar-baz, try to check if it's a symbol
279 # some jerk gave us foo-bar-baz, try to check if it's a symbol
280 if lookup and lookup(sym):
280 if lookup and lookup(sym):
281 # looks like a real symbol
281 # looks like a real symbol
282 yield ('symbol', sym, s)
282 yield ('symbol', sym, s)
283 else:
283 else:
284 # looks like an expression
284 # looks like an expression
285 parts = sym.split('-')
285 parts = sym.split('-')
286 for p in parts[:-1]:
286 for p in parts[:-1]:
287 if p: # possible consecutive -
287 if p: # possible consecutive -
288 yield ('symbol', p, s)
288 yield ('symbol', p, s)
289 s += len(p)
289 s += len(p)
290 yield ('-', None, pos)
290 yield ('-', None, pos)
291 s += 1
291 s += 1
292 if parts[-1]: # possible trailing -
292 if parts[-1]: # possible trailing -
293 yield ('symbol', parts[-1], s)
293 yield ('symbol', parts[-1], s)
294 else:
294 else:
295 yield ('symbol', sym, s)
295 yield ('symbol', sym, s)
296 pos -= 1
296 pos -= 1
297 else:
297 else:
298 raise error.ParseError(_("syntax error in revset '%s'") %
298 raise error.ParseError(_("syntax error in revset '%s'") %
299 program, pos)
299 program, pos)
300 pos += 1
300 pos += 1
301 yield ('end', None, pos)
301 yield ('end', None, pos)
302
302
303 # helpers
303 # helpers
304
304
305 def getstring(x, err):
305 def getstring(x, err):
306 if x and (x[0] == 'string' or x[0] == 'symbol'):
306 if x and (x[0] == 'string' or x[0] == 'symbol'):
307 return x[1]
307 return x[1]
308 raise error.ParseError(err)
308 raise error.ParseError(err)
309
309
310 def getlist(x):
310 def getlist(x):
311 if not x:
311 if not x:
312 return []
312 return []
313 if x[0] == 'list':
313 if x[0] == 'list':
314 return list(x[1:])
314 return list(x[1:])
315 return [x]
315 return [x]
316
316
317 def getargs(x, min, max, err):
317 def getargs(x, min, max, err):
318 l = getlist(x)
318 l = getlist(x)
319 if len(l) < min or (max >= 0 and len(l) > max):
319 if len(l) < min or (max >= 0 and len(l) > max):
320 raise error.ParseError(err)
320 raise error.ParseError(err)
321 return l
321 return l
322
322
323 def getargsdict(x, funcname, keys):
323 def getargsdict(x, funcname, keys):
324 return parser.buildargsdict(getlist(x), funcname, keys.split(),
324 return parser.buildargsdict(getlist(x), funcname, keys.split(),
325 keyvaluenode='keyvalue', keynode='symbol')
325 keyvaluenode='keyvalue', keynode='symbol')
326
326
327 def getset(repo, subset, x):
327 def getset(repo, subset, x):
328 if not x:
328 if not x:
329 raise error.ParseError(_("missing argument"))
329 raise error.ParseError(_("missing argument"))
330 s = methods[x[0]](repo, subset, *x[1:])
330 s = methods[x[0]](repo, subset, *x[1:])
331 if util.safehasattr(s, 'isascending'):
331 if util.safehasattr(s, 'isascending'):
332 return s
332 return s
333 # else case should not happen, because all non-func are internal,
333 # else case should not happen, because all non-func are internal,
334 # ignoring for now.
334 # ignoring for now.
335 if x[0] == 'func' and x[1][0] == 'symbol' and x[1][1] in symbols:
335 if x[0] == 'func' and x[1][0] == 'symbol' and x[1][1] in symbols:
336 repo.ui.deprecwarn('revset "%s" uses list instead of smartset'
336 repo.ui.deprecwarn('revset "%s" uses list instead of smartset'
337 % x[1][1],
337 % x[1][1],
338 '3.9')
338 '3.9')
339 return baseset(s)
339 return baseset(s)
340
340
341 def _getrevsource(repo, r):
341 def _getrevsource(repo, r):
342 extra = repo[r].extra()
342 extra = repo[r].extra()
343 for label in ('source', 'transplant_source', 'rebase_source'):
343 for label in ('source', 'transplant_source', 'rebase_source'):
344 if label in extra:
344 if label in extra:
345 try:
345 try:
346 return repo[extra[label]].rev()
346 return repo[extra[label]].rev()
347 except error.RepoLookupError:
347 except error.RepoLookupError:
348 pass
348 pass
349 return None
349 return None
350
350
351 # operator methods
351 # operator methods
352
352
353 def stringset(repo, subset, x):
353 def stringset(repo, subset, x):
354 x = repo[x].rev()
354 x = repo[x].rev()
355 if (x in subset
355 if (x in subset
356 or x == node.nullrev and isinstance(subset, fullreposet)):
356 or x == node.nullrev and isinstance(subset, fullreposet)):
357 return baseset([x])
357 return baseset([x])
358 return baseset()
358 return baseset()
359
359
360 def rangeset(repo, subset, x, y):
360 def rangeset(repo, subset, x, y):
361 m = getset(repo, fullreposet(repo), x)
361 m = getset(repo, fullreposet(repo), x)
362 n = getset(repo, fullreposet(repo), y)
362 n = getset(repo, fullreposet(repo), y)
363
363
364 if not m or not n:
364 if not m or not n:
365 return baseset()
365 return baseset()
366 m, n = m.first(), n.last()
366 m, n = m.first(), n.last()
367
367
368 if m == n:
368 if m == n:
369 r = baseset([m])
369 r = baseset([m])
370 elif n == node.wdirrev:
370 elif n == node.wdirrev:
371 r = spanset(repo, m, len(repo)) + baseset([n])
371 r = spanset(repo, m, len(repo)) + baseset([n])
372 elif m == node.wdirrev:
372 elif m == node.wdirrev:
373 r = baseset([m]) + spanset(repo, len(repo) - 1, n - 1)
373 r = baseset([m]) + spanset(repo, len(repo) - 1, n - 1)
374 elif m < n:
374 elif m < n:
375 r = spanset(repo, m, n + 1)
375 r = spanset(repo, m, n + 1)
376 else:
376 else:
377 r = spanset(repo, m, n - 1)
377 r = spanset(repo, m, n - 1)
378 # XXX We should combine with subset first: 'subset & baseset(...)'. This is
378 # XXX We should combine with subset first: 'subset & baseset(...)'. This is
379 # necessary to ensure we preserve the order in subset.
379 # necessary to ensure we preserve the order in subset.
380 #
380 #
381 # This has performance implication, carrying the sorting over when possible
381 # This has performance implication, carrying the sorting over when possible
382 # would be more efficient.
382 # would be more efficient.
383 return r & subset
383 return r & subset
384
384
385 def dagrange(repo, subset, x, y):
385 def dagrange(repo, subset, x, y):
386 r = fullreposet(repo)
386 r = fullreposet(repo)
387 xs = reachableroots(repo, getset(repo, r, x), getset(repo, r, y),
387 xs = reachableroots(repo, getset(repo, r, x), getset(repo, r, y),
388 includepath=True)
388 includepath=True)
389 return subset & xs
389 return subset & xs
390
390
391 def andset(repo, subset, x, y):
391 def andset(repo, subset, x, y):
392 return getset(repo, getset(repo, subset, x), y)
392 return getset(repo, getset(repo, subset, x), y)
393
393
394 def differenceset(repo, subset, x, y):
394 def differenceset(repo, subset, x, y):
395 return getset(repo, subset, x) - getset(repo, subset, y)
395 return getset(repo, subset, x) - getset(repo, subset, y)
396
396
397 def orset(repo, subset, *xs):
397 def orset(repo, subset, *xs):
398 assert xs
398 assert xs
399 if len(xs) == 1:
399 if len(xs) == 1:
400 return getset(repo, subset, xs[0])
400 return getset(repo, subset, xs[0])
401 p = len(xs) // 2
401 p = len(xs) // 2
402 a = orset(repo, subset, *xs[:p])
402 a = orset(repo, subset, *xs[:p])
403 b = orset(repo, subset, *xs[p:])
403 b = orset(repo, subset, *xs[p:])
404 return a + b
404 return a + b
405
405
406 def notset(repo, subset, x):
406 def notset(repo, subset, x):
407 return subset - getset(repo, subset, x)
407 return subset - getset(repo, subset, x)
408
408
409 def listset(repo, subset, *xs):
409 def listset(repo, subset, *xs):
410 raise error.ParseError(_("can't use a list in this context"),
410 raise error.ParseError(_("can't use a list in this context"),
411 hint=_('see hg help "revsets.x or y"'))
411 hint=_('see hg help "revsets.x or y"'))
412
412
413 def keyvaluepair(repo, subset, k, v):
413 def keyvaluepair(repo, subset, k, v):
414 raise error.ParseError(_("can't use a key-value pair in this context"))
414 raise error.ParseError(_("can't use a key-value pair in this context"))
415
415
416 def func(repo, subset, a, b):
416 def func(repo, subset, a, b):
417 if a[0] == 'symbol' and a[1] in symbols:
417 if a[0] == 'symbol' and a[1] in symbols:
418 return symbols[a[1]](repo, subset, b)
418 return symbols[a[1]](repo, subset, b)
419
419
420 keep = lambda fn: getattr(fn, '__doc__', None) is not None
420 keep = lambda fn: getattr(fn, '__doc__', None) is not None
421
421
422 syms = [s for (s, fn) in symbols.items() if keep(fn)]
422 syms = [s for (s, fn) in symbols.items() if keep(fn)]
423 raise error.UnknownIdentifier(a[1], syms)
423 raise error.UnknownIdentifier(a[1], syms)
424
424
425 # functions
425 # functions
426
426
427 # symbols are callables like:
427 # symbols are callables like:
428 # fn(repo, subset, x)
428 # fn(repo, subset, x)
429 # with:
429 # with:
430 # repo - current repository instance
430 # repo - current repository instance
431 # subset - of revisions to be examined
431 # subset - of revisions to be examined
432 # x - argument in tree form
432 # x - argument in tree form
433 symbols = {}
433 symbols = {}
434
434
435 # symbols which can't be used for a DoS attack for any given input
435 # symbols which can't be used for a DoS attack for any given input
436 # (e.g. those which accept regexes as plain strings shouldn't be included)
436 # (e.g. those which accept regexes as plain strings shouldn't be included)
437 # functions that just return a lot of changesets (like all) don't count here
437 # functions that just return a lot of changesets (like all) don't count here
438 safesymbols = set()
438 safesymbols = set()
439
439
440 predicate = registrar.revsetpredicate()
440 predicate = registrar.revsetpredicate()
441
441
442 @predicate('_destupdate')
442 @predicate('_destupdate')
443 def _destupdate(repo, subset, x):
443 def _destupdate(repo, subset, x):
444 # experimental revset for update destination
444 # experimental revset for update destination
445 args = getargsdict(x, 'limit', 'clean check')
445 args = getargsdict(x, 'limit', 'clean check')
446 return subset & baseset([destutil.destupdate(repo, **args)[0]])
446 return subset & baseset([destutil.destupdate(repo, **args)[0]])
447
447
448 @predicate('_destmerge')
448 @predicate('_destmerge')
449 def _destmerge(repo, subset, x):
449 def _destmerge(repo, subset, x):
450 # experimental revset for merge destination
450 # experimental revset for merge destination
451 sourceset = None
451 sourceset = None
452 if x is not None:
452 if x is not None:
453 sourceset = getset(repo, fullreposet(repo), x)
453 sourceset = getset(repo, fullreposet(repo), x)
454 return subset & baseset([destutil.destmerge(repo, sourceset=sourceset)])
454 return subset & baseset([destutil.destmerge(repo, sourceset=sourceset)])
455
455
456 @predicate('adds(pattern)', safe=True)
456 @predicate('adds(pattern)', safe=True)
457 def adds(repo, subset, x):
457 def adds(repo, subset, x):
458 """Changesets that add a file matching pattern.
458 """Changesets that add a file matching pattern.
459
459
460 The pattern without explicit kind like ``glob:`` is expected to be
460 The pattern without explicit kind like ``glob:`` is expected to be
461 relative to the current directory and match against a file or a
461 relative to the current directory and match against a file or a
462 directory.
462 directory.
463 """
463 """
464 # i18n: "adds" is a keyword
464 # i18n: "adds" is a keyword
465 pat = getstring(x, _("adds requires a pattern"))
465 pat = getstring(x, _("adds requires a pattern"))
466 return checkstatus(repo, subset, pat, 1)
466 return checkstatus(repo, subset, pat, 1)
467
467
468 @predicate('ancestor(*changeset)', safe=True)
468 @predicate('ancestor(*changeset)', safe=True)
469 def ancestor(repo, subset, x):
469 def ancestor(repo, subset, x):
470 """A greatest common ancestor of the changesets.
470 """A greatest common ancestor of the changesets.
471
471
472 Accepts 0 or more changesets.
472 Accepts 0 or more changesets.
473 Will return empty list when passed no args.
473 Will return empty list when passed no args.
474 Greatest common ancestor of a single changeset is that changeset.
474 Greatest common ancestor of a single changeset is that changeset.
475 """
475 """
476 # i18n: "ancestor" is a keyword
476 # i18n: "ancestor" is a keyword
477 l = getlist(x)
477 l = getlist(x)
478 rl = fullreposet(repo)
478 rl = fullreposet(repo)
479 anc = None
479 anc = None
480
480
481 # (getset(repo, rl, i) for i in l) generates a list of lists
481 # (getset(repo, rl, i) for i in l) generates a list of lists
482 for revs in (getset(repo, rl, i) for i in l):
482 for revs in (getset(repo, rl, i) for i in l):
483 for r in revs:
483 for r in revs:
484 if anc is None:
484 if anc is None:
485 anc = repo[r]
485 anc = repo[r]
486 else:
486 else:
487 anc = anc.ancestor(repo[r])
487 anc = anc.ancestor(repo[r])
488
488
489 if anc is not None and anc.rev() in subset:
489 if anc is not None and anc.rev() in subset:
490 return baseset([anc.rev()])
490 return baseset([anc.rev()])
491 return baseset()
491 return baseset()
492
492
493 def _ancestors(repo, subset, x, followfirst=False):
493 def _ancestors(repo, subset, x, followfirst=False):
494 heads = getset(repo, fullreposet(repo), x)
494 heads = getset(repo, fullreposet(repo), x)
495 if not heads:
495 if not heads:
496 return baseset()
496 return baseset()
497 s = _revancestors(repo, heads, followfirst)
497 s = _revancestors(repo, heads, followfirst)
498 return subset & s
498 return subset & s
499
499
500 @predicate('ancestors(set)', safe=True)
500 @predicate('ancestors(set)', safe=True)
501 def ancestors(repo, subset, x):
501 def ancestors(repo, subset, x):
502 """Changesets that are ancestors of a changeset in set.
502 """Changesets that are ancestors of a changeset in set.
503 """
503 """
504 return _ancestors(repo, subset, x)
504 return _ancestors(repo, subset, x)
505
505
506 @predicate('_firstancestors', safe=True)
506 @predicate('_firstancestors', safe=True)
507 def _firstancestors(repo, subset, x):
507 def _firstancestors(repo, subset, x):
508 # ``_firstancestors(set)``
508 # ``_firstancestors(set)``
509 # Like ``ancestors(set)`` but follows only the first parents.
509 # Like ``ancestors(set)`` but follows only the first parents.
510 return _ancestors(repo, subset, x, followfirst=True)
510 return _ancestors(repo, subset, x, followfirst=True)
511
511
512 def ancestorspec(repo, subset, x, n):
512 def ancestorspec(repo, subset, x, n):
513 """``set~n``
513 """``set~n``
514 Changesets that are the Nth ancestor (first parents only) of a changeset
514 Changesets that are the Nth ancestor (first parents only) of a changeset
515 in set.
515 in set.
516 """
516 """
517 try:
517 try:
518 n = int(n[1])
518 n = int(n[1])
519 except (TypeError, ValueError):
519 except (TypeError, ValueError):
520 raise error.ParseError(_("~ expects a number"))
520 raise error.ParseError(_("~ expects a number"))
521 ps = set()
521 ps = set()
522 cl = repo.changelog
522 cl = repo.changelog
523 for r in getset(repo, fullreposet(repo), x):
523 for r in getset(repo, fullreposet(repo), x):
524 for i in range(n):
524 for i in range(n):
525 r = cl.parentrevs(r)[0]
525 r = cl.parentrevs(r)[0]
526 ps.add(r)
526 ps.add(r)
527 return subset & ps
527 return subset & ps
528
528
529 @predicate('author(string)', safe=True)
529 @predicate('author(string)', safe=True)
530 def author(repo, subset, x):
530 def author(repo, subset, x):
531 """Alias for ``user(string)``.
531 """Alias for ``user(string)``.
532 """
532 """
533 # i18n: "author" is a keyword
533 # i18n: "author" is a keyword
534 n = encoding.lower(getstring(x, _("author requires a string")))
534 n = encoding.lower(getstring(x, _("author requires a string")))
535 kind, pattern, matcher = _substringmatcher(n)
535 kind, pattern, matcher = _substringmatcher(n)
536 return subset.filter(lambda x: matcher(encoding.lower(repo[x].user())),
536 return subset.filter(lambda x: matcher(encoding.lower(repo[x].user())),
537 condrepr=('<user %r>', n))
537 condrepr=('<user %r>', n))
538
538
539 @predicate('bisect(string)', safe=True)
539 @predicate('bisect(string)', safe=True)
540 def bisect(repo, subset, x):
540 def bisect(repo, subset, x):
541 """Changesets marked in the specified bisect status:
541 """Changesets marked in the specified bisect status:
542
542
543 - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
543 - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
544 - ``goods``, ``bads`` : csets topologically good/bad
544 - ``goods``, ``bads`` : csets topologically good/bad
545 - ``range`` : csets taking part in the bisection
545 - ``range`` : csets taking part in the bisection
546 - ``pruned`` : csets that are goods, bads or skipped
546 - ``pruned`` : csets that are goods, bads or skipped
547 - ``untested`` : csets whose fate is yet unknown
547 - ``untested`` : csets whose fate is yet unknown
548 - ``ignored`` : csets ignored due to DAG topology
548 - ``ignored`` : csets ignored due to DAG topology
549 - ``current`` : the cset currently being bisected
549 - ``current`` : the cset currently being bisected
550 """
550 """
551 # i18n: "bisect" is a keyword
551 # i18n: "bisect" is a keyword
552 status = getstring(x, _("bisect requires a string")).lower()
552 status = getstring(x, _("bisect requires a string")).lower()
553 state = set(hbisect.get(repo, status))
553 state = set(hbisect.get(repo, status))
554 return subset & state
554 return subset & state
555
555
556 # Backward-compatibility
556 # Backward-compatibility
557 # - no help entry so that we do not advertise it any more
557 # - no help entry so that we do not advertise it any more
558 @predicate('bisected', safe=True)
558 @predicate('bisected', safe=True)
559 def bisected(repo, subset, x):
559 def bisected(repo, subset, x):
560 return bisect(repo, subset, x)
560 return bisect(repo, subset, x)
561
561
562 @predicate('bookmark([name])', safe=True)
562 @predicate('bookmark([name])', safe=True)
563 def bookmark(repo, subset, x):
563 def bookmark(repo, subset, x):
564 """The named bookmark or all bookmarks.
564 """The named bookmark or all bookmarks.
565
565
566 If `name` starts with `re:`, the remainder of the name is treated as
566 If `name` starts with `re:`, the remainder of the name is treated as
567 a regular expression. To match a bookmark that actually starts with `re:`,
567 a regular expression. To match a bookmark that actually starts with `re:`,
568 use the prefix `literal:`.
568 use the prefix `literal:`.
569 """
569 """
570 # i18n: "bookmark" is a keyword
570 # i18n: "bookmark" is a keyword
571 args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
571 args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
572 if args:
572 if args:
573 bm = getstring(args[0],
573 bm = getstring(args[0],
574 # i18n: "bookmark" is a keyword
574 # i18n: "bookmark" is a keyword
575 _('the argument to bookmark must be a string'))
575 _('the argument to bookmark must be a string'))
576 kind, pattern, matcher = util.stringmatcher(bm)
576 kind, pattern, matcher = util.stringmatcher(bm)
577 bms = set()
577 bms = set()
578 if kind == 'literal':
578 if kind == 'literal':
579 bmrev = repo._bookmarks.get(pattern, None)
579 bmrev = repo._bookmarks.get(pattern, None)
580 if not bmrev:
580 if not bmrev:
581 raise error.RepoLookupError(_("bookmark '%s' does not exist")
581 raise error.RepoLookupError(_("bookmark '%s' does not exist")
582 % pattern)
582 % pattern)
583 bms.add(repo[bmrev].rev())
583 bms.add(repo[bmrev].rev())
584 else:
584 else:
585 matchrevs = set()
585 matchrevs = set()
586 for name, bmrev in repo._bookmarks.iteritems():
586 for name, bmrev in repo._bookmarks.iteritems():
587 if matcher(name):
587 if matcher(name):
588 matchrevs.add(bmrev)
588 matchrevs.add(bmrev)
589 if not matchrevs:
589 if not matchrevs:
590 raise error.RepoLookupError(_("no bookmarks exist"
590 raise error.RepoLookupError(_("no bookmarks exist"
591 " that match '%s'") % pattern)
591 " that match '%s'") % pattern)
592 for bmrev in matchrevs:
592 for bmrev in matchrevs:
593 bms.add(repo[bmrev].rev())
593 bms.add(repo[bmrev].rev())
594 else:
594 else:
595 bms = set([repo[r].rev()
595 bms = set([repo[r].rev()
596 for r in repo._bookmarks.values()])
596 for r in repo._bookmarks.values()])
597 bms -= set([node.nullrev])
597 bms -= set([node.nullrev])
598 return subset & bms
598 return subset & bms
599
599
600 @predicate('branch(string or set)', safe=True)
600 @predicate('branch(string or set)', safe=True)
601 def branch(repo, subset, x):
601 def branch(repo, subset, x):
602 """
602 """
603 All changesets belonging to the given branch or the branches of the given
603 All changesets belonging to the given branch or the branches of the given
604 changesets.
604 changesets.
605
605
606 If `string` starts with `re:`, the remainder of the name is treated as
606 If `string` starts with `re:`, the remainder of the name is treated as
607 a regular expression. To match a branch that actually starts with `re:`,
607 a regular expression. To match a branch that actually starts with `re:`,
608 use the prefix `literal:`.
608 use the prefix `literal:`.
609 """
609 """
610 getbi = repo.revbranchcache().branchinfo
610 getbi = repo.revbranchcache().branchinfo
611
611
612 try:
612 try:
613 b = getstring(x, '')
613 b = getstring(x, '')
614 except error.ParseError:
614 except error.ParseError:
615 # not a string, but another revspec, e.g. tip()
615 # not a string, but another revspec, e.g. tip()
616 pass
616 pass
617 else:
617 else:
618 kind, pattern, matcher = util.stringmatcher(b)
618 kind, pattern, matcher = util.stringmatcher(b)
619 if kind == 'literal':
619 if kind == 'literal':
620 # note: falls through to the revspec case if no branch with
620 # note: falls through to the revspec case if no branch with
621 # this name exists and pattern kind is not specified explicitly
621 # this name exists and pattern kind is not specified explicitly
622 if pattern in repo.branchmap():
622 if pattern in repo.branchmap():
623 return subset.filter(lambda r: matcher(getbi(r)[0]),
623 return subset.filter(lambda r: matcher(getbi(r)[0]),
624 condrepr=('<branch %r>', b))
624 condrepr=('<branch %r>', b))
625 if b.startswith('literal:'):
625 if b.startswith('literal:'):
626 raise error.RepoLookupError(_("branch '%s' does not exist")
626 raise error.RepoLookupError(_("branch '%s' does not exist")
627 % pattern)
627 % pattern)
628 else:
628 else:
629 return subset.filter(lambda r: matcher(getbi(r)[0]),
629 return subset.filter(lambda r: matcher(getbi(r)[0]),
630 condrepr=('<branch %r>', b))
630 condrepr=('<branch %r>', b))
631
631
632 s = getset(repo, fullreposet(repo), x)
632 s = getset(repo, fullreposet(repo), x)
633 b = set()
633 b = set()
634 for r in s:
634 for r in s:
635 b.add(getbi(r)[0])
635 b.add(getbi(r)[0])
636 c = s.__contains__
636 c = s.__contains__
637 return subset.filter(lambda r: c(r) or getbi(r)[0] in b,
637 return subset.filter(lambda r: c(r) or getbi(r)[0] in b,
638 condrepr=lambda: '<branch %r>' % sorted(b))
638 condrepr=lambda: '<branch %r>' % sorted(b))
639
639
640 @predicate('bumped()', safe=True)
640 @predicate('bumped()', safe=True)
641 def bumped(repo, subset, x):
641 def bumped(repo, subset, x):
642 """Mutable changesets marked as successors of public changesets.
642 """Mutable changesets marked as successors of public changesets.
643
643
644 Only non-public and non-obsolete changesets can be `bumped`.
644 Only non-public and non-obsolete changesets can be `bumped`.
645 """
645 """
646 # i18n: "bumped" is a keyword
646 # i18n: "bumped" is a keyword
647 getargs(x, 0, 0, _("bumped takes no arguments"))
647 getargs(x, 0, 0, _("bumped takes no arguments"))
648 bumped = obsmod.getrevs(repo, 'bumped')
648 bumped = obsmod.getrevs(repo, 'bumped')
649 return subset & bumped
649 return subset & bumped
650
650
651 @predicate('bundle()', safe=True)
651 @predicate('bundle()', safe=True)
652 def bundle(repo, subset, x):
652 def bundle(repo, subset, x):
653 """Changesets in the bundle.
653 """Changesets in the bundle.
654
654
655 Bundle must be specified by the -R option."""
655 Bundle must be specified by the -R option."""
656
656
657 try:
657 try:
658 bundlerevs = repo.changelog.bundlerevs
658 bundlerevs = repo.changelog.bundlerevs
659 except AttributeError:
659 except AttributeError:
660 raise error.Abort(_("no bundle provided - specify with -R"))
660 raise error.Abort(_("no bundle provided - specify with -R"))
661 return subset & bundlerevs
661 return subset & bundlerevs
662
662
663 def checkstatus(repo, subset, pat, field):
663 def checkstatus(repo, subset, pat, field):
664 hasset = matchmod.patkind(pat) == 'set'
664 hasset = matchmod.patkind(pat) == 'set'
665
665
666 mcache = [None]
666 mcache = [None]
667 def matches(x):
667 def matches(x):
668 c = repo[x]
668 c = repo[x]
669 if not mcache[0] or hasset:
669 if not mcache[0] or hasset:
670 mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
670 mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
671 m = mcache[0]
671 m = mcache[0]
672 fname = None
672 fname = None
673 if not m.anypats() and len(m.files()) == 1:
673 if not m.anypats() and len(m.files()) == 1:
674 fname = m.files()[0]
674 fname = m.files()[0]
675 if fname is not None:
675 if fname is not None:
676 if fname not in c.files():
676 if fname not in c.files():
677 return False
677 return False
678 else:
678 else:
679 for f in c.files():
679 for f in c.files():
680 if m(f):
680 if m(f):
681 break
681 break
682 else:
682 else:
683 return False
683 return False
684 files = repo.status(c.p1().node(), c.node())[field]
684 files = repo.status(c.p1().node(), c.node())[field]
685 if fname is not None:
685 if fname is not None:
686 if fname in files:
686 if fname in files:
687 return True
687 return True
688 else:
688 else:
689 for f in files:
689 for f in files:
690 if m(f):
690 if m(f):
691 return True
691 return True
692
692
693 return subset.filter(matches, condrepr=('<status[%r] %r>', field, pat))
693 return subset.filter(matches, condrepr=('<status[%r] %r>', field, pat))
694
694
695 - def _children(repo, narrow, parentset):
695 + def _children(repo, subset, parentset):
696 if not parentset:
696 if not parentset:
697 return baseset()
697 return baseset()
698 cs = set()
698 cs = set()
699 pr = repo.changelog.parentrevs
699 pr = repo.changelog.parentrevs
700 minrev = parentset.min()
700 minrev = parentset.min()
701 - for r in narrow:
701 + for r in subset:
702 if r <= minrev:
702 if r <= minrev:
703 continue
703 continue
704 for p in pr(r):
704 for p in pr(r):
705 if p in parentset:
705 if p in parentset:
706 cs.add(r)
706 cs.add(r)
707 - # XXX using a set to feed the baseset is wrong. Sets are not ordered.
708 - # This does not break because of other fullreposet misbehavior.
709 - return baseset(cs)
707 + return baseset(cs)
710
708
711 @predicate('children(set)', safe=True)
709 @predicate('children(set)', safe=True)
712 def children(repo, subset, x):
710 def children(repo, subset, x):
713 """Child changesets of changesets in set.
711 """Child changesets of changesets in set.
714 """
712 """
715 s = getset(repo, fullreposet(repo), x)
713 s = getset(repo, fullreposet(repo), x)
716 cs = _children(repo, subset, s)
714 cs = _children(repo, subset, s)
717 return subset & cs
715 return subset & cs
718
716
719 @predicate('closed()', safe=True)
717 @predicate('closed()', safe=True)
720 def closed(repo, subset, x):
718 def closed(repo, subset, x):
721 """Changeset is closed.
719 """Changeset is closed.
722 """
720 """
723 # i18n: "closed" is a keyword
721 # i18n: "closed" is a keyword
724 getargs(x, 0, 0, _("closed takes no arguments"))
722 getargs(x, 0, 0, _("closed takes no arguments"))
725 return subset.filter(lambda r: repo[r].closesbranch(),
723 return subset.filter(lambda r: repo[r].closesbranch(),
726 condrepr='<branch closed>')
724 condrepr='<branch closed>')
727
725
728 @predicate('contains(pattern)')
726 @predicate('contains(pattern)')
729 def contains(repo, subset, x):
727 def contains(repo, subset, x):
730 """The revision's manifest contains a file matching pattern (but might not
728 """The revision's manifest contains a file matching pattern (but might not
731 modify it). See :hg:`help patterns` for information about file patterns.
729 modify it). See :hg:`help patterns` for information about file patterns.
732
730
733 The pattern without explicit kind like ``glob:`` is expected to be
731 The pattern without explicit kind like ``glob:`` is expected to be
734 relative to the current directory and match against a file exactly
732 relative to the current directory and match against a file exactly
735 for efficiency.
733 for efficiency.
736 """
734 """
737 # i18n: "contains" is a keyword
735 # i18n: "contains" is a keyword
738 pat = getstring(x, _("contains requires a pattern"))
736 pat = getstring(x, _("contains requires a pattern"))
739
737
740 def matches(x):
738 def matches(x):
741 if not matchmod.patkind(pat):
739 if not matchmod.patkind(pat):
742 pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
740 pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
743 if pats in repo[x]:
741 if pats in repo[x]:
744 return True
742 return True
745 else:
743 else:
746 c = repo[x]
744 c = repo[x]
747 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
745 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
748 for f in c.manifest():
746 for f in c.manifest():
749 if m(f):
747 if m(f):
750 return True
748 return True
751 return False
749 return False
752
750
753 return subset.filter(matches, condrepr=('<contains %r>', pat))
751 return subset.filter(matches, condrepr=('<contains %r>', pat))
754
752
755 @predicate('converted([id])', safe=True)
753 @predicate('converted([id])', safe=True)
756 def converted(repo, subset, x):
754 def converted(repo, subset, x):
757 """Changesets converted from the given identifier in the old repository if
755 """Changesets converted from the given identifier in the old repository if
758 present, or all converted changesets if no identifier is specified.
756 present, or all converted changesets if no identifier is specified.
759 """
757 """
760
758
761 # There is exactly no chance of resolving the revision, so do a simple
759 # There is exactly no chance of resolving the revision, so do a simple
762 # string compare and hope for the best
760 # string compare and hope for the best
763
761
764 rev = None
762 rev = None
765 # i18n: "converted" is a keyword
763 # i18n: "converted" is a keyword
766 l = getargs(x, 0, 1, _('converted takes one or no arguments'))
764 l = getargs(x, 0, 1, _('converted takes one or no arguments'))
767 if l:
765 if l:
768 # i18n: "converted" is a keyword
766 # i18n: "converted" is a keyword
769 rev = getstring(l[0], _('converted requires a revision'))
767 rev = getstring(l[0], _('converted requires a revision'))
770
768
771 def _matchvalue(r):
769 def _matchvalue(r):
772 source = repo[r].extra().get('convert_revision', None)
770 source = repo[r].extra().get('convert_revision', None)
773 return source is not None and (rev is None or source.startswith(rev))
771 return source is not None and (rev is None or source.startswith(rev))
774
772
775 return subset.filter(lambda r: _matchvalue(r),
773 return subset.filter(lambda r: _matchvalue(r),
776 condrepr=('<converted %r>', rev))
774 condrepr=('<converted %r>', rev))
777
775
778 @predicate('date(interval)', safe=True)
776 @predicate('date(interval)', safe=True)
779 def date(repo, subset, x):
777 def date(repo, subset, x):
780 """Changesets within the interval, see :hg:`help dates`.
778 """Changesets within the interval, see :hg:`help dates`.
781 """
779 """
782 # i18n: "date" is a keyword
780 # i18n: "date" is a keyword
783 ds = getstring(x, _("date requires a string"))
781 ds = getstring(x, _("date requires a string"))
784 dm = util.matchdate(ds)
782 dm = util.matchdate(ds)
785 return subset.filter(lambda x: dm(repo[x].date()[0]),
783 return subset.filter(lambda x: dm(repo[x].date()[0]),
786 condrepr=('<date %r>', ds))
784 condrepr=('<date %r>', ds))
787
785
788 @predicate('desc(string)', safe=True)
786 @predicate('desc(string)', safe=True)
789 def desc(repo, subset, x):
787 def desc(repo, subset, x):
790 """Search commit message for string. The match is case-insensitive.
788 """Search commit message for string. The match is case-insensitive.
791 """
789 """
792 # i18n: "desc" is a keyword
790 # i18n: "desc" is a keyword
793 ds = encoding.lower(getstring(x, _("desc requires a string")))
791 ds = encoding.lower(getstring(x, _("desc requires a string")))
794
792
795 def matches(x):
793 def matches(x):
796 c = repo[x]
794 c = repo[x]
797 return ds in encoding.lower(c.description())
795 return ds in encoding.lower(c.description())
798
796
799 return subset.filter(matches, condrepr=('<desc %r>', ds))
797 return subset.filter(matches, condrepr=('<desc %r>', ds))
800
798
801 def _descendants(repo, subset, x, followfirst=False):
799 def _descendants(repo, subset, x, followfirst=False):
802 roots = getset(repo, fullreposet(repo), x)
800 roots = getset(repo, fullreposet(repo), x)
803 if not roots:
801 if not roots:
804 return baseset()
802 return baseset()
805 s = _revdescendants(repo, roots, followfirst)
803 s = _revdescendants(repo, roots, followfirst)
806
804
807 # Both sets need to be ascending in order to lazily return the union
805 # Both sets need to be ascending in order to lazily return the union
808 # in the correct order.
806 # in the correct order.
809 base = subset & roots
807 base = subset & roots
810 desc = subset & s
808 desc = subset & s
811 result = base + desc
809 result = base + desc
812 if subset.isascending():
810 if subset.isascending():
813 result.sort()
811 result.sort()
814 elif subset.isdescending():
812 elif subset.isdescending():
815 result.sort(reverse=True)
813 result.sort(reverse=True)
816 else:
814 else:
817 result = subset & result
815 result = subset & result
818 return result
816 return result
819
817
820 @predicate('descendants(set)', safe=True)
818 @predicate('descendants(set)', safe=True)
821 def descendants(repo, subset, x):
819 def descendants(repo, subset, x):
822 """Changesets which are descendants of changesets in set.
820 """Changesets which are descendants of changesets in set.
823 """
821 """
824 return _descendants(repo, subset, x)
822 return _descendants(repo, subset, x)
825
823
826 @predicate('_firstdescendants', safe=True)
824 @predicate('_firstdescendants', safe=True)
827 def _firstdescendants(repo, subset, x):
825 def _firstdescendants(repo, subset, x):
828 # ``_firstdescendants(set)``
826 # ``_firstdescendants(set)``
829 # Like ``descendants(set)`` but follows only the first parents.
827 # Like ``descendants(set)`` but follows only the first parents.
830 return _descendants(repo, subset, x, followfirst=True)
828 return _descendants(repo, subset, x, followfirst=True)
831
829
832 @predicate('destination([set])', safe=True)
830 @predicate('destination([set])', safe=True)
833 def destination(repo, subset, x):
831 def destination(repo, subset, x):
834 """Changesets that were created by a graft, transplant or rebase operation,
832 """Changesets that were created by a graft, transplant or rebase operation,
835 with the given revisions specified as the source. Omitting the optional set
833 with the given revisions specified as the source. Omitting the optional set
836 is the same as passing all().
834 is the same as passing all().
837 """
835 """
838 if x is not None:
836 if x is not None:
839 sources = getset(repo, fullreposet(repo), x)
837 sources = getset(repo, fullreposet(repo), x)
840 else:
838 else:
841 sources = fullreposet(repo)
839 sources = fullreposet(repo)
842
840
843 dests = set()
841 dests = set()
844
842
845 # subset contains all of the possible destinations that can be returned, so
843 # subset contains all of the possible destinations that can be returned, so
846 # iterate over them and see if their source(s) were provided in the arg set.
844 # iterate over them and see if their source(s) were provided in the arg set.
847 # Even if the immediate src of r is not in the arg set, src's source (or
845 # Even if the immediate src of r is not in the arg set, src's source (or
848 # further back) may be. Scanning back further than the immediate src allows
846 # further back) may be. Scanning back further than the immediate src allows
849 # transitive transplants and rebases to yield the same results as transitive
847 # transitive transplants and rebases to yield the same results as transitive
850 # grafts.
848 # grafts.
851 for r in subset:
849 for r in subset:
852 src = _getrevsource(repo, r)
850 src = _getrevsource(repo, r)
853 lineage = None
851 lineage = None
854
852
855 while src is not None:
853 while src is not None:
856 if lineage is None:
854 if lineage is None:
857 lineage = list()
855 lineage = list()
858
856
859 lineage.append(r)
857 lineage.append(r)
860
858
861 # The visited lineage is a match if the current source is in the arg
859 # The visited lineage is a match if the current source is in the arg
862 # set. Since every candidate dest is visited by way of iterating
860 # set. Since every candidate dest is visited by way of iterating
863 # subset, any dests further back in the lineage will be tested by a
861 # subset, any dests further back in the lineage will be tested by a
864 # different iteration over subset. Likewise, if the src was already
862 # different iteration over subset. Likewise, if the src was already
865 # selected, the current lineage can be selected without going back
863 # selected, the current lineage can be selected without going back
866 # further.
864 # further.
867 if src in sources or src in dests:
865 if src in sources or src in dests:
868 dests.update(lineage)
866 dests.update(lineage)
869 break
867 break
870
868
871 r = src
869 r = src
872 src = _getrevsource(repo, r)
870 src = _getrevsource(repo, r)
873
871
874 return subset.filter(dests.__contains__,
872 return subset.filter(dests.__contains__,
875 condrepr=lambda: '<destination %r>' % sorted(dests))
873 condrepr=lambda: '<destination %r>' % sorted(dests))
876
874
877 @predicate('divergent()', safe=True)
875 @predicate('divergent()', safe=True)
878 def divergent(repo, subset, x):
876 def divergent(repo, subset, x):
879 """
877 """
880 Final successors of changesets with an alternative set of final successors.
878 Final successors of changesets with an alternative set of final successors.
881 """
879 """
882 # i18n: "divergent" is a keyword
880 # i18n: "divergent" is a keyword
883 getargs(x, 0, 0, _("divergent takes no arguments"))
881 getargs(x, 0, 0, _("divergent takes no arguments"))
884 divergent = obsmod.getrevs(repo, 'divergent')
882 divergent = obsmod.getrevs(repo, 'divergent')
885 return subset & divergent
883 return subset & divergent
886
884
887 @predicate('extinct()', safe=True)
885 @predicate('extinct()', safe=True)
888 def extinct(repo, subset, x):
886 def extinct(repo, subset, x):
889 """Obsolete changesets with obsolete descendants only.
887 """Obsolete changesets with obsolete descendants only.
890 """
888 """
891 # i18n: "extinct" is a keyword
889 # i18n: "extinct" is a keyword
892 getargs(x, 0, 0, _("extinct takes no arguments"))
890 getargs(x, 0, 0, _("extinct takes no arguments"))
893 extincts = obsmod.getrevs(repo, 'extinct')
891 extincts = obsmod.getrevs(repo, 'extinct')
894 return subset & extincts
892 return subset & extincts
895
893
896 @predicate('extra(label, [value])', safe=True)
894 @predicate('extra(label, [value])', safe=True)
897 def extra(repo, subset, x):
895 def extra(repo, subset, x):
898 """Changesets with the given label in the extra metadata, with the given
896 """Changesets with the given label in the extra metadata, with the given
899 optional value.
897 optional value.
900
898
901 If `value` starts with `re:`, the remainder of the value is treated as
899 If `value` starts with `re:`, the remainder of the value is treated as
902 a regular expression. To match a value that actually starts with `re:`,
900 a regular expression. To match a value that actually starts with `re:`,
903 use the prefix `literal:`.
901 use the prefix `literal:`.
904 """
902 """
905 args = getargsdict(x, 'extra', 'label value')
903 args = getargsdict(x, 'extra', 'label value')
906 if 'label' not in args:
904 if 'label' not in args:
907 # i18n: "extra" is a keyword
905 # i18n: "extra" is a keyword
908 raise error.ParseError(_('extra takes at least 1 argument'))
906 raise error.ParseError(_('extra takes at least 1 argument'))
909 # i18n: "extra" is a keyword
907 # i18n: "extra" is a keyword
910 label = getstring(args['label'], _('first argument to extra must be '
908 label = getstring(args['label'], _('first argument to extra must be '
911 'a string'))
909 'a string'))
912 value = None
910 value = None
913
911
914 if 'value' in args:
912 if 'value' in args:
915 # i18n: "extra" is a keyword
913 # i18n: "extra" is a keyword
916 value = getstring(args['value'], _('second argument to extra must be '
914 value = getstring(args['value'], _('second argument to extra must be '
917 'a string'))
915 'a string'))
918 kind, value, matcher = util.stringmatcher(value)
916 kind, value, matcher = util.stringmatcher(value)
919
917
920 def _matchvalue(r):
918 def _matchvalue(r):
921 extra = repo[r].extra()
919 extra = repo[r].extra()
922 return label in extra and (value is None or matcher(extra[label]))
920 return label in extra and (value is None or matcher(extra[label]))
923
921
924 return subset.filter(lambda r: _matchvalue(r),
922 return subset.filter(lambda r: _matchvalue(r),
925 condrepr=('<extra[%r] %r>', label, value))
923 condrepr=('<extra[%r] %r>', label, value))
926
924
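# A minimal, self-contained sketch of the 're:'/'literal:' prefix handling
# that extra() relies on (via util.stringmatcher) for its optional value.
# Illustrative only; _stringmatcher_sketch is a hypothetical stand-in, not
# the actual Mercurial helper.
import re

def _stringmatcher_sketch(pattern):
    """Return (kind, pattern, matcher) for a value pattern."""
    if pattern.startswith('re:'):
        regex = re.compile(pattern[3:])
        return 're', pattern[3:], lambda s: regex.search(s) is not None
    if pattern.startswith('literal:'):
        pattern = pattern[8:]
    return 'literal', pattern, lambda s: s == pattern

# _stringmatcher_sketch('re:^rebase')[2]('rebase_source')  -> True
# _stringmatcher_sketch('literal:re:x')[2]('re:x')         -> True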
927 @predicate('filelog(pattern)', safe=True)
925 @predicate('filelog(pattern)', safe=True)
928 def filelog(repo, subset, x):
926 def filelog(repo, subset, x):
929 """Changesets connected to the specified filelog.
927 """Changesets connected to the specified filelog.
930
928
931 For performance reasons, visits only revisions mentioned in the file-level
929 For performance reasons, visits only revisions mentioned in the file-level
932 filelog, rather than filtering through all changesets (much faster, but
930 filelog, rather than filtering through all changesets (much faster, but
933 doesn't include deletes or duplicate changes). For a slower, more accurate
931 doesn't include deletes or duplicate changes). For a slower, more accurate
934 result, use ``file()``.
932 result, use ``file()``.
935
933
936 The pattern without explicit kind like ``glob:`` is expected to be
934 The pattern without explicit kind like ``glob:`` is expected to be
937 relative to the current directory and match against a file exactly
935 relative to the current directory and match against a file exactly
938 for efficiency.
936 for efficiency.
939
937
940 If some linkrev points to revisions filtered by the current repoview, we'll
938 If some linkrev points to revisions filtered by the current repoview, we'll
941 work around it to return a non-filtered value.
939 work around it to return a non-filtered value.
942 """
940 """
943
941
944 # i18n: "filelog" is a keyword
942 # i18n: "filelog" is a keyword
945 pat = getstring(x, _("filelog requires a pattern"))
943 pat = getstring(x, _("filelog requires a pattern"))
946 s = set()
944 s = set()
947 cl = repo.changelog
945 cl = repo.changelog
948
946
949 if not matchmod.patkind(pat):
947 if not matchmod.patkind(pat):
950 f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
948 f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
951 files = [f]
949 files = [f]
952 else:
950 else:
953 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
951 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
954 files = (f for f in repo[None] if m(f))
952 files = (f for f in repo[None] if m(f))
955
953
956 for f in files:
954 for f in files:
957 fl = repo.file(f)
955 fl = repo.file(f)
958 known = {}
956 known = {}
959 scanpos = 0
957 scanpos = 0
960 for fr in list(fl):
958 for fr in list(fl):
961 fn = fl.node(fr)
959 fn = fl.node(fr)
962 if fn in known:
960 if fn in known:
963 s.add(known[fn])
961 s.add(known[fn])
964 continue
962 continue
965
963
966 lr = fl.linkrev(fr)
964 lr = fl.linkrev(fr)
967 if lr in cl:
965 if lr in cl:
968 s.add(lr)
966 s.add(lr)
969 elif scanpos is not None:
967 elif scanpos is not None:
970 # lowest matching changeset is filtered, scan further
968 # lowest matching changeset is filtered, scan further
971 # ahead in changelog
969 # ahead in changelog
972 start = max(lr, scanpos) + 1
970 start = max(lr, scanpos) + 1
973 scanpos = None
971 scanpos = None
974 for r in cl.revs(start):
972 for r in cl.revs(start):
975 # minimize parsing of non-matching entries
973 # minimize parsing of non-matching entries
976 if f in cl.revision(r) and f in cl.readfiles(r):
974 if f in cl.revision(r) and f in cl.readfiles(r):
977 try:
975 try:
978 # try to use manifest delta fastpath
976 # try to use manifest delta fastpath
979 n = repo[r].filenode(f)
977 n = repo[r].filenode(f)
980 if n not in known:
978 if n not in known:
981 if n == fn:
979 if n == fn:
982 s.add(r)
980 s.add(r)
983 scanpos = r
981 scanpos = r
984 break
982 break
985 else:
983 else:
986 known[n] = r
984 known[n] = r
987 except error.ManifestLookupError:
985 except error.ManifestLookupError:
988 # deletion in changelog
986 # deletion in changelog
989 continue
987 continue
990
988
991 return subset & s
989 return subset & s
992
990
993 @predicate('first(set, [n])', safe=True)
991 @predicate('first(set, [n])', safe=True)
994 def first(repo, subset, x):
992 def first(repo, subset, x):
995 """An alias for limit().
993 """An alias for limit().
996 """
994 """
997 return limit(repo, subset, x)
995 return limit(repo, subset, x)
998
996
999 def _follow(repo, subset, x, name, followfirst=False):
997 def _follow(repo, subset, x, name, followfirst=False):
1000 l = getargs(x, 0, 1, _("%s takes no arguments or a pattern") % name)
998 l = getargs(x, 0, 1, _("%s takes no arguments or a pattern") % name)
1001 c = repo['.']
999 c = repo['.']
1002 if l:
1000 if l:
1003 x = getstring(l[0], _("%s expected a pattern") % name)
1001 x = getstring(l[0], _("%s expected a pattern") % name)
1004 matcher = matchmod.match(repo.root, repo.getcwd(), [x],
1002 matcher = matchmod.match(repo.root, repo.getcwd(), [x],
1005 ctx=repo[None], default='path')
1003 ctx=repo[None], default='path')
1006
1004
1007 files = c.manifest().walk(matcher)
1005 files = c.manifest().walk(matcher)
1008
1006
1009 s = set()
1007 s = set()
1010 for fname in files:
1008 for fname in files:
1011 fctx = c[fname]
1009 fctx = c[fname]
1012 s = s.union(set(c.rev() for c in fctx.ancestors(followfirst)))
1010 s = s.union(set(c.rev() for c in fctx.ancestors(followfirst)))
1013 # include the revision responsible for the most recent version
1011 # include the revision responsible for the most recent version
1014 s.add(fctx.introrev())
1012 s.add(fctx.introrev())
1015 else:
1013 else:
1016 s = _revancestors(repo, baseset([c.rev()]), followfirst)
1014 s = _revancestors(repo, baseset([c.rev()]), followfirst)
1017
1015
1018 return subset & s
1016 return subset & s
1019
1017
1020 @predicate('follow([pattern])', safe=True)
1018 @predicate('follow([pattern])', safe=True)
1021 def follow(repo, subset, x):
1019 def follow(repo, subset, x):
1022 """
1020 """
1023 An alias for ``::.`` (ancestors of the working directory's first parent).
1021 An alias for ``::.`` (ancestors of the working directory's first parent).
1024 If a pattern is specified, the histories of files matching the given
1022 If a pattern is specified, the histories of files matching the given
1025 pattern are followed, including copies.
1023 pattern are followed, including copies.
1026 """
1024 """
1027 return _follow(repo, subset, x, 'follow')
1025 return _follow(repo, subset, x, 'follow')
1028
1026
1029 @predicate('_followfirst', safe=True)
1027 @predicate('_followfirst', safe=True)
1030 def _followfirst(repo, subset, x):
1028 def _followfirst(repo, subset, x):
1031 # ``followfirst([pattern])``
1029 # ``followfirst([pattern])``
1032 # Like ``follow([pattern])`` but follows only the first parent of
1030 # Like ``follow([pattern])`` but follows only the first parent of
1033 # every revision or file revision.
1031 # every revision or file revision.
1034 return _follow(repo, subset, x, '_followfirst', followfirst=True)
1032 return _follow(repo, subset, x, '_followfirst', followfirst=True)
1035
1033
1036 @predicate('all()', safe=True)
1034 @predicate('all()', safe=True)
1037 def getall(repo, subset, x):
1035 def getall(repo, subset, x):
1038 """All changesets, the same as ``0:tip``.
1036 """All changesets, the same as ``0:tip``.
1039 """
1037 """
1040 # i18n: "all" is a keyword
1038 # i18n: "all" is a keyword
1041 getargs(x, 0, 0, _("all takes no arguments"))
1039 getargs(x, 0, 0, _("all takes no arguments"))
1042 return subset & spanset(repo) # drop "null" if any
1040 return subset & spanset(repo) # drop "null" if any
1043
1041
1044 @predicate('grep(regex)')
1042 @predicate('grep(regex)')
1045 def grep(repo, subset, x):
1043 def grep(repo, subset, x):
1046 """Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
1044 """Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
1047 to ensure special escape characters are handled correctly. Unlike
1045 to ensure special escape characters are handled correctly. Unlike
1048 ``keyword(string)``, the match is case-sensitive.
1046 ``keyword(string)``, the match is case-sensitive.
1049 """
1047 """
1050 try:
1048 try:
1051 # i18n: "grep" is a keyword
1049 # i18n: "grep" is a keyword
1052 gr = re.compile(getstring(x, _("grep requires a string")))
1050 gr = re.compile(getstring(x, _("grep requires a string")))
1053 except re.error as e:
1051 except re.error as e:
1054 raise error.ParseError(_('invalid match pattern: %s') % e)
1052 raise error.ParseError(_('invalid match pattern: %s') % e)
1055
1053
1056 def matches(x):
1054 def matches(x):
1057 c = repo[x]
1055 c = repo[x]
1058 for e in c.files() + [c.user(), c.description()]:
1056 for e in c.files() + [c.user(), c.description()]:
1059 if gr.search(e):
1057 if gr.search(e):
1060 return True
1058 return True
1061 return False
1059 return False
1062
1060
1063 return subset.filter(matches, condrepr=('<grep %r>', gr.pattern))
1061 return subset.filter(matches, condrepr=('<grep %r>', gr.pattern))
1064
1062
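# A stand-alone sketch of the matching rule used by grep() above: a revision
# matches when the regex is found in any changed file name, the user, or the
# description. The plain 'changeset' dict below is hypothetical test data.
import re

def grep_matches_sketch(regex, changeset):
    gr = re.compile(regex)
    candidates = changeset['files'] + [changeset['user'], changeset['description']]
    return any(gr.search(text) for text in candidates)

# grep_matches_sketch(r'bug\s*\d+', {'files': ['a.txt'], 'user': 'alice',
#                                    'description': 'fix bug 42'})  -> True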
1065 @predicate('_matchfiles', safe=True)
1063 @predicate('_matchfiles', safe=True)
1066 def _matchfiles(repo, subset, x):
1064 def _matchfiles(repo, subset, x):
1067 # _matchfiles takes a revset list of prefixed arguments:
1065 # _matchfiles takes a revset list of prefixed arguments:
1068 #
1066 #
1069 # [p:foo, i:bar, x:baz]
1067 # [p:foo, i:bar, x:baz]
1070 #
1068 #
1071 # builds a match object from them and filters subset. Allowed
1069 # builds a match object from them and filters subset. Allowed
1072 # prefixes are 'p:' for regular patterns, 'i:' for include
1070 # prefixes are 'p:' for regular patterns, 'i:' for include
1073 # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
1071 # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
1074 # a revision identifier, or the empty string to reference the
1072 # a revision identifier, or the empty string to reference the
1075 # working directory, from which the match object is
1073 # working directory, from which the match object is
1076 # initialized. Use 'd:' to set the default matching mode, default
1074 # initialized. Use 'd:' to set the default matching mode, default
1077 # to 'glob'. At most one 'r:' and 'd:' argument can be passed.
1075 # to 'glob'. At most one 'r:' and 'd:' argument can be passed.
1078
1076
1079 l = getargs(x, 1, -1, "_matchfiles requires at least one argument")
1077 l = getargs(x, 1, -1, "_matchfiles requires at least one argument")
1080 pats, inc, exc = [], [], []
1078 pats, inc, exc = [], [], []
1081 rev, default = None, None
1079 rev, default = None, None
1082 for arg in l:
1080 for arg in l:
1083 s = getstring(arg, "_matchfiles requires string arguments")
1081 s = getstring(arg, "_matchfiles requires string arguments")
1084 prefix, value = s[:2], s[2:]
1082 prefix, value = s[:2], s[2:]
1085 if prefix == 'p:':
1083 if prefix == 'p:':
1086 pats.append(value)
1084 pats.append(value)
1087 elif prefix == 'i:':
1085 elif prefix == 'i:':
1088 inc.append(value)
1086 inc.append(value)
1089 elif prefix == 'x:':
1087 elif prefix == 'x:':
1090 exc.append(value)
1088 exc.append(value)
1091 elif prefix == 'r:':
1089 elif prefix == 'r:':
1092 if rev is not None:
1090 if rev is not None:
1093 raise error.ParseError('_matchfiles expected at most one '
1091 raise error.ParseError('_matchfiles expected at most one '
1094 'revision')
1092 'revision')
1095 if value != '': # empty means working directory; leave rev as None
1093 if value != '': # empty means working directory; leave rev as None
1096 rev = value
1094 rev = value
1097 elif prefix == 'd:':
1095 elif prefix == 'd:':
1098 if default is not None:
1096 if default is not None:
1099 raise error.ParseError('_matchfiles expected at most one '
1097 raise error.ParseError('_matchfiles expected at most one '
1100 'default mode')
1098 'default mode')
1101 default = value
1099 default = value
1102 else:
1100 else:
1103 raise error.ParseError('invalid _matchfiles prefix: %s' % prefix)
1101 raise error.ParseError('invalid _matchfiles prefix: %s' % prefix)
1104 if not default:
1102 if not default:
1105 default = 'glob'
1103 default = 'glob'
1106
1104
1107 m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
1105 m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
1108 exclude=exc, ctx=repo[rev], default=default)
1106 exclude=exc, ctx=repo[rev], default=default)
1109
1107
1110 # This directly reads the changelog data, as creating a changectx for all
1108 # This directly reads the changelog data, as creating a changectx for all
1111 # revisions is quite expensive.
1109 # revisions is quite expensive.
1112 getfiles = repo.changelog.readfiles
1110 getfiles = repo.changelog.readfiles
1113 wdirrev = node.wdirrev
1111 wdirrev = node.wdirrev
1114 def matches(x):
1112 def matches(x):
1115 if x == wdirrev:
1113 if x == wdirrev:
1116 files = repo[x].files()
1114 files = repo[x].files()
1117 else:
1115 else:
1118 files = getfiles(x)
1116 files = getfiles(x)
1119 for f in files:
1117 for f in files:
1120 if m(f):
1118 if m(f):
1121 return True
1119 return True
1122 return False
1120 return False
1123
1121
1124 return subset.filter(matches,
1122 return subset.filter(matches,
1125 condrepr=('<matchfiles patterns=%r, include=%r '
1123 condrepr=('<matchfiles patterns=%r, include=%r '
1126 'exclude=%r, default=%r, rev=%r>',
1124 'exclude=%r, default=%r, rev=%r>',
1127 pats, inc, exc, default, rev))
1125 pats, inc, exc, default, rev))
1128
1126
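# A self-contained sketch of the prefixed-argument parsing performed by
# _matchfiles ('p:', 'i:', 'x:', 'r:', 'd:'). Purely illustrative; error
# handling is reduced to ValueError instead of error.ParseError.
def parse_matchfiles_args_sketch(args):
    pats, inc, exc = [], [], []
    rev, default = None, None
    for s in args:
        prefix, value = s[:2], s[2:]
        if prefix == 'p:':
            pats.append(value)
        elif prefix == 'i:':
            inc.append(value)
        elif prefix == 'x:':
            exc.append(value)
        elif prefix == 'r:':
            if rev is not None:
                raise ValueError('at most one revision')
            if value:  # empty string means working directory
                rev = value
        elif prefix == 'd:':
            if default is not None:
                raise ValueError('at most one default mode')
            default = value
        else:
            raise ValueError('invalid prefix: %s' % prefix)
    return pats, inc, exc, rev, default or 'glob'

# parse_matchfiles_args_sketch(['p:src/*.py', 'x:tests/**', 'd:glob'])
#   -> (['src/*.py'], [], ['tests/**'], None, 'glob')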
1129 @predicate('file(pattern)', safe=True)
1127 @predicate('file(pattern)', safe=True)
1130 def hasfile(repo, subset, x):
1128 def hasfile(repo, subset, x):
1131 """Changesets affecting files matched by pattern.
1129 """Changesets affecting files matched by pattern.
1132
1130
1133 For a faster but less accurate result, consider using ``filelog()``
1131 For a faster but less accurate result, consider using ``filelog()``
1134 instead.
1132 instead.
1135
1133
1136 This predicate uses ``glob:`` as the default kind of pattern.
1134 This predicate uses ``glob:`` as the default kind of pattern.
1137 """
1135 """
1138 # i18n: "file" is a keyword
1136 # i18n: "file" is a keyword
1139 pat = getstring(x, _("file requires a pattern"))
1137 pat = getstring(x, _("file requires a pattern"))
1140 return _matchfiles(repo, subset, ('string', 'p:' + pat))
1138 return _matchfiles(repo, subset, ('string', 'p:' + pat))
1141
1139
1142 @predicate('head()', safe=True)
1140 @predicate('head()', safe=True)
1143 def head(repo, subset, x):
1141 def head(repo, subset, x):
1144 """Changeset is a named branch head.
1142 """Changeset is a named branch head.
1145 """
1143 """
1146 # i18n: "head" is a keyword
1144 # i18n: "head" is a keyword
1147 getargs(x, 0, 0, _("head takes no arguments"))
1145 getargs(x, 0, 0, _("head takes no arguments"))
1148 hs = set()
1146 hs = set()
1149 cl = repo.changelog
1147 cl = repo.changelog
1150 for b, ls in repo.branchmap().iteritems():
1148 for b, ls in repo.branchmap().iteritems():
1151 hs.update(cl.rev(h) for h in ls)
1149 hs.update(cl.rev(h) for h in ls)
1152 # XXX using a set to feed the baseset is wrong. Sets are not ordered.
1153 # This does not break because of other fullreposet misbehavior.
1154 # XXX We should combine with subset first: 'subset & baseset(...)'. This is
1150 # XXX We should combine with subset first: 'subset & baseset(...)'. This is
1155 # necessary to ensure we preserve the order in subset.
1151 # necessary to ensure we preserve the order in subset.
1156 return baseset(hs) & subset
1152 return baseset(hs) & subset
1157
1153
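# A toy restatement of head(): gather every branch head recorded in the
# branch map, then intersect with the subset under consideration. The
# branchmap dict of branch name -> head revisions is hypothetical data.
def branch_heads_sketch(branchmap, subset):
    hs = set()
    for branch, heads in branchmap.items():
        hs.update(heads)
    return sorted(hs & set(subset))

# branch_heads_sketch({'default': [7, 9], 'stable': [8]}, range(0, 9))  -> [7, 8]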
1158 @predicate('heads(set)', safe=True)
1154 @predicate('heads(set)', safe=True)
1159 def heads(repo, subset, x):
1155 def heads(repo, subset, x):
1160 """Members of set with no children in set.
1156 """Members of set with no children in set.
1161 """
1157 """
1162 s = getset(repo, subset, x)
1158 s = getset(repo, subset, x)
1163 ps = parents(repo, subset, x)
1159 ps = parents(repo, subset, x)
1164 return s - ps
1160 return s - ps
1165
1161
1166 @predicate('hidden()', safe=True)
1162 @predicate('hidden()', safe=True)
1167 def hidden(repo, subset, x):
1163 def hidden(repo, subset, x):
1168 """Hidden changesets.
1164 """Hidden changesets.
1169 """
1165 """
1170 # i18n: "hidden" is a keyword
1166 # i18n: "hidden" is a keyword
1171 getargs(x, 0, 0, _("hidden takes no arguments"))
1167 getargs(x, 0, 0, _("hidden takes no arguments"))
1172 hiddenrevs = repoview.filterrevs(repo, 'visible')
1168 hiddenrevs = repoview.filterrevs(repo, 'visible')
1173 return subset & hiddenrevs
1169 return subset & hiddenrevs
1174
1170
1175 @predicate('keyword(string)', safe=True)
1171 @predicate('keyword(string)', safe=True)
1176 def keyword(repo, subset, x):
1172 def keyword(repo, subset, x):
1177 """Search commit message, user name, and names of changed files for
1173 """Search commit message, user name, and names of changed files for
1178 string. The match is case-insensitive.
1174 string. The match is case-insensitive.
1179 """
1175 """
1180 # i18n: "keyword" is a keyword
1176 # i18n: "keyword" is a keyword
1181 kw = encoding.lower(getstring(x, _("keyword requires a string")))
1177 kw = encoding.lower(getstring(x, _("keyword requires a string")))
1182
1178
1183 def matches(r):
1179 def matches(r):
1184 c = repo[r]
1180 c = repo[r]
1185 return any(kw in encoding.lower(t)
1181 return any(kw in encoding.lower(t)
1186 for t in c.files() + [c.user(), c.description()])
1182 for t in c.files() + [c.user(), c.description()])
1187
1183
1188 return subset.filter(matches, condrepr=('<keyword %r>', kw))
1184 return subset.filter(matches, condrepr=('<keyword %r>', kw))
1189
1185
1190 @predicate('limit(set[, n[, offset]])', safe=True)
1186 @predicate('limit(set[, n[, offset]])', safe=True)
1191 def limit(repo, subset, x):
1187 def limit(repo, subset, x):
1192 """First n members of set, defaulting to 1, starting from offset.
1188 """First n members of set, defaulting to 1, starting from offset.
1193 """
1189 """
1194 args = getargsdict(x, 'limit', 'set n offset')
1190 args = getargsdict(x, 'limit', 'set n offset')
1195 if 'set' not in args:
1191 if 'set' not in args:
1196 # i18n: "limit" is a keyword
1192 # i18n: "limit" is a keyword
1197 raise error.ParseError(_("limit requires one to three arguments"))
1193 raise error.ParseError(_("limit requires one to three arguments"))
1198 try:
1194 try:
1199 lim, ofs = 1, 0
1195 lim, ofs = 1, 0
1200 if 'n' in args:
1196 if 'n' in args:
1201 # i18n: "limit" is a keyword
1197 # i18n: "limit" is a keyword
1202 lim = int(getstring(args['n'], _("limit requires a number")))
1198 lim = int(getstring(args['n'], _("limit requires a number")))
1203 if 'offset' in args:
1199 if 'offset' in args:
1204 # i18n: "limit" is a keyword
1200 # i18n: "limit" is a keyword
1205 ofs = int(getstring(args['offset'], _("limit requires a number")))
1201 ofs = int(getstring(args['offset'], _("limit requires a number")))
1206 if ofs < 0:
1202 if ofs < 0:
1207 raise error.ParseError(_("negative offset"))
1203 raise error.ParseError(_("negative offset"))
1208 except (TypeError, ValueError):
1204 except (TypeError, ValueError):
1209 # i18n: "limit" is a keyword
1205 # i18n: "limit" is a keyword
1210 raise error.ParseError(_("limit expects a number"))
1206 raise error.ParseError(_("limit expects a number"))
1211 os = getset(repo, fullreposet(repo), args['set'])
1207 os = getset(repo, fullreposet(repo), args['set'])
1212 result = []
1208 result = []
1213 it = iter(os)
1209 it = iter(os)
1214 for x in xrange(ofs):
1210 for x in xrange(ofs):
1215 y = next(it, None)
1211 y = next(it, None)
1216 if y is None:
1212 if y is None:
1217 break
1213 break
1218 for x in xrange(lim):
1214 for x in xrange(lim):
1219 y = next(it, None)
1215 y = next(it, None)
1220 if y is None:
1216 if y is None:
1221 break
1217 break
1222 elif y in subset:
1218 elif y in subset:
1223 result.append(y)
1219 result.append(y)
1224 return baseset(result, datarepr=('<limit n=%d, offset=%d, %r, %r>',
1220 return baseset(result, datarepr=('<limit n=%d, offset=%d, %r, %r>',
1225 lim, ofs, subset, os))
1221 lim, ofs, subset, os))
1226
1222
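# The skip-then-take pattern used by limit() above, shown on a plain Python
# iterable. 'allowed' stands in for the 'y in subset' membership test; a rev
# outside it is dropped but still consumes one of the 'lim' slots, matching
# the loop above.
def limit_sketch(revs, lim=1, offset=0, allowed=None):
    it = iter(revs)
    for _ in range(offset):           # discard the first 'offset' items
        if next(it, None) is None:
            break
    result = []
    for _ in range(lim):              # then keep up to 'lim' items
        y = next(it, None)
        if y is None:
            break
        if allowed is None or y in allowed:
            result.append(y)
    return result

# limit_sketch([10, 11, 12, 13], lim=2, offset=1)  -> [11, 12]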
1227 @predicate('last(set, [n])', safe=True)
1223 @predicate('last(set, [n])', safe=True)
1228 def last(repo, subset, x):
1224 def last(repo, subset, x):
1229 """Last n members of set, defaulting to 1.
1225 """Last n members of set, defaulting to 1.
1230 """
1226 """
1231 # i18n: "last" is a keyword
1227 # i18n: "last" is a keyword
1232 l = getargs(x, 1, 2, _("last requires one or two arguments"))
1228 l = getargs(x, 1, 2, _("last requires one or two arguments"))
1233 try:
1229 try:
1234 lim = 1
1230 lim = 1
1235 if len(l) == 2:
1231 if len(l) == 2:
1236 # i18n: "last" is a keyword
1232 # i18n: "last" is a keyword
1237 lim = int(getstring(l[1], _("last requires a number")))
1233 lim = int(getstring(l[1], _("last requires a number")))
1238 except (TypeError, ValueError):
1234 except (TypeError, ValueError):
1239 # i18n: "last" is a keyword
1235 # i18n: "last" is a keyword
1240 raise error.ParseError(_("last expects a number"))
1236 raise error.ParseError(_("last expects a number"))
1241 os = getset(repo, fullreposet(repo), l[0])
1237 os = getset(repo, fullreposet(repo), l[0])
1242 os.reverse()
1238 os.reverse()
1243 result = []
1239 result = []
1244 it = iter(os)
1240 it = iter(os)
1245 for x in xrange(lim):
1241 for x in xrange(lim):
1246 y = next(it, None)
1242 y = next(it, None)
1247 if y is None:
1243 if y is None:
1248 break
1244 break
1249 elif y in subset:
1245 elif y in subset:
1250 result.append(y)
1246 result.append(y)
1251 return baseset(result, datarepr=('<last n=%d, %r, %r>', lim, subset, os))
1247 return baseset(result, datarepr=('<last n=%d, %r, %r>', lim, subset, os))
1252
1248
1253 @predicate('max(set)', safe=True)
1249 @predicate('max(set)', safe=True)
1254 def maxrev(repo, subset, x):
1250 def maxrev(repo, subset, x):
1255 """Changeset with highest revision number in set.
1251 """Changeset with highest revision number in set.
1256 """
1252 """
1257 os = getset(repo, fullreposet(repo), x)
1253 os = getset(repo, fullreposet(repo), x)
1258 try:
1254 try:
1259 m = os.max()
1255 m = os.max()
1260 if m in subset:
1256 if m in subset:
1261 return baseset([m], datarepr=('<max %r, %r>', subset, os))
1257 return baseset([m], datarepr=('<max %r, %r>', subset, os))
1262 except ValueError:
1258 except ValueError:
1263 # os.max() throws a ValueError when the collection is empty.
1259 # os.max() throws a ValueError when the collection is empty.
1264 # Same as python's max().
1260 # Same as python's max().
1265 pass
1261 pass
1266 return baseset(datarepr=('<max %r, %r>', subset, os))
1262 return baseset(datarepr=('<max %r, %r>', subset, os))
1267
1263
1268 @predicate('merge()', safe=True)
1264 @predicate('merge()', safe=True)
1269 def merge(repo, subset, x):
1265 def merge(repo, subset, x):
1270 """Changeset is a merge changeset.
1266 """Changeset is a merge changeset.
1271 """
1267 """
1272 # i18n: "merge" is a keyword
1268 # i18n: "merge" is a keyword
1273 getargs(x, 0, 0, _("merge takes no arguments"))
1269 getargs(x, 0, 0, _("merge takes no arguments"))
1274 cl = repo.changelog
1270 cl = repo.changelog
1275 return subset.filter(lambda r: cl.parentrevs(r)[1] != -1,
1271 return subset.filter(lambda r: cl.parentrevs(r)[1] != -1,
1276 condrepr='<merge>')
1272 condrepr='<merge>')
1277
1273
1278 @predicate('branchpoint()', safe=True)
1274 @predicate('branchpoint()', safe=True)
1279 def branchpoint(repo, subset, x):
1275 def branchpoint(repo, subset, x):
1280 """Changesets with more than one child.
1276 """Changesets with more than one child.
1281 """
1277 """
1282 # i18n: "branchpoint" is a keyword
1278 # i18n: "branchpoint" is a keyword
1283 getargs(x, 0, 0, _("branchpoint takes no arguments"))
1279 getargs(x, 0, 0, _("branchpoint takes no arguments"))
1284 cl = repo.changelog
1280 cl = repo.changelog
1285 if not subset:
1281 if not subset:
1286 return baseset()
1282 return baseset()
1287 # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
1283 # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
1288 # (and if it is not, it should.)
1284 # (and if it is not, it should.)
1289 baserev = min(subset)
1285 baserev = min(subset)
1290 parentscount = [0]*(len(repo) - baserev)
1286 parentscount = [0]*(len(repo) - baserev)
1291 for r in cl.revs(start=baserev + 1):
1287 for r in cl.revs(start=baserev + 1):
1292 for p in cl.parentrevs(r):
1288 for p in cl.parentrevs(r):
1293 if p >= baserev:
1289 if p >= baserev:
1294 parentscount[p - baserev] += 1
1290 parentscount[p - baserev] += 1
1295 return subset.filter(lambda r: parentscount[r - baserev] > 1,
1291 return subset.filter(lambda r: parentscount[r - baserev] > 1,
1296 condrepr='<branchpoint>')
1292 condrepr='<branchpoint>')
1297
1293
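# A toy illustration of the child counting behind branchpoint(): a revision
# is a branch point when more than one other revision lists it as a parent.
# The 'parentrevs' mapping of revision -> parent tuple is hypothetical data.
def branchpoints_sketch(parentrevs):
    children = dict((r, 0) for r in parentrevs)
    for r, parents in parentrevs.items():
        for p in parents:
            if p in children:
                children[p] += 1
    return set(r for r, n in children.items() if n > 1)

# branchpoints_sketch({0: (), 1: (0,), 2: (0,), 3: (1,)})  -> {0}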
1298 @predicate('min(set)', safe=True)
1294 @predicate('min(set)', safe=True)
1299 def minrev(repo, subset, x):
1295 def minrev(repo, subset, x):
1300 """Changeset with lowest revision number in set.
1296 """Changeset with lowest revision number in set.
1301 """
1297 """
1302 os = getset(repo, fullreposet(repo), x)
1298 os = getset(repo, fullreposet(repo), x)
1303 try:
1299 try:
1304 m = os.min()
1300 m = os.min()
1305 if m in subset:
1301 if m in subset:
1306 return baseset([m], datarepr=('<min %r, %r>', subset, os))
1302 return baseset([m], datarepr=('<min %r, %r>', subset, os))
1307 except ValueError:
1303 except ValueError:
1308 # os.min() throws a ValueError when the collection is empty.
1304 # os.min() throws a ValueError when the collection is empty.
1309 # Same as python's min().
1305 # Same as python's min().
1310 pass
1306 pass
1311 return baseset(datarepr=('<min %r, %r>', subset, os))
1307 return baseset(datarepr=('<min %r, %r>', subset, os))
1312
1308
1313 @predicate('modifies(pattern)', safe=True)
1309 @predicate('modifies(pattern)', safe=True)
1314 def modifies(repo, subset, x):
1310 def modifies(repo, subset, x):
1315 """Changesets modifying files matched by pattern.
1311 """Changesets modifying files matched by pattern.
1316
1312
1317 The pattern without explicit kind like ``glob:`` is expected to be
1313 The pattern without explicit kind like ``glob:`` is expected to be
1318 relative to the current directory and match against a file or a
1314 relative to the current directory and match against a file or a
1319 directory.
1315 directory.
1320 """
1316 """
1321 # i18n: "modifies" is a keyword
1317 # i18n: "modifies" is a keyword
1322 pat = getstring(x, _("modifies requires a pattern"))
1318 pat = getstring(x, _("modifies requires a pattern"))
1323 return checkstatus(repo, subset, pat, 0)
1319 return checkstatus(repo, subset, pat, 0)
1324
1320
1325 @predicate('named(namespace)')
1321 @predicate('named(namespace)')
1326 def named(repo, subset, x):
1322 def named(repo, subset, x):
1327 """The changesets in a given namespace.
1323 """The changesets in a given namespace.
1328
1324
1329 If `namespace` starts with `re:`, the remainder of the string is treated as
1325 If `namespace` starts with `re:`, the remainder of the string is treated as
1330 a regular expression. To match a namespace that actually starts with `re:`,
1326 a regular expression. To match a namespace that actually starts with `re:`,
1331 use the prefix `literal:`.
1327 use the prefix `literal:`.
1332 """
1328 """
1333 # i18n: "named" is a keyword
1329 # i18n: "named" is a keyword
1334 args = getargs(x, 1, 1, _('named requires a namespace argument'))
1330 args = getargs(x, 1, 1, _('named requires a namespace argument'))
1335
1331
1336 ns = getstring(args[0],
1332 ns = getstring(args[0],
1337 # i18n: "named" is a keyword
1333 # i18n: "named" is a keyword
1338 _('the argument to named must be a string'))
1334 _('the argument to named must be a string'))
1339 kind, pattern, matcher = util.stringmatcher(ns)
1335 kind, pattern, matcher = util.stringmatcher(ns)
1340 namespaces = set()
1336 namespaces = set()
1341 if kind == 'literal':
1337 if kind == 'literal':
1342 if pattern not in repo.names:
1338 if pattern not in repo.names:
1343 raise error.RepoLookupError(_("namespace '%s' does not exist")
1339 raise error.RepoLookupError(_("namespace '%s' does not exist")
1344 % ns)
1340 % ns)
1345 namespaces.add(repo.names[pattern])
1341 namespaces.add(repo.names[pattern])
1346 else:
1342 else:
1347 for name, ns in repo.names.iteritems():
1343 for name, ns in repo.names.iteritems():
1348 if matcher(name):
1344 if matcher(name):
1349 namespaces.add(ns)
1345 namespaces.add(ns)
1350 if not namespaces:
1346 if not namespaces:
1351 raise error.RepoLookupError(_("no namespace exists"
1347 raise error.RepoLookupError(_("no namespace exists"
1352 " that match '%s'") % pattern)
1348 " that match '%s'") % pattern)
1353
1349
1354 names = set()
1350 names = set()
1355 for ns in namespaces:
1351 for ns in namespaces:
1356 for name in ns.listnames(repo):
1352 for name in ns.listnames(repo):
1357 if name not in ns.deprecated:
1353 if name not in ns.deprecated:
1358 names.update(repo[n].rev() for n in ns.nodes(repo, name))
1354 names.update(repo[n].rev() for n in ns.nodes(repo, name))
1359
1355
1360 names -= set([node.nullrev])
1356 names -= set([node.nullrev])
1361 return subset & names
1357 return subset & names
1362
1358
1363 @predicate('id(string)', safe=True)
1359 @predicate('id(string)', safe=True)
1364 def node_(repo, subset, x):
1360 def node_(repo, subset, x):
1365 """Revision non-ambiguously specified by the given hex string prefix.
1361 """Revision non-ambiguously specified by the given hex string prefix.
1366 """
1362 """
1367 # i18n: "id" is a keyword
1363 # i18n: "id" is a keyword
1368 l = getargs(x, 1, 1, _("id requires one argument"))
1364 l = getargs(x, 1, 1, _("id requires one argument"))
1369 # i18n: "id" is a keyword
1365 # i18n: "id" is a keyword
1370 n = getstring(l[0], _("id requires a string"))
1366 n = getstring(l[0], _("id requires a string"))
1371 if len(n) == 40:
1367 if len(n) == 40:
1372 try:
1368 try:
1373 rn = repo.changelog.rev(node.bin(n))
1369 rn = repo.changelog.rev(node.bin(n))
1374 except (LookupError, TypeError):
1370 except (LookupError, TypeError):
1375 rn = None
1371 rn = None
1376 else:
1372 else:
1377 rn = None
1373 rn = None
1378 pm = repo.changelog._partialmatch(n)
1374 pm = repo.changelog._partialmatch(n)
1379 if pm is not None:
1375 if pm is not None:
1380 rn = repo.changelog.rev(pm)
1376 rn = repo.changelog.rev(pm)
1381
1377
1382 if rn is None:
1378 if rn is None:
1383 return baseset()
1379 return baseset()
1384 result = baseset([rn])
1380 result = baseset([rn])
1385 return result & subset
1381 return result & subset
1386
1382
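# An illustrative version of the lookup policy in id() above: a 40-character
# hex string must resolve exactly, while a shorter string is a prefix that
# must match exactly one known node. The node list is hypothetical data.
def lookup_hex_sketch(nodes, query):
    if len(query) == 40:
        return query if query in nodes else None
    matches = [n for n in nodes if n.startswith(query)]
    return matches[0] if len(matches) == 1 else None

# nodes = ['ab12' + '0' * 36, 'cd34' + '0' * 36]
# lookup_hex_sketch(nodes, 'cd')  -> 'cd34' + '0' * 36
# lookup_hex_sketch(nodes, '')    -> None (ambiguous: matches both)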
1387 @predicate('obsolete()', safe=True)
1383 @predicate('obsolete()', safe=True)
1388 def obsolete(repo, subset, x):
1384 def obsolete(repo, subset, x):
1389 """Mutable changeset with a newer version."""
1385 """Mutable changeset with a newer version."""
1390 # i18n: "obsolete" is a keyword
1386 # i18n: "obsolete" is a keyword
1391 getargs(x, 0, 0, _("obsolete takes no arguments"))
1387 getargs(x, 0, 0, _("obsolete takes no arguments"))
1392 obsoletes = obsmod.getrevs(repo, 'obsolete')
1388 obsoletes = obsmod.getrevs(repo, 'obsolete')
1393 return subset & obsoletes
1389 return subset & obsoletes
1394
1390
1395 @predicate('only(set, [set])', safe=True)
1391 @predicate('only(set, [set])', safe=True)
1396 def only(repo, subset, x):
1392 def only(repo, subset, x):
1397 """Changesets that are ancestors of the first set that are not ancestors
1393 """Changesets that are ancestors of the first set that are not ancestors
1398 of any other head in the repo. If a second set is specified, the result
1394 of any other head in the repo. If a second set is specified, the result
1399 is ancestors of the first set that are not ancestors of the second set
1395 is ancestors of the first set that are not ancestors of the second set
1400 (i.e. ::<set1> - ::<set2>).
1396 (i.e. ::<set1> - ::<set2>).
1401 """
1397 """
1402 cl = repo.changelog
1398 cl = repo.changelog
1403 # i18n: "only" is a keyword
1399 # i18n: "only" is a keyword
1404 args = getargs(x, 1, 2, _('only takes one or two arguments'))
1400 args = getargs(x, 1, 2, _('only takes one or two arguments'))
1405 include = getset(repo, fullreposet(repo), args[0])
1401 include = getset(repo, fullreposet(repo), args[0])
1406 if len(args) == 1:
1402 if len(args) == 1:
1407 if not include:
1403 if not include:
1408 return baseset()
1404 return baseset()
1409
1405
1410 descendants = set(_revdescendants(repo, include, False))
1406 descendants = set(_revdescendants(repo, include, False))
1411 exclude = [rev for rev in cl.headrevs()
1407 exclude = [rev for rev in cl.headrevs()
1412 if not rev in descendants and not rev in include]
1408 if not rev in descendants and not rev in include]
1413 else:
1409 else:
1414 exclude = getset(repo, fullreposet(repo), args[1])
1410 exclude = getset(repo, fullreposet(repo), args[1])
1415
1411
1416 results = set(cl.findmissingrevs(common=exclude, heads=include))
1412 results = set(cl.findmissingrevs(common=exclude, heads=include))
1417 # XXX we should turn this into a baseset instead of a set, smartset may do
1413 # XXX we should turn this into a baseset instead of a set, smartset may do
1418 # some optimisations from the fact this is a baseset.
1414 # some optimisations from the fact this is a baseset.
1419 return subset & results
1415 return subset & results
1420
1416
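# A repository-free illustration of the set arithmetic behind only():
# ancestors of the include set minus ancestors of the exclude set
# (i.e. ::<set1> - ::<set2>). The 'parentrevs' mapping is hypothetical data.
def ancestors_sketch(parentrevs, revs):
    seen, stack = set(), list(revs)
    while stack:
        r = stack.pop()
        if r not in seen:
            seen.add(r)
            stack.extend(parentrevs.get(r, ()))
    return seen

def only_sketch(parentrevs, include, exclude):
    return ancestors_sketch(parentrevs, include) - ancestors_sketch(parentrevs, exclude)

# only_sketch({0: (), 1: (0,), 2: (0,), 3: (2,)}, include=[3], exclude=[1])  -> {2, 3}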
1421 @predicate('origin([set])', safe=True)
1417 @predicate('origin([set])', safe=True)
1422 def origin(repo, subset, x):
1418 def origin(repo, subset, x):
1423 """
1419 """
1424 Changesets that were specified as a source for the grafts, transplants or
1420 Changesets that were specified as a source for the grafts, transplants or
1425 rebases that created the given revisions. Omitting the optional set is the
1421 rebases that created the given revisions. Omitting the optional set is the
1426 same as passing all(). If a changeset created by these operations is itself
1422 same as passing all(). If a changeset created by these operations is itself
1427 specified as a source for one of these operations, only the source changeset
1423 specified as a source for one of these operations, only the source changeset
1428 for the first operation is selected.
1424 for the first operation is selected.
1429 """
1425 """
1430 if x is not None:
1426 if x is not None:
1431 dests = getset(repo, fullreposet(repo), x)
1427 dests = getset(repo, fullreposet(repo), x)
1432 else:
1428 else:
1433 dests = fullreposet(repo)
1429 dests = fullreposet(repo)
1434
1430
1435 def _firstsrc(rev):
1431 def _firstsrc(rev):
1436 src = _getrevsource(repo, rev)
1432 src = _getrevsource(repo, rev)
1437 if src is None:
1433 if src is None:
1438 return None
1434 return None
1439
1435
1440 while True:
1436 while True:
1441 prev = _getrevsource(repo, src)
1437 prev = _getrevsource(repo, src)
1442
1438
1443 if prev is None:
1439 if prev is None:
1444 return src
1440 return src
1445 src = prev
1441 src = prev
1446
1442
1447 o = set([_firstsrc(r) for r in dests])
1443 o = set([_firstsrc(r) for r in dests])
1448 o -= set([None])
1444 o -= set([None])
1449 # XXX we should turn this into a baseset instead of a set, smartset may do
1445 # XXX we should turn this into a baseset instead of a set, smartset may do
1450 # some optimisations from the fact this is a baseset.
1446 # some optimisations from the fact this is a baseset.
1451 return subset & o
1447 return subset & o
1452
1448
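# The _firstsrc() walk above restated over a plain dict: follow the recorded
# graft/transplant/rebase source chain back to the earliest source. The
# 'sources' mapping of revision -> source revision is hypothetical data.
def first_source_sketch(sources, rev):
    src = sources.get(rev)
    if src is None:
        return None
    while True:
        prev = sources.get(src)
        if prev is None:
            return src
        src = prev

# first_source_sketch({5: 3, 3: 1}, 5)  -> 1   (5 came from 3, which came from 1)
# first_source_sketch({5: 3, 3: 1}, 2)  -> None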
1453 @predicate('outgoing([path])', safe=True)
1449 @predicate('outgoing([path])', safe=True)
1454 def outgoing(repo, subset, x):
1450 def outgoing(repo, subset, x):
1455 """Changesets not found in the specified destination repository, or the
1451 """Changesets not found in the specified destination repository, or the
1456 default push location.
1452 default push location.
1457 """
1453 """
1458 # Avoid cycles.
1454 # Avoid cycles.
1459 from . import (
1455 from . import (
1460 discovery,
1456 discovery,
1461 hg,
1457 hg,
1462 )
1458 )
1463 # i18n: "outgoing" is a keyword
1459 # i18n: "outgoing" is a keyword
1464 l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
1460 l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
1465 # i18n: "outgoing" is a keyword
1461 # i18n: "outgoing" is a keyword
1466 dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
1462 dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
1467 dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
1463 dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
1468 dest, branches = hg.parseurl(dest)
1464 dest, branches = hg.parseurl(dest)
1469 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1465 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1470 if revs:
1466 if revs:
1471 revs = [repo.lookup(rev) for rev in revs]
1467 revs = [repo.lookup(rev) for rev in revs]
1472 other = hg.peer(repo, {}, dest)
1468 other = hg.peer(repo, {}, dest)
1473 repo.ui.pushbuffer()
1469 repo.ui.pushbuffer()
1474 outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
1470 outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
1475 repo.ui.popbuffer()
1471 repo.ui.popbuffer()
1476 cl = repo.changelog
1472 cl = repo.changelog
1477 o = set([cl.rev(r) for r in outgoing.missing])
1473 o = set([cl.rev(r) for r in outgoing.missing])
1478 return subset & o
1474 return subset & o
1479
1475
1480 @predicate('p1([set])', safe=True)
1476 @predicate('p1([set])', safe=True)
1481 def p1(repo, subset, x):
1477 def p1(repo, subset, x):
1482 """First parent of changesets in set, or the working directory.
1478 """First parent of changesets in set, or the working directory.
1483 """
1479 """
1484 if x is None:
1480 if x is None:
1485 p = repo[x].p1().rev()
1481 p = repo[x].p1().rev()
1486 if p >= 0:
1482 if p >= 0:
1487 return subset & baseset([p])
1483 return subset & baseset([p])
1488 return baseset()
1484 return baseset()
1489
1485
1490 ps = set()
1486 ps = set()
1491 cl = repo.changelog
1487 cl = repo.changelog
1492 for r in getset(repo, fullreposet(repo), x):
1488 for r in getset(repo, fullreposet(repo), x):
1493 ps.add(cl.parentrevs(r)[0])
1489 ps.add(cl.parentrevs(r)[0])
1494 ps -= set([node.nullrev])
1490 ps -= set([node.nullrev])
1495 # XXX we should turn this into a baseset instead of a set, smartset may do
1491 # XXX we should turn this into a baseset instead of a set, smartset may do
1496 # some optimisations from the fact this is a baseset.
1492 # some optimisations from the fact this is a baseset.
1497 return subset & ps
1493 return subset & ps
1498
1494
1499 @predicate('p2([set])', safe=True)
1495 @predicate('p2([set])', safe=True)
1500 def p2(repo, subset, x):
1496 def p2(repo, subset, x):
1501 """Second parent of changesets in set, or the working directory.
1497 """Second parent of changesets in set, or the working directory.
1502 """
1498 """
1503 if x is None:
1499 if x is None:
1504 ps = repo[x].parents()
1500 ps = repo[x].parents()
1505 try:
1501 try:
1506 p = ps[1].rev()
1502 p = ps[1].rev()
1507 if p >= 0:
1503 if p >= 0:
1508 return subset & baseset([p])
1504 return subset & baseset([p])
1509 return baseset()
1505 return baseset()
1510 except IndexError:
1506 except IndexError:
1511 return baseset()
1507 return baseset()
1512
1508
1513 ps = set()
1509 ps = set()
1514 cl = repo.changelog
1510 cl = repo.changelog
1515 for r in getset(repo, fullreposet(repo), x):
1511 for r in getset(repo, fullreposet(repo), x):
1516 ps.add(cl.parentrevs(r)[1])
1512 ps.add(cl.parentrevs(r)[1])
1517 ps -= set([node.nullrev])
1513 ps -= set([node.nullrev])
1518 # XXX we should turn this into a baseset instead of a set, smartset may do
1514 # XXX we should turn this into a baseset instead of a set, smartset may do
1519 # some optimisations from the fact this is a baseset.
1515 # some optimisations from the fact this is a baseset.
1520 return subset & ps
1516 return subset & ps
1521
1517
1522 @predicate('parents([set])', safe=True)
1518 @predicate('parents([set])', safe=True)
1523 def parents(repo, subset, x):
1519 def parents(repo, subset, x):
1524 """
1520 """
1525 The set of all parents for all changesets in set, or the working directory.
1521 The set of all parents for all changesets in set, or the working directory.
1526 """
1522 """
1527 if x is None:
1523 if x is None:
1528 ps = set(p.rev() for p in repo[x].parents())
1524 ps = set(p.rev() for p in repo[x].parents())
1529 else:
1525 else:
1530 ps = set()
1526 ps = set()
1531 cl = repo.changelog
1527 cl = repo.changelog
1532 up = ps.update
1528 up = ps.update
1533 parentrevs = cl.parentrevs
1529 parentrevs = cl.parentrevs
1534 for r in getset(repo, fullreposet(repo), x):
1530 for r in getset(repo, fullreposet(repo), x):
1535 if r == node.wdirrev:
1531 if r == node.wdirrev:
1536 up(p.rev() for p in repo[r].parents())
1532 up(p.rev() for p in repo[r].parents())
1537 else:
1533 else:
1538 up(parentrevs(r))
1534 up(parentrevs(r))
1539 ps -= set([node.nullrev])
1535 ps -= set([node.nullrev])
1540 return subset & ps
1536 return subset & ps
1541
1537
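# How p1()/p2()/parents() collapse parent revisions, shown on a toy parent
# table. -1 plays the role of node.nullrev ("no parent") and is discarded,
# just as the predicates above discard nullrev. Hypothetical data only.
NULLREV = -1

def parents_sketch(parentrevs, revs, which=None):
    ps = set()
    for r in revs:
        pair = parentrevs[r]
        if which is None:
            ps.update(pair)           # parents(): both parents
        else:
            ps.add(pair[which])       # p1() -> which=0, p2() -> which=1
    ps.discard(NULLREV)
    return ps

# table = {0: (-1, -1), 1: (0, -1), 2: (0, 1)}
# parents_sketch(table, [1, 2])           -> {0, 1}
# parents_sketch(table, [1, 2], which=1)  -> {1}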
1542 def _phase(repo, subset, target):
1538 def _phase(repo, subset, target):
1543 """helper to select all rev in phase <target>"""
1539 """helper to select all rev in phase <target>"""
1544 repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
1540 repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
1545 if repo._phasecache._phasesets:
1541 if repo._phasecache._phasesets:
1546 s = repo._phasecache._phasesets[target] - repo.changelog.filteredrevs
1542 s = repo._phasecache._phasesets[target] - repo.changelog.filteredrevs
1547 s = baseset(s)
1543 s = baseset(s)
1548 s.sort() # sets are unordered, so we enforce ascending order
1544 s.sort() # sets are unordered, so we enforce ascending order
1549 return subset & s
1545 return subset & s
1550 else:
1546 else:
1551 phase = repo._phasecache.phase
1547 phase = repo._phasecache.phase
1552 condition = lambda r: phase(repo, r) == target
1548 condition = lambda r: phase(repo, r) == target
1553 return subset.filter(condition, condrepr=('<phase %r>', target),
1549 return subset.filter(condition, condrepr=('<phase %r>', target),
1554 cache=False)
1550 cache=False)
1555
1551
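# The two strategies used by _phase() above, reduced to plain data: when a
# precomputed set of revisions in the target phase exists, the query is a set
# intersection; otherwise each revision is tested individually. All names and
# data below are hypothetical.
def phase_filter_sketch(subset, target, phasesets=None, phase_of=None):
    if phasesets is not None:
        return sorted(set(subset) & phasesets.get(target, set()))
    return [r for r in subset if phase_of(r) == target]

# phase_filter_sketch([0, 1, 2, 3], 'draft', phasesets={'draft': {2, 3}})  -> [2, 3]
# phase_filter_sketch([0, 1, 2], 'draft',
#                     phase_of=lambda r: 'draft' if r > 1 else 'public')   -> [2]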
1556 @predicate('draft()', safe=True)
1552 @predicate('draft()', safe=True)
1557 def draft(repo, subset, x):
1553 def draft(repo, subset, x):
1558 """Changeset in draft phase."""
1554 """Changeset in draft phase."""
1559 # i18n: "draft" is a keyword
1555 # i18n: "draft" is a keyword
1560 getargs(x, 0, 0, _("draft takes no arguments"))
1556 getargs(x, 0, 0, _("draft takes no arguments"))
1561 target = phases.draft
1557 target = phases.draft
1562 return _phase(repo, subset, target)
1558 return _phase(repo, subset, target)
1563
1559
1564 @predicate('secret()', safe=True)
1560 @predicate('secret()', safe=True)
1565 def secret(repo, subset, x):
1561 def secret(repo, subset, x):
1566 """Changeset in secret phase."""
1562 """Changeset in secret phase."""
1567 # i18n: "secret" is a keyword
1563 # i18n: "secret" is a keyword
1568 getargs(x, 0, 0, _("secret takes no arguments"))
1564 getargs(x, 0, 0, _("secret takes no arguments"))
1569 target = phases.secret
1565 target = phases.secret
1570 return _phase(repo, subset, target)
1566 return _phase(repo, subset, target)
1571
1567
1572 def parentspec(repo, subset, x, n):
1568 def parentspec(repo, subset, x, n):
1573 """``set^0``
1569 """``set^0``
1574 The set.
1570 The set.
1575 ``set^1`` (or ``set^``), ``set^2``
1571 ``set^1`` (or ``set^``), ``set^2``
1576 First or second parent, respectively, of all changesets in set.
1572 First or second parent, respectively, of all changesets in set.
1577 """
1573 """
1578 try:
1574 try:
1579 n = int(n[1])
1575 n = int(n[1])
1580 if n not in (0, 1, 2):
1576 if n not in (0, 1, 2):
1581 raise ValueError
1577 raise ValueError
1582 except (TypeError, ValueError):
1578 except (TypeError, ValueError):
1583 raise error.ParseError(_("^ expects a number 0, 1, or 2"))
1579 raise error.ParseError(_("^ expects a number 0, 1, or 2"))
1584 ps = set()
1580 ps = set()
1585 cl = repo.changelog
1581 cl = repo.changelog
1586 for r in getset(repo, fullreposet(repo), x):
1582 for r in getset(repo, fullreposet(repo), x):
1587 if n == 0:
1583 if n == 0:
1588 ps.add(r)
1584 ps.add(r)
1589 elif n == 1:
1585 elif n == 1:
1590 ps.add(cl.parentrevs(r)[0])
1586 ps.add(cl.parentrevs(r)[0])
1591 elif n == 2:
1587 elif n == 2:
1592 parents = cl.parentrevs(r)
1588 parents = cl.parentrevs(r)
1593 if len(parents) > 1:
1589 if len(parents) > 1:
1594 ps.add(parents[1])
1590 ps.add(parents[1])
1595 return subset & ps
1591 return subset & ps
1596
1592
1597 @predicate('present(set)', safe=True)
1593 @predicate('present(set)', safe=True)
1598 def present(repo, subset, x):
1594 def present(repo, subset, x):
1599 """An empty set, if any revision in set isn't found; otherwise,
1595 """An empty set, if any revision in set isn't found; otherwise,
1600 all revisions in set.
1596 all revisions in set.
1601
1597
1602 If any of the specified revisions is not present in the local repository,
1598 If any of the specified revisions is not present in the local repository,
1603 the query is normally aborted. But this predicate allows the query
1599 the query is normally aborted. But this predicate allows the query
1604 to continue even in such cases.
1600 to continue even in such cases.
1605 """
1601 """
1606 try:
1602 try:
1607 return getset(repo, subset, x)
1603 return getset(repo, subset, x)
1608 except error.RepoLookupError:
1604 except error.RepoLookupError:
1609 return baseset()
1605 return baseset()
1610
1606
1611 # for internal use
1607 # for internal use
1612 @predicate('_notpublic', safe=True)
1608 @predicate('_notpublic', safe=True)
1613 def _notpublic(repo, subset, x):
1609 def _notpublic(repo, subset, x):
1614 getargs(x, 0, 0, "_notpublic takes no arguments")
1610 getargs(x, 0, 0, "_notpublic takes no arguments")
1615 repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
1611 repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
1616 if repo._phasecache._phasesets:
1612 if repo._phasecache._phasesets:
1617 s = set()
1613 s = set()
1618 for u in repo._phasecache._phasesets[1:]:
1614 for u in repo._phasecache._phasesets[1:]:
1619 s.update(u)
1615 s.update(u)
1620 s = baseset(s - repo.changelog.filteredrevs)
1616 s = baseset(s - repo.changelog.filteredrevs)
1621 s.sort()
1617 s.sort()
1622 return subset & s
1618 return subset & s
1623 else:
1619 else:
1624 phase = repo._phasecache.phase
1620 phase = repo._phasecache.phase
1625 target = phases.public
1621 target = phases.public
1626 condition = lambda r: phase(repo, r) != target
1622 condition = lambda r: phase(repo, r) != target
1627 return subset.filter(condition, condrepr=('<phase %r>', target),
1623 return subset.filter(condition, condrepr=('<phase %r>', target),
1628 cache=False)
1624 cache=False)
1629
1625
1630 @predicate('public()', safe=True)
1626 @predicate('public()', safe=True)
1631 def public(repo, subset, x):
1627 def public(repo, subset, x):
1632 """Changeset in public phase."""
1628 """Changeset in public phase."""
1633 # i18n: "public" is a keyword
1629 # i18n: "public" is a keyword
1634 getargs(x, 0, 0, _("public takes no arguments"))
1630 getargs(x, 0, 0, _("public takes no arguments"))
1635 phase = repo._phasecache.phase
1631 phase = repo._phasecache.phase
1636 target = phases.public
1632 target = phases.public
1637 condition = lambda r: phase(repo, r) == target
1633 condition = lambda r: phase(repo, r) == target
1638 return subset.filter(condition, condrepr=('<phase %r>', target),
1634 return subset.filter(condition, condrepr=('<phase %r>', target),
1639 cache=False)
1635 cache=False)
1640
1636
1641 @predicate('remote([id [,path]])', safe=True)
1637 @predicate('remote([id [,path]])', safe=True)
1642 def remote(repo, subset, x):
1638 def remote(repo, subset, x):
1643 """Local revision that corresponds to the given identifier in a
1639 """Local revision that corresponds to the given identifier in a
1644 remote repository, if present. Here, the '.' identifier is a
1640 remote repository, if present. Here, the '.' identifier is a
1645 synonym for the current local branch.
1641 synonym for the current local branch.
1646 """
1642 """
1647
1643
1648 from . import hg # avoid start-up nasties
1644 from . import hg # avoid start-up nasties
1649 # i18n: "remote" is a keyword
1645 # i18n: "remote" is a keyword
1650 l = getargs(x, 0, 2, _("remote takes zero, one, or two arguments"))
1646 l = getargs(x, 0, 2, _("remote takes zero, one, or two arguments"))
1651
1647
1652 q = '.'
1648 q = '.'
1653 if len(l) > 0:
1649 if len(l) > 0:
1654 # i18n: "remote" is a keyword
1650 # i18n: "remote" is a keyword
1655 q = getstring(l[0], _("remote requires a string id"))
1651 q = getstring(l[0], _("remote requires a string id"))
1656 if q == '.':
1652 if q == '.':
1657 q = repo['.'].branch()
1653 q = repo['.'].branch()
1658
1654
1659 dest = ''
1655 dest = ''
1660 if len(l) > 1:
1656 if len(l) > 1:
1661 # i18n: "remote" is a keyword
1657 # i18n: "remote" is a keyword
1662 dest = getstring(l[1], _("remote requires a repository path"))
1658 dest = getstring(l[1], _("remote requires a repository path"))
1663 dest = repo.ui.expandpath(dest or 'default')
1659 dest = repo.ui.expandpath(dest or 'default')
1664 dest, branches = hg.parseurl(dest)
1660 dest, branches = hg.parseurl(dest)
1665 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1661 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1666 if revs:
1662 if revs:
1667 revs = [repo.lookup(rev) for rev in revs]
1663 revs = [repo.lookup(rev) for rev in revs]
1668 other = hg.peer(repo, {}, dest)
1664 other = hg.peer(repo, {}, dest)
1669 n = other.lookup(q)
1665 n = other.lookup(q)
1670 if n in repo:
1666 if n in repo:
1671 r = repo[n].rev()
1667 r = repo[n].rev()
1672 if r in subset:
1668 if r in subset:
1673 return baseset([r])
1669 return baseset([r])
1674 return baseset()
1670 return baseset()
1675
1671
1676 @predicate('removes(pattern)', safe=True)
1672 @predicate('removes(pattern)', safe=True)
1677 def removes(repo, subset, x):
1673 def removes(repo, subset, x):
1678 """Changesets which remove files matching pattern.
1674 """Changesets which remove files matching pattern.
1679
1675
1680 The pattern without explicit kind like ``glob:`` is expected to be
1676 The pattern without explicit kind like ``glob:`` is expected to be
1681 relative to the current directory and match against a file or a
1677 relative to the current directory and match against a file or a
1682 directory.
1678 directory.
1683 """
1679 """
1684 # i18n: "removes" is a keyword
1680 # i18n: "removes" is a keyword
1685 pat = getstring(x, _("removes requires a pattern"))
1681 pat = getstring(x, _("removes requires a pattern"))
1686 return checkstatus(repo, subset, pat, 2)
1682 return checkstatus(repo, subset, pat, 2)
1687
1683
1688 @predicate('rev(number)', safe=True)
1684 @predicate('rev(number)', safe=True)
1689 def rev(repo, subset, x):
1685 def rev(repo, subset, x):
1690 """Revision with the given numeric identifier.
1686 """Revision with the given numeric identifier.
1691 """
1687 """
1692 # i18n: "rev" is a keyword
1688 # i18n: "rev" is a keyword
1693 l = getargs(x, 1, 1, _("rev requires one argument"))
1689 l = getargs(x, 1, 1, _("rev requires one argument"))
1694 try:
1690 try:
1695 # i18n: "rev" is a keyword
1691 # i18n: "rev" is a keyword
1696 l = int(getstring(l[0], _("rev requires a number")))
1692 l = int(getstring(l[0], _("rev requires a number")))
1697 except (TypeError, ValueError):
1693 except (TypeError, ValueError):
1698 # i18n: "rev" is a keyword
1694 # i18n: "rev" is a keyword
1699 raise error.ParseError(_("rev expects a number"))
1695 raise error.ParseError(_("rev expects a number"))
1700 if l not in repo.changelog and l != node.nullrev:
1696 if l not in repo.changelog and l != node.nullrev:
1701 return baseset()
1697 return baseset()
1702 return subset & baseset([l])
1698 return subset & baseset([l])
1703
1699
1704 @predicate('matching(revision [, field])', safe=True)
@predicate('matching(revision [, field])', safe=True)
def matching(repo, subset, x):
    """Changesets in which a given set of fields match the set of fields in the
    selected revision or set.

    To match more than one field pass the list of fields to match separated
    by spaces (e.g. ``author description``).

    Valid fields are most regular revision fields and some special fields.

    Regular revision fields are ``description``, ``author``, ``branch``,
    ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
    and ``diff``.
    Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
    contents of the revision. Two revisions matching their ``diff`` will
    also match their ``files``.

    Special fields are ``summary`` and ``metadata``:
    ``summary`` matches the first line of the description.
    ``metadata`` is equivalent to matching ``description user date``
    (i.e. it matches the main metadata fields).

    ``metadata`` is the default field which is used when no fields are
    specified. You can match more than one field at a time.
    """
    # i18n: "matching" is a keyword
    l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))

    revs = getset(repo, fullreposet(repo), l[0])

    fieldlist = ['metadata']
    if len(l) > 1:
        fieldlist = getstring(l[1],
                              # i18n: "matching" is a keyword
                              _("matching requires a string "
                                "as its second argument")).split()

    # Make sure that there are no repeated fields,
    # expand the 'special' 'metadata' field type
    # and check the 'files' whenever we check the 'diff'
    fields = []
    for field in fieldlist:
        if field == 'metadata':
            fields += ['user', 'description', 'date']
        elif field == 'diff':
            # a revision matching the diff must also match the files
            # since matching the diff is very costly, make sure to
            # also match the files first
            fields += ['files', 'diff']
        else:
            if field == 'author':
                field = 'user'
            fields.append(field)
    fields = set(fields)
    if 'summary' in fields and 'description' in fields:
        # If a revision matches its description it also matches its summary
        fields.discard('summary')

    # We may want to match more than one field
    # Not all fields take the same amount of time to be matched
    # Sort the selected fields in order of increasing matching cost
    fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
        'files', 'description', 'substate', 'diff']
    def fieldkeyfunc(f):
        try:
            return fieldorder.index(f)
        except ValueError:
            # assume an unknown field is very costly
            return len(fieldorder)
    fields = list(fields)
    fields.sort(key=fieldkeyfunc)

    # Each field will be matched with its own "getfield" function
    # which will be added to the getfieldfuncs array of functions
    getfieldfuncs = []
    _funcs = {
        'user': lambda r: repo[r].user(),
        'branch': lambda r: repo[r].branch(),
        'date': lambda r: repo[r].date(),
        'description': lambda r: repo[r].description(),
        'files': lambda r: repo[r].files(),
        'parents': lambda r: repo[r].parents(),
        'phase': lambda r: repo[r].phase(),
        'substate': lambda r: repo[r].substate,
        'summary': lambda r: repo[r].description().splitlines()[0],
        'diff': lambda r: list(repo[r].diff(git=True),)
    }
    for info in fields:
        getfield = _funcs.get(info, None)
        if getfield is None:
            raise error.ParseError(
                # i18n: "matching" is a keyword
                _("unexpected field name passed to matching: %s") % info)
        getfieldfuncs.append(getfield)
    # convert the getfield array of functions into a "getinfo" function
    # which returns an array of field values (or a single value if there
    # is only one field to match)
    getinfo = lambda r: [f(r) for f in getfieldfuncs]

    def matches(x):
        for rev in revs:
            target = getinfo(rev)
            match = True
            for n, f in enumerate(getfieldfuncs):
                if target[n] != f(x):
                    match = False
            if match:
                return True
        return False

    return subset.filter(matches, condrepr=('<matching%r %r>', fields, revs))

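# Illustrative sketch (not part of the original module): the `matching`
# predicate above is normally exercised through a revset query. Assuming
# `repo` is a localrepository object obtained elsewhere, a call like the
# following returns the revisions whose author and branch equal those of
# revision 42 (the `_example_` helper name is hypothetical).
def _example_matching_usage(repo):
    return list(repo.revs("matching(42, 'author branch')"))
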
@predicate('reverse(set)', safe=True)
def reverse(repo, subset, x):
    """Reverse order of set.
    """
    l = getset(repo, subset, x)
    l.reverse()
    return l

@predicate('roots(set)', safe=True)
def roots(repo, subset, x):
    """Changesets in set with no parent changeset in set.
    """
    s = getset(repo, fullreposet(repo), x)
    parents = repo.changelog.parentrevs
    def filter(r):
        for p in parents(r):
            if 0 <= p and p in s:
                return False
        return True
    return subset & s.filter(filter, condrepr='<roots>')

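# Illustrative sketch (not part of the original module): `roots` is handy for
# finding where a line of development started. Assuming `repo` is a
# localrepository object, this hypothetical helper returns the revisions on
# the 'default' branch that have no parent on that branch.
def _example_roots_usage(repo):
    return list(repo.revs("roots(branch('default'))"))
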
_sortkeyfuncs = {
    'rev': lambda c: c.rev(),
    'branch': lambda c: c.branch(),
    'desc': lambda c: c.description(),
    'user': lambda c: c.user(),
    'author': lambda c: c.user(),
    'date': lambda c: c.date()[0],
}

def _getsortargs(x):
    """Parse sort options into (set, [(key, reverse)], opts)"""
    args = getargsdict(x, 'sort', 'set keys topo.firstbranch')
    if 'set' not in args:
        # i18n: "sort" is a keyword
        raise error.ParseError(_('sort requires one or two arguments'))
    keys = "rev"
    if 'keys' in args:
        # i18n: "sort" is a keyword
        keys = getstring(args['keys'], _("sort spec must be a string"))

    keyflags = []
    for k in keys.split():
        fk = k
        reverse = (k[0] == '-')
        if reverse:
            k = k[1:]
        if k not in _sortkeyfuncs and k != 'topo':
            raise error.ParseError(_("unknown sort key %r") % fk)
        keyflags.append((k, reverse))

    if len(keyflags) > 1 and any(k == 'topo' for k, reverse in keyflags):
        # i18n: "topo" is a keyword
        raise error.ParseError(_(
            'topo sort order cannot be combined with other sort keys'))

    opts = {}
    if 'topo.firstbranch' in args:
        if any(k == 'topo' for k, reverse in keyflags):
            opts['topo.firstbranch'] = args['topo.firstbranch']
        else:
            # i18n: "topo" and "topo.firstbranch" are keywords
            raise error.ParseError(_(
                'topo.firstbranch can only be used when using the topo sort '
                'key'))

    return args['set'], keyflags, opts

@predicate('sort(set[, [-]key... [, ...]])', safe=True)
def sort(repo, subset, x):
    """Sort set by keys. The default sort order is ascending, specify a key
    as ``-key`` to sort in descending order.

    The keys can be:

    - ``rev`` for the revision number,
    - ``branch`` for the branch name,
    - ``desc`` for the commit message (description),
    - ``user`` for user name (``author`` can be used as an alias),
    - ``date`` for the commit date
    - ``topo`` for a reverse topological sort

    The ``topo`` sort order cannot be combined with other sort keys. This sort
    takes one optional argument, ``topo.firstbranch``, which takes a revset
    that specifies which topological branches to prioritize in the sort.

    """
    s, keyflags, opts = _getsortargs(x)
    revs = getset(repo, subset, s)

    if not keyflags:
        return revs
    if len(keyflags) == 1 and keyflags[0][0] == "rev":
        revs.sort(reverse=keyflags[0][1])
        return revs
    elif keyflags[0][0] == "topo":
        firstbranch = ()
        if 'topo.firstbranch' in opts:
            firstbranch = getset(repo, subset, opts['topo.firstbranch'])
        revs = baseset(_toposort(revs, repo.changelog.parentrevs, firstbranch),
                       istopo=True)
        if keyflags[0][1]:
            revs.reverse()
        return revs

    # sort() is guaranteed to be stable
    ctxs = [repo[r] for r in revs]
    for k, reverse in reversed(keyflags):
        ctxs.sort(key=_sortkeyfuncs[k], reverse=reverse)
    return baseset([c.rev() for c in ctxs])

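# Illustrative sketch (not part of the original module): a couple of revset
# strings exercising the `sort` predicate above, assuming `repo` is a
# localrepository object (the helper name is hypothetical).
def _example_sort_usage(repo):
    newest_first = list(repo.revs("sort(branch('default'), '-date')"))
    topo_order = list(repo.revs("sort(all(), 'topo', topo.firstbranch=.)"))
    return newest_first, topo_order
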
def _toposort(revs, parentsfunc, firstbranch=()):
    """Yield revisions from heads to roots one (topo) branch at a time.

    This function aims to be used by a graph generator that wishes to minimize
    the number of parallel branches and their interleaving.

    Example iteration order (numbers show the "true" order in a changelog):

      o  4
      |
      o  1
      |
      | o  3
      | |
      | o  2
      |/
      o  0

    Note that the ancestors of merges are understood by the current
    algorithm to be on the same branch. This means no reordering will
    occur behind a merge.
    """

    ### Quick summary of the algorithm
    #
    # This function is based around a "retention" principle. We keep revisions
    # in memory until we are ready to emit a whole branch that immediately
    # "merges" into an existing one. This reduces the number of parallel
    # branches with interleaved revisions.
    #
    # During iteration revs are split into two groups:
    # A) revisions already emitted
    # B) revisions in "retention". They are stored as different subgroups.
    #
    # for each REV, we do the following logic:
    #
    #   1) if REV is a parent of (A), we will emit it. If there is a
    #   retention group ((B) above) that is blocked on REV being
    #   available, we emit all the revisions out of that retention
    #   group first.
    #
    #   2) else, we'll search for a subgroup in (B) waiting for REV to become
    #   available; if such a subgroup exists, we add REV to it and the
    #   subgroup is now waiting for REV.parents() to become available.
    #
    #   3) finally, if no such subgroup exists in (B), we create a new one.
    #
    #
    # To bootstrap the algorithm, we emit the tipmost revision (which
    # puts it in group (A) from above).

    revs.sort(reverse=True)

    # Set of parents of revisions that have been emitted. They can be
    # considered unblocked as the graph generator is already aware of them so
    # there is no need to delay the revisions that reference them.
    #
    # If someone wants to prioritize a branch over the others, pre-filling this
    # set will force all other branches to wait until this branch is ready to
    # be emitted.
    unblocked = set(firstbranch)

    # list of groups waiting to be displayed, each group is defined by:
    #
    #   (revs:    list of revs waiting to be displayed,
    #    blocked: set of revs that cannot be displayed before those in 'revs')
    #
    # The second value ('blocked') corresponds to parents of any revision in
    # the group ('revs') that is not itself contained in the group. The main
    # idea of this algorithm is to delay as much as possible the emission of
    # any revision. This means waiting for the moment we are about to display
    # these parents to display the revs in a group.
    #
    # This first implementation is smart until it encounters a merge: it will
    # emit revs as soon as any parent is about to be emitted and can grow an
    # arbitrary number of revs in 'blocked'. In practice this means we
    # properly retain new branches but give up on any special ordering for
    # ancestors of merges. The implementation can be improved to handle this
    # better.
    #
    # The first subgroup is special. It corresponds to all the revisions that
    # were already emitted. The 'revs' list is expected to be empty and the
    # 'blocked' set contains the parents of already emitted revisions.
    #
    # You could pre-seed the <parents> set of groups[0] to specific
    # changesets to select what the first emitted branch should be.
    groups = [([], unblocked)]
    pendingheap = []
    pendingset = set()

    heapq.heapify(pendingheap)
    heappop = heapq.heappop
    heappush = heapq.heappush
    for currentrev in revs:
        # Heap works with smallest element, we want highest so we invert
        if currentrev not in pendingset:
            heappush(pendingheap, -currentrev)
            pendingset.add(currentrev)
        # iterate on pending revs until after the current rev has been
        # processed.
        rev = None
        while rev != currentrev:
            rev = -heappop(pendingheap)
            pendingset.remove(rev)

            # Seek for a subgroup blocked, waiting for the current revision.
            matching = [i for i, g in enumerate(groups) if rev in g[1]]

            if matching:
                # The main idea is to gather together all sets that are blocked
                # on the same revision.
                #
                # Groups are merged when a common blocking ancestor is
                # observed. For example, given two groups:
                #
                # revs [5, 4] waiting for 1
                # revs [3, 2] waiting for 1
                #
                # These two groups will be merged when we process
                # 1. In theory, we could have merged the groups when
                # we added 2 to the group it is now in (we could have
                # noticed the groups were both blocked on 1 then), but
                # the way it works now makes the algorithm simpler.
                #
                # We also always keep the oldest subgroup first. We can
                # probably improve the behavior by having the longest set
                # first. That way, graph algorithms could minimise the length
                # of parallel lines in their drawing. This is currently not
                # done.
                targetidx = matching.pop(0)
                trevs, tparents = groups[targetidx]
                for i in matching:
                    gr = groups[i]
                    trevs.extend(gr[0])
                    tparents |= gr[1]
                # delete all merged subgroups (except the one we kept)
                # (starting from the last subgroup for performance and
                # sanity reasons)
                for i in reversed(matching):
                    del groups[i]
            else:
                # This is a new head. We create a new subgroup for it.
                targetidx = len(groups)
                groups.append(([], set([rev])))

            gr = groups[targetidx]

            # We now add the current nodes to this subgroup. This is done
            # after the subgroup merging because all elements from a subgroup
            # that relied on this rev must precede it.
            #
            # we also update the <parents> set to include the parents of the
            # new nodes.
            if rev == currentrev: # only display stuff in rev
                gr[0].append(rev)
            gr[1].remove(rev)
            parents = [p for p in parentsfunc(rev) if p > node.nullrev]
            gr[1].update(parents)
            for p in parents:
                if p not in pendingset:
                    pendingset.add(p)
                    heappush(pendingheap, -p)

            # Look for a subgroup to display
            #
            # When unblocked is empty (if clause), we were not waiting for any
            # revisions during the first iteration (if no priority was given)
            # or we emitted a whole disconnected set of the graph (reached a
            # root). In that case we arbitrarily take the oldest known
            # subgroup. The heuristic could probably be better.
            #
            # Otherwise (elif clause) if the subgroup is blocked on
            # a revision we just emitted, we can safely emit it as
            # well.
            if not unblocked:
                if len(groups) > 1: # display other subset
                    targetidx = 1
                    gr = groups[1]
            elif not gr[1] & unblocked:
                gr = None

            if gr is not None:
                # update the set of awaited revisions with the one from the
                # subgroup
                unblocked |= gr[1]
                # output all revisions in the subgroup
                for r in gr[0]:
                    yield r
                # delete the subgroup that you just output
                # unless it is groups[0] in which case you just empty it.
                if targetidx:
                    del groups[targetidx]
                else:
                    gr[0][:] = []
    # Check if we have some subgroup waiting for revisions we are not going to
    # iterate over
    for g in groups:
        for r in g[0]:
            yield r

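# Illustrative sketch (not part of the original module): _toposort only needs
# a list of revisions and a parents function, so it can be exercised on a toy
# DAG without a repository. The DAG below is the one drawn in the docstring
# (0 is the root, 1 and 2 branch off it, 4 and 3 are their children); the
# helper name is hypothetical.
def _example_toposort():
    toydag = {0: (-1, -1), 1: (0, -1), 2: (0, -1), 3: (2, -1), 4: (1, -1)}
    # yields one branch at a time, e.g. [4, 1, 3, 2, 0]
    return list(_toposort([4, 3, 2, 1, 0], lambda r: toydag[r]))
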
@predicate('subrepo([pattern])')
def subrepo(repo, subset, x):
    """Changesets that add, modify or remove the given subrepo. If no subrepo
    pattern is named, any subrepo changes are returned.
    """
    # i18n: "subrepo" is a keyword
    args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
    pat = None
    if len(args) != 0:
        pat = getstring(args[0], _("subrepo requires a pattern"))

    m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])

    def submatches(names):
        k, p, m = util.stringmatcher(pat)
        for name in names:
            if m(name):
                yield name

    def matches(x):
        c = repo[x]
        s = repo.status(c.p1().node(), c.node(), match=m)

        if pat is None:
            return s.added or s.modified or s.removed

        if s.added:
            return any(submatches(c.substate.keys()))

        if s.modified:
            subs = set(c.p1().substate.keys())
            subs.update(c.substate.keys())

            for path in submatches(subs):
                if c.p1().substate.get(path) != c.substate.get(path):
                    return True

        if s.removed:
            return any(submatches(c.p1().substate.keys()))

        return False

    return subset.filter(matches, condrepr=('<subrepo %r>', pat))

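# Illustrative sketch (not part of the original module): revset strings
# exercising the `subrepo` predicate above, assuming `repo` is a
# localrepository object (the helper name is hypothetical).
def _example_subrepo_usage(repo):
    any_sub_change = list(repo.revs("subrepo()"))
    lib_sub_change = list(repo.revs("subrepo('re:^lib/')"))
    return any_sub_change, lib_sub_change
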
def _substringmatcher(pattern):
    kind, pattern, matcher = util.stringmatcher(pattern)
    if kind == 'literal':
        matcher = lambda s: pattern in s
    return kind, pattern, matcher

@predicate('tag([name])', safe=True)
def tag(repo, subset, x):
    """The specified tag by name, or all tagged revisions if no name is given.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a tag that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "tag" is a keyword
    args = getargs(x, 0, 1, _("tag takes one or no arguments"))
    cl = repo.changelog
    if args:
        pattern = getstring(args[0],
                            # i18n: "tag" is a keyword
                            _('the argument to tag must be a string'))
        kind, pattern, matcher = util.stringmatcher(pattern)
        if kind == 'literal':
            # avoid resolving all tags
            tn = repo._tagscache.tags.get(pattern, None)
            if tn is None:
                raise error.RepoLookupError(_("tag '%s' does not exist")
                                            % pattern)
            s = set([repo[tn].rev()])
        else:
            s = set([cl.rev(n) for t, n in repo.tagslist() if matcher(t)])
    else:
        s = set([cl.rev(n) for t, n in repo.tagslist() if t != 'tip'])
    return subset & s

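# Illustrative sketch (not part of the original module): revset strings
# exercising the `tag` predicate above, assuming `repo` is a localrepository
# object (the helper name and the `v...` tag naming scheme are hypothetical).
def _example_tag_usage(repo):
    all_tagged = list(repo.revs("tag()"))
    releases = list(repo.revs("tag('re:^v\\d')"))
    return all_tagged, releases
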
@predicate('tagged', safe=True)
def tagged(repo, subset, x):
    return tag(repo, subset, x)

@predicate('unstable()', safe=True)
def unstable(repo, subset, x):
    """Non-obsolete changesets with obsolete ancestors.
    """
    # i18n: "unstable" is a keyword
    getargs(x, 0, 0, _("unstable takes no arguments"))
    unstables = obsmod.getrevs(repo, 'unstable')
    return subset & unstables


@predicate('user(string)', safe=True)
def user(repo, subset, x):
    """User name contains string. The match is case-insensitive.

    If `string` starts with `re:`, the remainder of the string is treated as
    a regular expression. To match a user that actually contains `re:`, use
    the prefix `literal:`.
    """
    return author(repo, subset, x)

# experimental
@predicate('wdir', safe=True)
def wdir(repo, subset, x):
    # i18n: "wdir" is a keyword
    getargs(x, 0, 0, _("wdir takes no arguments"))
    if node.wdirrev in subset or isinstance(subset, fullreposet):
        return baseset([node.wdirrev])
    return baseset()

# for internal use
@predicate('_list', safe=True)
def _list(repo, subset, x):
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    # remove duplicates here. it's difficult for caller to deduplicate sets
    # because different symbols can point to the same rev.
    cl = repo.changelog
    ls = []
    seen = set()
    for t in s.split('\0'):
        try:
            # fast path for integer revision
            r = int(t)
            if str(r) != t or r not in cl:
                raise ValueError
            revs = [r]
        except ValueError:
            revs = stringset(repo, subset, t)

        for r in revs:
            if r in seen:
                continue
            if (r in subset
                or r == node.nullrev and isinstance(subset, fullreposet)):
                ls.append(r)
            seen.add(r)
    return baseset(ls)

# for internal use
@predicate('_intlist', safe=True)
def _intlist(repo, subset, x):
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    ls = [int(r) for r in s.split('\0')]
    s = subset
    return baseset([r for r in ls if r in s])

# for internal use
@predicate('_hexlist', safe=True)
def _hexlist(repo, subset, x):
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    cl = repo.changelog
    ls = [cl.rev(node.bin(r)) for r in s.split('\0')]
    s = subset
    return baseset([r for r in ls if r in s])

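# Illustrative sketch (not part of the original module): the internal _list,
# _intlist and _hexlist predicates above are normally produced by
# formatspec() (defined later in this file) rather than typed by hand.
# Assuming `repo` is a localrepository object (helper name hypothetical):
def _example_intlist_usage(repo):
    # formatspec('%ld', [0, 1, 2]) expands to "_intlist('0\x001\x002')"
    return list(repo.revs(formatspec('%ld', [0, 1, 2])))
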
methods = {
    "range": rangeset,
    "dagrange": dagrange,
    "string": stringset,
    "symbol": stringset,
    "and": andset,
    "or": orset,
    "not": notset,
    "difference": differenceset,
    "list": listset,
    "keyvalue": keyvaluepair,
    "func": func,
    "ancestor": ancestorspec,
    "parent": parentspec,
    "parentpost": p1,
}

def _matchonly(revs, bases):
    """
    >>> f = lambda *args: _matchonly(*map(parse, args))
    >>> f('ancestors(A)', 'not ancestors(B)')
    ('list', ('symbol', 'A'), ('symbol', 'B'))
    """
    if (revs is not None
        and revs[0] == 'func'
        and getstring(revs[1], _('not a symbol')) == 'ancestors'
        and bases is not None
        and bases[0] == 'not'
        and bases[1][0] == 'func'
        and getstring(bases[1][1], _('not a symbol')) == 'ancestors'):
        return ('list', revs[2], bases[1][2])

def _optimize(x, small):
    if x is None:
        return 0, x

    smallbonus = 1
    if small:
        smallbonus = .5

    op = x[0]
    if op == 'minus':
        return _optimize(('and', x[1], ('not', x[2])), small)
    elif op == 'only':
        t = ('func', ('symbol', 'only'), ('list', x[1], x[2]))
        return _optimize(t, small)
    elif op == 'onlypost':
        return _optimize(('func', ('symbol', 'only'), x[1]), small)
    elif op == 'dagrangepre':
        return _optimize(('func', ('symbol', 'ancestors'), x[1]), small)
    elif op == 'dagrangepost':
        return _optimize(('func', ('symbol', 'descendants'), x[1]), small)
    elif op == 'rangeall':
        return _optimize(('range', ('string', '0'), ('string', 'tip')), small)
    elif op == 'rangepre':
        return _optimize(('range', ('string', '0'), x[1]), small)
    elif op == 'rangepost':
        return _optimize(('range', x[1], ('string', 'tip')), small)
    elif op == 'negate':
        s = getstring(x[1], _("can't negate that"))
        return _optimize(('string', '-' + s), small)
    elif op in 'string symbol negate':
        return smallbonus, x # single revisions are small
    elif op == 'and':
        wa, ta = _optimize(x[1], True)
        wb, tb = _optimize(x[2], True)
        w = min(wa, wb)

        # (::x and not ::y)/(not ::y and ::x) have a fast path
        tm = _matchonly(ta, tb) or _matchonly(tb, ta)
        if tm:
            return w, ('func', ('symbol', 'only'), tm)

        if tb is not None and tb[0] == 'not':
            return wa, ('difference', ta, tb[1])

        if wa > wb:
            return w, (op, tb, ta)
        return w, (op, ta, tb)
    elif op == 'or':
        # fast path for machine-generated expression, that is likely to have
        # lots of trivial revisions: 'a + b + c()' to '_list(a b) + c()'
        ws, ts, ss = [], [], []
        def flushss():
            if not ss:
                return
            if len(ss) == 1:
                w, t = ss[0]
            else:
                s = '\0'.join(t[1] for w, t in ss)
                y = ('func', ('symbol', '_list'), ('string', s))
                w, t = _optimize(y, False)
            ws.append(w)
            ts.append(t)
            del ss[:]
        for y in x[1:]:
            w, t = _optimize(y, False)
            if t is not None and (t[0] == 'string' or t[0] == 'symbol'):
                ss.append((w, t))
                continue
            flushss()
            ws.append(w)
            ts.append(t)
        flushss()
        if len(ts) == 1:
            return ws[0], ts[0] # 'or' operation is fully optimized out
        # we can't reorder trees by weight because it would change the order.
        # ("sort(a + b)" == "sort(b + a)", but "a + b" != "b + a")
        # ts = tuple(t for w, t in sorted(zip(ws, ts), key=lambda wt: wt[0]))
        return max(ws), (op,) + tuple(ts)
    elif op == 'not':
        # Optimize not public() to _notpublic() because we have a fast version
        if x[1] == ('func', ('symbol', 'public'), None):
            newsym = ('func', ('symbol', '_notpublic'), None)
            o = _optimize(newsym, not small)
            return o[0], o[1]
        else:
            o = _optimize(x[1], not small)
            return o[0], (op, o[1])
    elif op == 'parentpost':
        o = _optimize(x[1], small)
        return o[0], (op, o[1])
    elif op == 'group':
        return _optimize(x[1], small)
    elif op in 'dagrange range parent ancestorspec':
        if op == 'parent':
            # x^:y means (x^) : y, not x ^ (:y)
            post = ('parentpost', x[1])
            if x[2][0] == 'dagrangepre':
                return _optimize(('dagrange', post, x[2][1]), small)
            elif x[2][0] == 'rangepre':
                return _optimize(('range', post, x[2][1]), small)

        wa, ta = _optimize(x[1], small)
        wb, tb = _optimize(x[2], small)
        return wa + wb, (op, ta, tb)
    elif op == 'list':
        ws, ts = zip(*(_optimize(y, small) for y in x[1:]))
        return sum(ws), (op,) + ts
    elif op == 'func':
        f = getstring(x[1], _("not a symbol"))
        wa, ta = _optimize(x[2], small)
        if f in ("author branch closed date desc file grep keyword "
                 "outgoing user"):
            w = 10 # slow
        elif f in "modifies adds removes":
            w = 30 # slower
        elif f == "contains":
            w = 100 # very slow
        elif f == "ancestor":
            w = 1 * smallbonus
        elif f in "reverse limit first _intlist":
            w = 0
        elif f in "sort":
            w = 10 # assume most sorts look at changelog
        else:
            w = 1
        return w + wa, (op, x[1], ta)
    return 1, x

def optimize(tree):
    _weight, newtree = _optimize(tree, small=True)
    return newtree

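# Illustrative sketch (not part of the original module): a quick way to see
# the "only" fast path from _optimize/_matchonly in action. The exact tuple
# shape noted below is an assumption based on the code above, and the helper
# name is hypothetical.
def _example_optimize_only():
    tree = parse('ancestors(release) and not ancestors(default)')
    return optimize(tree)
    # roughly ('func', ('symbol', 'only'),
    #          ('list', ('symbol', 'release'), ('symbol', 'default')))
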
# the set of valid characters for the initial letter of symbols in
# alias declarations and definitions
_aliassyminitletters = set(c for c in [chr(i) for i in xrange(256)]
                           if c.isalnum() or c in '._@$' or ord(c) > 127)

def _parsewith(spec, lookup=None, syminitletters=None):
    """Generate a parse tree of given spec with given tokenizing options

    >>> _parsewith('foo($1)', syminitletters=_aliassyminitletters)
    ('func', ('symbol', 'foo'), ('symbol', '$1'))
    >>> _parsewith('$1')
    Traceback (most recent call last):
      ...
    ParseError: ("syntax error in revset '$1'", 0)
    >>> _parsewith('foo bar')
    Traceback (most recent call last):
      ...
    ParseError: ('invalid token', 4)
    """
    p = parser.parser(elements)
    tree, pos = p.parse(tokenize(spec, lookup=lookup,
                                 syminitletters=syminitletters))
    if pos != len(spec):
        raise error.ParseError(_('invalid token'), pos)
    return parser.simplifyinfixops(tree, ('list', 'or'))

class _aliasrules(parser.basealiasrules):
    """Parsing and expansion rule set of revset aliases"""
    _section = _('revset alias')

    @staticmethod
    def _parse(spec):
        """Parse alias declaration/definition ``spec``

        This allows symbol names to use also ``$`` as an initial letter
        (for backward compatibility), and callers of this function should
        examine whether ``$`` is used also for unexpected symbols or not.
        """
        return _parsewith(spec, syminitletters=_aliassyminitletters)

    @staticmethod
    def _trygetfunc(tree):
        if tree[0] == 'func' and tree[1][0] == 'symbol':
            return tree[1][1], getlist(tree[2])

def expandaliases(ui, tree, showwarning=None):
    aliases = _aliasrules.buildmap(ui.configitems('revsetalias'))
    tree = _aliasrules.expand(aliases, tree)
    if showwarning:
        # warn about problematic (but not referred) aliases
        for name, alias in sorted(aliases.iteritems()):
            if alias.error and not alias.warned:
                showwarning(_('warning: %s\n') % (alias.error))
                alias.warned = True
    return tree

def foldconcat(tree):
    """Fold elements to be concatenated by `##`
    """
    if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
        return tree
    if tree[0] == '_concat':
        pending = [tree]
        l = []
        while pending:
            e = pending.pop()
            if e[0] == '_concat':
                pending.extend(reversed(e[1:]))
            elif e[0] in ('string', 'symbol'):
                l.append(e[1])
            else:
                msg = _("\"##\" can't concatenate \"%s\" element") % (e[0])
                raise error.ParseError(msg)
        return ('string', ''.join(l))
    else:
        return tuple(foldconcat(t) for t in tree)

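# Illustrative sketch (not part of the original module): foldconcat collapses
# the '_concat' nodes produced by the '##' operator into a single string
# node. The exact intermediate tree and result noted below are assumptions
# based on the code above; the helper name is hypothetical.
def _example_foldconcat():
    tree = parse("'release_' ## '1.0'")
    return foldconcat(tree) # roughly ('string', 'release_1.0')
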
def parse(spec, lookup=None):
    return _parsewith(spec, lookup=lookup)

def posttreebuilthook(tree, repo):
    # hook for extensions to execute code on the optimized tree
    pass

def match(ui, spec, repo=None):
    if not spec:
        raise error.ParseError(_("empty query"))
    lookup = None
    if repo:
        lookup = repo.__contains__
    tree = parse(spec, lookup)
    return _makematcher(ui, tree, repo)

def matchany(ui, specs, repo=None):
    """Create a matcher that will include any revisions matching one of the
    given specs"""
    if not specs:
        def mfunc(repo, subset=None):
            return baseset()
        return mfunc
    if not all(specs):
        raise error.ParseError(_("empty query"))
    lookup = None
    if repo:
        lookup = repo.__contains__
    if len(specs) == 1:
        tree = parse(specs[0], lookup)
    else:
        tree = ('or',) + tuple(parse(s, lookup) for s in specs)
    return _makematcher(ui, tree, repo)

def _makematcher(ui, tree, repo):
    if ui:
        tree = expandaliases(ui, tree, showwarning=ui.warn)
    tree = foldconcat(tree)
    tree = optimize(tree)
    posttreebuilthook(tree, repo)
    def mfunc(repo, subset=None):
        if subset is None:
            subset = fullreposet(repo)
        if util.safehasattr(subset, 'isascending'):
            result = getset(repo, subset, tree)
        else:
            result = getset(repo, baseset(subset), tree)
        return result
    return mfunc

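# Illustrative sketch (not part of the original module): match()/matchany()
# return a callable that evaluates the parsed, alias-expanded and optimized
# tree against a repository. Assuming `repo` is a localrepository object
# (the helper name is hypothetical):
def _example_match_usage(repo):
    m = match(repo.ui, 'not public()', repo)
    draft_and_secret = list(m(repo))
    many = matchany(repo.ui, ['tip', 'parents(tip)'], repo)
    return draft_and_secret, list(many(repo))
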
def formatspec(expr, *args):
    '''
    This is a convenience function for using revsets internally, and
    escapes arguments appropriately. Aliases are intentionally ignored
    so that intended expression behavior isn't accidentally subverted.

    Supported arguments:

    %r = revset expression, parenthesized
    %d = int(arg), no quoting
    %s = string(arg), escaped and single-quoted
    %b = arg.branch(), escaped and single-quoted
    %n = hex(arg), single-quoted
    %% = a literal '%'

    Prefixing the type with 'l' specifies a parenthesized list of that type.

    >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
    '(10 or 11):: and ((this()) or (that()))'
    >>> formatspec('%d:: and not %d::', 10, 20)
    '10:: and not 20::'
    >>> formatspec('%ld or %ld', [], [1])
    "_list('') or 1"
    >>> formatspec('keyword(%s)', 'foo\\xe9')
    "keyword('foo\\\\xe9')"
    >>> b = lambda: 'default'
    >>> b.branch = b
    >>> formatspec('branch(%b)', b)
    "branch('default')"
    >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
    "root(_list('a\\x00b\\x00c\\x00d'))"
    '''

    def quote(s):
        return repr(str(s))

    def argtype(c, arg):
        if c == 'd':
            return str(int(arg))
        elif c == 's':
            return quote(arg)
        elif c == 'r':
            parse(arg) # make sure syntax errors are confined
            return '(%s)' % arg
        elif c == 'n':
            return quote(node.hex(arg))
        elif c == 'b':
            return quote(arg.branch())

    def listexp(s, t):
        l = len(s)
        if l == 0:
            return "_list('')"
        elif l == 1:
            return argtype(t, s[0])
        elif t == 'd':
            return "_intlist('%s')" % "\0".join(str(int(a)) for a in s)
        elif t == 's':
            return "_list('%s')" % "\0".join(s)
        elif t == 'n':
            return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
        elif t == 'b':
            return "_list('%s')" % "\0".join(a.branch() for a in s)

        m = l // 2
        return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))

    ret = ''
    pos = 0
    arg = 0
    while pos < len(expr):
        c = expr[pos]
        if c == '%':
            pos += 1
            d = expr[pos]
            if d == '%':
                ret += d
            elif d in 'dsnbr':
                ret += argtype(d, args[arg])
                arg += 1
            elif d == 'l':
                # a list of some type
                pos += 1
                d = expr[pos]
                ret += listexp(list(args[arg]), d)
                arg += 1
            else:
                raise error.Abort(_('unexpected revspec format character %s')
                                  % d)
        else:
            ret += c
        pos += 1

    return ret

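# Minimal usage sketch (illustrative comment only; `revs` and `pat` stand for
# caller-provided values and are not defined here):
#
#     expr = formatspec('%ld and file(%s)', revs, pat)
#     # with revs = [2, 3, 5] and pat = 'path:README' this yields roughly
#     # "_intlist('2\x003\x005') and file('path:README')"
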
def prettyformat(tree):
    return parser.prettyformat(tree, ('string', 'symbol'))

def depth(tree):
    if isinstance(tree, tuple):
        return max(map(depth, tree)) + 1
    else:
        return 0

def funcsused(tree):
    if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
        return set()
    else:
        funcs = set()
        for s in tree[1:]:
            funcs |= funcsused(s)
        if tree[0] == 'func':
            funcs.add(tree[1][1])
        return funcs

def _formatsetrepr(r):
    """Format an optional printable representation of a set

    ======== =================================
    type(r)  example
    ======== =================================
    tuple    ('<not %r>', other)
    str      '<branch closed>'
    callable lambda: '<branch %r>' % sorted(b)
    object   other
    ======== =================================
    """
    if r is None:
        return ''
    elif isinstance(r, tuple):
        return r[0] % r[1:]
    elif isinstance(r, str):
        return r
    elif callable(r):
        return r()
    else:
        return repr(r)

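# Minimal usage sketch (illustrative comment only): inspecting a parsed tree
# with the helpers above.
#
#     tree = parse('heads(default) and not merge()')
#     funcsused(tree)       # -> set(['heads', 'merge'])
#     depth(tree)           # nesting depth of the tuple tree
#     print prettyformat(tree)
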
class abstractsmartset(object):

    def __nonzero__(self):
        """True if the smartset is not empty"""
        raise NotImplementedError()

    def __contains__(self, rev):
        """provide fast membership testing"""
        raise NotImplementedError()

    def __iter__(self):
        """iterate the set in the order it is supposed to be iterated"""
        raise NotImplementedError()

    # Attributes containing a function to perform a fast iteration in a given
    # direction. A smartset can have none, one, or both defined.
    #
    # Default value is None instead of a function returning None to avoid
    # initializing an iterator just for testing if a fast method exists.
    fastasc = None
    fastdesc = None

    def isascending(self):
        """True if the set will iterate in ascending order"""
        raise NotImplementedError()

    def isdescending(self):
        """True if the set will iterate in descending order"""
        raise NotImplementedError()

    def istopo(self):
        """True if the set will iterate in topological order"""
        raise NotImplementedError()

    @util.cachefunc
    def min(self):
        """return the minimum element in the set"""
        if self.fastasc is not None:
            for r in self.fastasc():
                return r
            raise ValueError('arg is an empty sequence')
        return min(self)

    @util.cachefunc
    def max(self):
        """return the maximum element in the set"""
        if self.fastdesc is not None:
            for r in self.fastdesc():
                return r
            raise ValueError('arg is an empty sequence')
        return max(self)

    def first(self):
        """return the first element in the set (user iteration perspective)

        Return None if the set is empty"""
        raise NotImplementedError()

    def last(self):
        """return the last element in the set (user iteration perspective)

        Return None if the set is empty"""
        raise NotImplementedError()

    def __len__(self):
        """return the length of the smartset

        This can be expensive on smartsets that could be lazy otherwise."""
        raise NotImplementedError()

    def reverse(self):
        """reverse the expected iteration order"""
        raise NotImplementedError()

    def sort(self, reverse=True):
        """get the set to iterate in an ascending or descending order"""
        raise NotImplementedError()

    def __and__(self, other):
        """Returns a new object with the intersection of the two collections.

        This is part of the mandatory API for smartset."""
        if isinstance(other, fullreposet):
            return self
        return self.filter(other.__contains__, condrepr=other, cache=False)

    def __add__(self, other):
        """Returns a new object with the union of the two collections.

        This is part of the mandatory API for smartset."""
        return addset(self, other)

    def __sub__(self, other):
        """Returns a new object with the subtraction of the two collections.

        This is part of the mandatory API for smartset."""
        c = other.__contains__
        return self.filter(lambda r: not c(r), condrepr=('<not %r>', other),
                           cache=False)

    def filter(self, condition, condrepr=None, cache=True):
        """Returns this smartset filtered by condition as a new smartset.

        `condition` is a callable which takes a revision number and returns a
        boolean. Optional `condrepr` provides a printable representation of
        the given `condition`.

        This is part of the mandatory API for smartset."""
        # builtins cannot be cached, but they do not need to be
        if cache and util.safehasattr(condition, 'func_code'):
            condition = util.cachefunc(condition)
        return filteredset(self, condition, condrepr)

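# Minimal usage sketch (illustrative comment only): the operators above are
# how revset results are combined lazily.
#
#     xs = baseset([1, 3, 5])
#     ys = baseset([3, 4])
#     list(xs & ys)   # -> [3]           (filteredset keeping ys members)
#     list(xs + ys)   # -> [1, 3, 5, 4]  (addset, duplicates skipped)
#     list(xs - ys)   # -> [1, 5]        (filteredset with negated condition)
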
class baseset(abstractsmartset):
    """Basic data structure that represents a revset and contains the basic
    operations that it should be able to perform.

    Every method in this class should be implemented by any smartset class.
    """
    def __init__(self, data=(), datarepr=None, istopo=False):
        """
        datarepr: a tuple of (format, obj, ...), a function or an object that
                  provides a printable representation of the given data.
        """
        self._ascending = None
        self._istopo = istopo
        if not isinstance(data, list):
            if isinstance(data, set):
                self._set = data
                # sets have no order; we pick one for stability purposes
                self._ascending = True
            data = list(data)
        self._list = data
        self._datarepr = datarepr

    @util.propertycache
    def _set(self):
        return set(self._list)

    @util.propertycache
    def _asclist(self):
        asclist = self._list[:]
        asclist.sort()
        return asclist

    def __iter__(self):
        if self._ascending is None:
            return iter(self._list)
        elif self._ascending:
            return iter(self._asclist)
        else:
            return reversed(self._asclist)

    def fastasc(self):
        return iter(self._asclist)

    def fastdesc(self):
        return reversed(self._asclist)

    @util.propertycache
    def __contains__(self):
        return self._set.__contains__

    def __nonzero__(self):
        return bool(self._list)

    def sort(self, reverse=False):
        self._ascending = not bool(reverse)
        self._istopo = False

    def reverse(self):
        if self._ascending is None:
            self._list.reverse()
        else:
            self._ascending = not self._ascending
        self._istopo = False

    def __len__(self):
        return len(self._list)

    def isascending(self):
        """Returns True if the collection is in ascending order, False if not.

        This is part of the mandatory API for smartset."""
        if len(self) <= 1:
            return True
        return self._ascending is not None and self._ascending

    def isdescending(self):
        """Returns True if the collection is in descending order, False if not.

        This is part of the mandatory API for smartset."""
        if len(self) <= 1:
            return True
        return self._ascending is not None and not self._ascending

    def istopo(self):
        """Is the collection in topological order or not.

        This is part of the mandatory API for smartset."""
        if len(self) <= 1:
            return True
        return self._istopo

    def first(self):
        if self:
            if self._ascending is None:
                return self._list[0]
            elif self._ascending:
                return self._asclist[0]
            else:
                return self._asclist[-1]
        return None

    def last(self):
        if self:
            if self._ascending is None:
                return self._list[-1]
            elif self._ascending:
                return self._asclist[-1]
            else:
                return self._asclist[0]
        return None

    def __repr__(self):
        d = {None: '', False: '-', True: '+'}[self._ascending]
        s = _formatsetrepr(self._datarepr)
        if not s:
            l = self._list
            # if _list has been built from a set, it might have a different
            # order from one python implementation to another.
            # We fall back to the sorted version for a stable output.
            if self._ascending is not None:
                l = self._asclist
            s = repr(l)
        return '<%s%s %s>' % (type(self).__name__, d, s)

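# Minimal usage sketch (illustrative comment only): baseset keeps the given
# order until an ordering is requested, then serves the cached sorted list.
#
#     s = baseset([3, 1, 2])
#     list(s)        # -> [3, 1, 2]  (no order imposed yet)
#     s.sort()
#     list(s)        # -> [1, 2, 3]
#     s.reverse()
#     list(s)        # -> [3, 2, 1]
#     2 in s         # -> True       (membership goes through the cached set)
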
class filteredset(abstractsmartset):
    """Duck type for baseset class which iterates lazily over the revisions in
    the subset and contains a function which tests for membership in the
    revset
    """
    def __init__(self, subset, condition=lambda x: True, condrepr=None):
        """
        condition: a function that decides whether a revision in the subset
                   belongs to the revset or not.
        condrepr: a tuple of (format, obj, ...), a function or an object that
                  provides a printable representation of the given condition.
        """
        self._subset = subset
        self._condition = condition
        self._condrepr = condrepr

    def __contains__(self, x):
        return x in self._subset and self._condition(x)

    def __iter__(self):
        return self._iterfilter(self._subset)

    def _iterfilter(self, it):
        cond = self._condition
        for x in it:
            if cond(x):
                yield x

    @property
    def fastasc(self):
        it = self._subset.fastasc
        if it is None:
            return None
        return lambda: self._iterfilter(it())

    @property
    def fastdesc(self):
        it = self._subset.fastdesc
        if it is None:
            return None
        return lambda: self._iterfilter(it())

    def __nonzero__(self):
        fast = None
        candidates = [self.fastasc if self.isascending() else None,
                      self.fastdesc if self.isdescending() else None,
                      self.fastasc,
                      self.fastdesc]
        for candidate in candidates:
            if candidate is not None:
                fast = candidate
                break

        if fast is not None:
            it = fast()
        else:
            it = self

        for r in it:
            return True
        return False

    def __len__(self):
        # Basic implementation to be changed in future patches.
        # until this gets improved, we use a generator expression
        # here, since a list comprehension is free to call __len__ again,
        # causing infinite recursion
        l = baseset(r for r in self)
        return len(l)

    def sort(self, reverse=False):
        self._subset.sort(reverse=reverse)

    def reverse(self):
        self._subset.reverse()

    def isascending(self):
        return self._subset.isascending()

    def isdescending(self):
        return self._subset.isdescending()

    def istopo(self):
        return self._subset.istopo()

    def first(self):
        for x in self:
            return x
        return None

    def last(self):
        it = None
        if self.isascending():
            it = self.fastdesc
        elif self.isdescending():
            it = self.fastasc
        if it is not None:
            for x in it():
                return x
            return None  # empty case
        else:
            x = None
            for x in self:
                pass
            return x

    def __repr__(self):
        xs = [repr(self._subset)]
        s = _formatsetrepr(self._condrepr)
        if s:
            xs.append(s)
        return '<%s %s>' % (type(self).__name__, ', '.join(xs))

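# Minimal usage sketch (illustrative comment only): the condition is evaluated
# lazily, only for revisions that are actually iterated or probed.
#
#     even = filteredset(baseset([1, 2, 3, 4]), lambda r: r % 2 == 0,
#                        condrepr='<even>')
#     list(even)   # -> [2, 4]
#     3 in even    # -> False (in the subset, but rejected by the condition)
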
def _iterordered(ascending, iter1, iter2):
    """produce an ordered iteration from two iterators with the same order

    The `ascending` flag is used to indicate the iteration direction.
    """
    choice = max
    if ascending:
        choice = min

    val1 = None
    val2 = None
    try:
        # Consume both iterators in an ordered way until one is empty
        while True:
            if val1 is None:
                val1 = next(iter1)
            if val2 is None:
                val2 = next(iter2)
            n = choice(val1, val2)
            yield n
            if val1 == n:
                val1 = None
            if val2 == n:
                val2 = None
    except StopIteration:
        # Flush any remaining values and consume the other one
        it = iter2
        if val1 is not None:
            yield val1
            it = iter1
        elif val2 is not None:
            # might have been equality and both are empty
            yield val2
        for val in it:
            yield val

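# Minimal usage sketch (illustrative comment only): merging two iterators that
# are already in the same order, collapsing values produced by both sides.
#
#     list(_iterordered(True, iter([1, 3, 5]), iter([2, 3, 6])))
#     # -> [1, 2, 3, 5, 6]
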
class addset(abstractsmartset):
    """Represent the addition of two sets

    Wrapper structure for lazily adding two structures without losing much
    performance on the __contains__ method

    If the ascending attribute is set, that means the two structures are
    ordered in either an ascending or descending way. Therefore, we can add
    them maintaining the order by iterating over both at the same time

    >>> xs = baseset([0, 3, 2])
    >>> ys = baseset([5, 2, 4])

    >>> rs = addset(xs, ys)
    >>> bool(rs), 0 in rs, 1 in rs, 5 in rs, rs.first(), rs.last()
    (True, True, False, True, 0, 4)
    >>> rs = addset(xs, baseset([]))
    >>> bool(rs), 0 in rs, 1 in rs, rs.first(), rs.last()
    (True, True, False, 0, 2)
    >>> rs = addset(baseset([]), baseset([]))
    >>> bool(rs), 0 in rs, rs.first(), rs.last()
    (False, False, None, None)

    iterate unsorted:
    >>> rs = addset(xs, ys)
    >>> # (use generator because pypy could call len())
    >>> list(x for x in rs) # without _genlist
    [0, 3, 2, 5, 4]
    >>> assert not rs._genlist
    >>> len(rs)
    5
    >>> [x for x in rs] # with _genlist
    [0, 3, 2, 5, 4]
    >>> assert rs._genlist

    iterate ascending:
    >>> rs = addset(xs, ys, ascending=True)
    >>> # (use generator because pypy could call len())
    >>> list(x for x in rs), list(x for x in rs.fastasc()) # without _asclist
    ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
    >>> assert not rs._asclist
    >>> len(rs)
    5
    >>> [x for x in rs], [x for x in rs.fastasc()]
    ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
    >>> assert rs._asclist

    iterate descending:
    >>> rs = addset(xs, ys, ascending=False)
    >>> # (use generator because pypy could call len())
    >>> list(x for x in rs), list(x for x in rs.fastdesc()) # without _asclist
    ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
    >>> assert not rs._asclist
    >>> len(rs)
    5
    >>> [x for x in rs], [x for x in rs.fastdesc()]
    ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
    >>> assert rs._asclist

    iterate ascending without fastasc:
    >>> rs = addset(xs, generatorset(ys), ascending=True)
    >>> assert rs.fastasc is None
    >>> [x for x in rs]
    [0, 2, 3, 4, 5]

    iterate descending without fastdesc:
    >>> rs = addset(generatorset(xs), ys, ascending=False)
    >>> assert rs.fastdesc is None
    >>> [x for x in rs]
    [5, 4, 3, 2, 0]
    """
    def __init__(self, revs1, revs2, ascending=None):
        self._r1 = revs1
        self._r2 = revs2
        self._iter = None
        self._ascending = ascending
        self._genlist = None
        self._asclist = None

    def __len__(self):
        return len(self._list)

    def __nonzero__(self):
        return bool(self._r1) or bool(self._r2)

    @util.propertycache
    def _list(self):
        if not self._genlist:
            self._genlist = baseset(iter(self))
        return self._genlist

    def __iter__(self):
        """Iterate over both collections without repeating elements

        If the ascending attribute is not set, iterate over the first one and
        then over the second one checking for membership on the first one so we
        don't yield any duplicates.

        If the ascending attribute is set, iterate over both collections at the
        same time, yielding only one value at a time in the given order.
        """
        if self._ascending is None:
            if self._genlist:
                return iter(self._genlist)
            def arbitraryordergen():
                for r in self._r1:
                    yield r
                inr1 = self._r1.__contains__
                for r in self._r2:
                    if not inr1(r):
                        yield r
            return arbitraryordergen()
        # try to use our own fast iterator if it exists
        self._trysetasclist()
        if self._ascending:
            attr = 'fastasc'
        else:
            attr = 'fastdesc'
        it = getattr(self, attr)
        if it is not None:
            return it()
        # maybe half of the component supports fast
        # get iterator for _r1
        iter1 = getattr(self._r1, attr)
        if iter1 is None:
            # let's avoid side effects (not sure it matters)
            iter1 = iter(sorted(self._r1, reverse=not self._ascending))
        else:
            iter1 = iter1()
        # get iterator for _r2
        iter2 = getattr(self._r2, attr)
        if iter2 is None:
            # let's avoid side effects (not sure it matters)
            iter2 = iter(sorted(self._r2, reverse=not self._ascending))
        else:
            iter2 = iter2()
        return _iterordered(self._ascending, iter1, iter2)

    def _trysetasclist(self):
        """populate the _asclist attribute if possible and necessary"""
        if self._genlist is not None and self._asclist is None:
            self._asclist = sorted(self._genlist)

    @property
    def fastasc(self):
        self._trysetasclist()
        if self._asclist is not None:
            return self._asclist.__iter__
        iter1 = self._r1.fastasc
        iter2 = self._r2.fastasc
        if None in (iter1, iter2):
            return None
        return lambda: _iterordered(True, iter1(), iter2())

    @property
    def fastdesc(self):
        self._trysetasclist()
        if self._asclist is not None:
            return self._asclist.__reversed__
        iter1 = self._r1.fastdesc
        iter2 = self._r2.fastdesc
        if None in (iter1, iter2):
            return None
        return lambda: _iterordered(False, iter1(), iter2())

    def __contains__(self, x):
        return x in self._r1 or x in self._r2

    def sort(self, reverse=False):
        """Sort the added set

        For this we use the cached list with all the generated values and, if
        we know they are ascending or descending, we can sort them in a smart
        way.
        """
        self._ascending = not reverse

    def isascending(self):
        return self._ascending is not None and self._ascending

    def isdescending(self):
        return self._ascending is not None and not self._ascending

    def istopo(self):
        # not worth the trouble asserting if the two sets combined are still
        # in topological order. Use the sort() predicate to explicitly sort
        # again instead.
        return False

    def reverse(self):
        if self._ascending is None:
            self._list.reverse()
        else:
            self._ascending = not self._ascending

    def first(self):
        for x in self:
            return x
        return None

    def last(self):
        self.reverse()
        val = self.first()
        self.reverse()
        return val

    def __repr__(self):
        d = {None: '', False: '-', True: '+'}[self._ascending]
        return '<%s%s %r, %r>' % (type(self).__name__, d, self._r1, self._r2)

class generatorset(abstractsmartset):
    """Wrap a generator for lazy iteration

    Wrapper structure for generators that provides lazy membership and can
    be iterated more than once.
    When asked for membership it generates values until either it finds the
    requested one or has gone through all the elements in the generator
    """
    def __init__(self, gen, iterasc=None):
        """
        gen: a generator producing the values for the generatorset.
        """
        self._gen = gen
        self._asclist = None
        self._cache = {}
        self._genlist = []
        self._finished = False
        self._ascending = True
        if iterasc is not None:
            if iterasc:
                self.fastasc = self._iterator
                self.__contains__ = self._asccontains
            else:
                self.fastdesc = self._iterator
                self.__contains__ = self._desccontains

    def __nonzero__(self):
        # Do not use 'for r in self' because it will enforce the iteration
        # order (default ascending), possibly unrolling a whole descending
        # iterator.
        if self._genlist:
            return True
        for r in self._consumegen():
            return True
        return False

    def __contains__(self, x):
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True

        self._cache[x] = False
        return False

    def _asccontains(self, x):
        """version of contains optimised for ascending generators"""
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True
            if l > x:
                break

        self._cache[x] = False
        return False

    def _desccontains(self, x):
        """version of contains optimised for descending generators"""
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True
            if l < x:
                break

        self._cache[x] = False
        return False

    def __iter__(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        if it is not None:
            return it()
        # we need to consume the iterator
        for x in self._consumegen():
            pass
        # recall the same code
        return iter(self)

    def _iterator(self):
        if self._finished:
            return iter(self._genlist)

        # We have to use this complex iteration strategy to allow multiple
        # iterations at the same time. We need to be able to catch revisions
        # removed from _consumegen and added to genlist in another instance.
        #
        # Getting rid of it would provide about a 15% speedup on this
        # iteration.
        genlist = self._genlist
        nextrev = self._consumegen().next
        _len = len # cache global lookup
        def gen():
            i = 0
            while True:
                if i < _len(genlist):
                    yield genlist[i]
                else:
                    yield nextrev()
                i += 1
        return gen()

    def _consumegen(self):
        cache = self._cache
        genlist = self._genlist.append
        for item in self._gen:
            cache[item] = True
            genlist(item)
            yield item
        if not self._finished:
            self._finished = True
            asc = self._genlist[:]
            asc.sort()
            self._asclist = asc
            self.fastasc = asc.__iter__
            self.fastdesc = asc.__reversed__

    def __len__(self):
        for x in self._consumegen():
            pass
        return len(self._genlist)

    def sort(self, reverse=False):
        self._ascending = not reverse

    def reverse(self):
        self._ascending = not self._ascending

    def isascending(self):
        return self._ascending

    def isdescending(self):
        return not self._ascending

    def istopo(self):
        # not worth the trouble asserting if the two sets combined are still
        # in topological order. Use the sort() predicate to explicitly sort
        # again instead.
        return False

    def first(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        if it is None:
            # we need to consume all and try again
            for x in self._consumegen():
                pass
            return self.first()
        return next(it(), None)

    def last(self):
        if self._ascending:
            it = self.fastdesc
        else:
            it = self.fastasc
        if it is None:
            # we need to consume all and try again
            for x in self._consumegen():
                pass
            return self.last()
        return next(it(), None)

    def __repr__(self):
        d = {False: '-', True: '+'}[self._ascending]
        return '<%s%s>' % (type(self).__name__, d)

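# Minimal usage sketch (illustrative comment only): generatorset memoizes what
# the wrapped generator has produced, so membership tests and repeated
# iteration consume the underlying generator only once.
#
#     gs = generatorset(iter([10, 20, 30]), iterasc=True)
#     20 in gs     # -> True, stops consuming once 20 is produced
#     list(gs)     # -> [10, 20, 30]
#     list(gs)     # -> [10, 20, 30], served from the cached list
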
3492 class spanset(abstractsmartset):
3488 class spanset(abstractsmartset):
3493 """Duck type for baseset class which represents a range of revisions and
3489 """Duck type for baseset class which represents a range of revisions and
3494 can work lazily and without having all the range in memory
3490 can work lazily and without having all the range in memory
3495
3491
3496 Note that spanset(x, y) behave almost like xrange(x, y) except for two
3492 Note that spanset(x, y) behave almost like xrange(x, y) except for two
3497 notable points:
3493 notable points:
3498 - when x < y it will be automatically descending,
3494 - when x < y it will be automatically descending,
3499 - revision filtered with this repoview will be skipped.
3495 - revision filtered with this repoview will be skipped.
3500
3496
3501 """
3497 """
3502 def __init__(self, repo, start=0, end=None):
3498 def __init__(self, repo, start=0, end=None):
3503 """
3499 """
3504 start: first revision included the set
3500 start: first revision included the set
3505 (default to 0)
3501 (default to 0)
3506 end: first revision excluded (last+1)
3502 end: first revision excluded (last+1)
3507 (default to len(repo)
3503 (default to len(repo)
3508
3504
3509 Spanset will be descending if `end` < `start`.
3505 Spanset will be descending if `end` < `start`.
3510 """
3506 """
3511 if end is None:
3507 if end is None:
3512 end = len(repo)
3508 end = len(repo)
3513 self._ascending = start <= end
3509 self._ascending = start <= end
3514 if not self._ascending:
3510 if not self._ascending:
3515 start, end = end + 1, start +1
3511 start, end = end + 1, start +1
3516 self._start = start
3512 self._start = start
3517 self._end = end
3513 self._end = end
3518 self._hiddenrevs = repo.changelog.filteredrevs
3514 self._hiddenrevs = repo.changelog.filteredrevs
3519
3515
3520 def sort(self, reverse=False):
3516 def sort(self, reverse=False):
3521 self._ascending = not reverse
3517 self._ascending = not reverse
3522
3518
3523 def reverse(self):
3519 def reverse(self):
3524 self._ascending = not self._ascending
3520 self._ascending = not self._ascending
3525
3521
3526 def istopo(self):
3522 def istopo(self):
3527 # not worth the trouble asserting if the two sets combined are still
3523 # not worth the trouble asserting if the two sets combined are still
3528 # in topographical order. Use the sort() predicate to explicitly sort
3524 # in topographical order. Use the sort() predicate to explicitly sort
3529 # again instead.
3525 # again instead.
3530 return False
3526 return False

    def _iterfilter(self, iterrange):
        s = self._hiddenrevs
        for r in iterrange:
            if r not in s:
                yield r

    def __iter__(self):
        if self._ascending:
            return self.fastasc()
        else:
            return self.fastdesc()

    def fastasc(self):
        iterrange = xrange(self._start, self._end)
        if self._hiddenrevs:
            return self._iterfilter(iterrange)
        return iter(iterrange)

    def fastdesc(self):
        iterrange = xrange(self._end - 1, self._start - 1, -1)
        if self._hiddenrevs:
            return self._iterfilter(iterrange)
        return iter(iterrange)

    def __contains__(self, rev):
        hidden = self._hiddenrevs
        return ((self._start <= rev < self._end)
                and not (hidden and rev in hidden))

    def __nonzero__(self):
        for r in self:
            return True
        return False

    def __len__(self):
        if not self._hiddenrevs:
            return abs(self._end - self._start)
        else:
            count = 0
            start = self._start
            end = self._end
            for rev in self._hiddenrevs:
                if (end < rev <= start) or (start <= rev < end):
                    count += 1
            return abs(self._end - self._start) - count

    def isascending(self):
        return self._ascending

    def isdescending(self):
        return not self._ascending

    def first(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        for x in it():
            return x
        return None

    def last(self):
        if self._ascending:
            it = self.fastdesc
        else:
            it = self.fastasc
        for x in it():
            return x
        return None

    def __repr__(self):
        d = {False: '-', True: '+'}[self._ascending]
        return '<%s%s %d:%d>' % (type(self).__name__, d,
                                 self._start, self._end - 1)

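For orientation, the sketch below mirrors what the spanset code above implements: a half-open [start, end) range of revision numbers, where a descending request is normalized to the same ascending bounds, and where hidden revisions are excluded from iteration and membership tests. It is a simplified standalone illustration, not part of revset.py; the demospan class and its sample values are made up.

class demospan(object):
    """Minimal stand-in for spanset: a half-open [start, end) range of
    revision numbers with an optional set of hidden revisions filtered out."""
    def __init__(self, start, end, hidden=None):
        self._ascending = start <= end
        if not self._ascending:
            start, end = end + 1, start + 1
        self._start, self._end = start, end
        self._hidden = hidden or set()

    def __iter__(self):
        rs = range(self._start, self._end)
        if not self._ascending:
            rs = reversed(rs)
        return (r for r in rs if r not in self._hidden)

    def __contains__(self, rev):
        return self._start <= rev < self._end and rev not in self._hidden

# ascending 2:6 (half-open), with revision 4 hidden
s = demospan(2, 6, hidden={4})
assert list(s) == [2, 3, 5]
assert 3 in s and 4 not in s and 6 not in s

# a descending request (start > end) stores the same bounds, iterates backwards
d = demospan(5, 2)
assert list(d) == [5, 4, 3]
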
class fullreposet(spanset):
    """a set containing all revisions in the repo

    This class exists to host special optimization and magic to handle virtual
    revisions such as "null".
    """

    def __init__(self, repo):
        super(fullreposet, self).__init__(repo)

    def __and__(self, other):
        """As self contains the whole repo, every element of the other set
        should also be in self. Therefore `self & other = other`.

        This boldly assumes that `other` contains valid revs only.
        """
        # other is not a smartset; make it so
        if not util.safehasattr(other, 'isascending'):
            # filter out hidden revisions
            # (this boldly assumes all smartsets are pure)
            #
            # `other` was used with "&", let's assume this is a set-like
            # object.
            other = baseset(other - self._hiddenrevs)

        # XXX As fullreposet is also used as bootstrap, this is wrong.
        #
        # With a giveme312() revset returning [3,1,2], this makes
        #   'hg log -r "giveme312()"' -> 1, 2, 3 (wrong)
        # We cannot just drop it because other usages still need to sort it:
        #   'hg log -r "all() and giveme312()"' -> 1, 2, 3 (right)
        #
        # There are also some faulty revset implementations that rely on it
        # (eg: children as of its state in e8075329c5fb)
        #
        # When we fix the two points above we can move this into the if clause
        other.sort(reverse=self.isdescending())
        return other

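To make the effect of __and__ concrete, here is a standalone mirror of its two steps: drop hidden revisions from a non-smartset operand, then force a sort in the direction of the full repo set. The function name, the sample revisions, and the hidden set are illustrative only; the last two asserts also show the ordering loss described in the XXX comment above.

def intersect_with_full(other, hidden=frozenset(), descending=False):
    # mirror of the non-smartset branch above: drop hidden revs, then
    # sort in the direction of the full repo set
    return sorted(set(other) - set(hidden), reverse=descending)

# hidden revisions are dropped from a plain (non-smartset) operand
assert intersect_with_full([3, 1, 2], hidden={2}) == [1, 3]
# the forced sort is what loses the revset's own order ([3, 1, 2] -> [1, 2, 3])
assert intersect_with_full([3, 1, 2]) == [1, 2, 3]
assert intersect_with_full([3, 1, 2], descending=True) == [3, 2, 1]
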
def prettyformatset(revs):
    lines = []
    rs = repr(revs)
    p = 0
    while p < len(rs):
        q = rs.find('<', p + 1)
        if q < 0:
            q = len(rs)
        l = rs.count('<', 0, p) - rs.count('>', 0, p)
        assert l >= 0
        lines.append((l, rs[p:q].rstrip()))
        p = q
    return '\n'.join(' ' * l + s for l, s in lines)

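prettyformatset() splits a smartset's repr at every '<' and indents each fragment by one space per nesting level, which is handy when debugging nested revset results. A small usage sketch follows; the nested repr string is made up for illustration, and the call assumes prettyformatset is in scope (for example via `from mercurial.revset import prettyformatset`).

class _fakeset(object):
    # stand-in object whose repr looks like a nested smartset
    def __repr__(self):
        return "<addset <baseset [1, 2]>, <spanset+ 0:5>>"

print(prettyformatset(_fakeset()))
# <addset
#  <baseset [1, 2]>,
#  <spanset+ 0:5>>
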
def loadpredicate(ui, extname, registrarobj):
    """Load revset predicates from the specified registrarobj
    """
    for name, func in registrarobj._table.iteritems():
        symbols[name] = func
        if func._safe:
            safesymbols.add(name)

# load built-in predicates explicitly to set up safesymbols
loadpredicate(None, None, predicate)

# tell hggettext to extract docstrings from these functions:
i18nfunctions = symbols.values()
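
loadpredicate() is also the hook through which extensions contribute predicates: the extension loader hands each extension's revsetpredicate registrar object to it, so the new names land in symbols and, when flagged safe, in safesymbols. Below is a minimal extension-side sketch, assuming the registrar API of this Mercurial version; the onlymine() predicate and its logic are hypothetical, and argument checking is omitted for brevity.

# hypothetical extension module
from mercurial import registrar

revsetpredicate = registrar.revsetpredicate()

@revsetpredicate('onlymine()', safe=True)
def onlymine(repo, subset, x):
    """Changesets whose author contains the configured username."""
    me = repo.ui.config('ui', 'username') or ''
    return subset.filter(lambda r: me in repo[r].user())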