revset: build list of (key, reverse) pairs before sorting...
Yuya Nishihara
r29363:2d18c611 default
@@ -1,3665 +1,3664 @@
# revset.py - revision set queries for mercurial
#
# Copyright 2010 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import heapq
import re

from .i18n import _
from . import (
    destutil,
    encoding,
    error,
    hbisect,
    match as matchmod,
    node,
    obsolete as obsmod,
    parser,
    pathutil,
    phases,
    registrar,
    repoview,
    util,
)

def _revancestors(repo, revs, followfirst):
    """Like revlog.ancestors(), but supports followfirst."""
    if followfirst:
        cut = 1
    else:
        cut = None
    cl = repo.changelog

    def iterate():
        revs.sort(reverse=True)
        irevs = iter(revs)
        h = []

        inputrev = next(irevs, None)
        if inputrev is not None:
            heapq.heappush(h, -inputrev)

        seen = set()
        while h:
            current = -heapq.heappop(h)
            if current == inputrev:
                inputrev = next(irevs, None)
                if inputrev is not None:
                    heapq.heappush(h, -inputrev)
            if current not in seen:
                seen.add(current)
                yield current
                for parent in cl.parentrevs(current)[:cut]:
                    if parent != node.nullrev:
                        heapq.heappush(h, -parent)

    return generatorset(iterate(), iterasc=False)
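
# Illustration (not part of the original module): heapq is a min-heap, so
# pushing negated revision numbers makes the loop above pop the *largest*
# pending revision first:
#
#   >>> import heapq
#   >>> h = []
#   >>> for rev in (5, 12, 9):
#   ...     heapq.heappush(h, -rev)
#   >>> -heapq.heappop(h)
#   12
#
# which is why the resulting generatorset is flagged iterasc=False
# (ancestors are produced in descending revision order).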

def _revdescendants(repo, revs, followfirst):
    """Like revlog.descendants() but supports followfirst."""
    if followfirst:
        cut = 1
    else:
        cut = None

    def iterate():
        cl = repo.changelog
        # XXX this should be 'parentset.min()' assuming 'parentset' is a
        # smartset (and if it is not, it should.)
        first = min(revs)
        nullrev = node.nullrev
        if first == nullrev:
            # Are there nodes with a null first parent and a non-null
            # second one? Maybe. Do we care? Probably not.
            for i in cl:
                yield i
        else:
            seen = set(revs)
            for i in cl.revs(first + 1):
                for x in cl.parentrevs(i)[:cut]:
                    if x != nullrev and x in seen:
                        seen.add(i)
                        yield i
                        break

    return generatorset(iterate(), iterasc=True)

def _reachablerootspure(repo, minroot, roots, heads, includepath):
    """return (heads(::<roots> and ::<heads>))

    If includepath is True, return (<roots>::<heads>)."""
    if not roots:
        return []
    parentrevs = repo.changelog.parentrevs
    roots = set(roots)
    visit = list(heads)
    reachable = set()
    seen = {}
    # prefetch all the things! (because python is slow)
    reached = reachable.add
    dovisit = visit.append
    nextvisit = visit.pop
    # open-code the post-order traversal due to the tiny size of
    # sys.getrecursionlimit()
    while visit:
        rev = nextvisit()
        if rev in roots:
            reached(rev)
            if not includepath:
                continue
        parents = parentrevs(rev)
        seen[rev] = parents
        for parent in parents:
            if parent >= minroot and parent not in seen:
                dovisit(parent)
    if not reachable:
        return baseset()
    if not includepath:
        return reachable
    for rev in sorted(seen):
        for parent in seen[rev]:
            if parent in reachable:
                reached(rev)
    return reachable

def reachableroots(repo, roots, heads, includepath=False):
    """return (heads(::<roots> and ::<heads>))

    If includepath is True, return (<roots>::<heads>)."""
    if not roots:
        return baseset()
    minroot = roots.min()
    roots = list(roots)
    heads = list(heads)
    try:
        revs = repo.changelog.reachableroots(minroot, heads, roots, includepath)
    except AttributeError:
        revs = _reachablerootspure(repo, minroot, roots, heads, includepath)
    revs = baseset(revs)
    revs.sort()
    return revs
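
# Note (illustrative, not part of the original module): the try/except
# AttributeError above prefers a native changelog.reachableroots()
# implementation when the revlog index provides one, and falls back to the
# pure-Python _reachablerootspure() otherwise, roughly the pattern:
#
#   try:
#       result = obj.fast_implementation(args)   # may not exist
#   except AttributeError:
#       result = slow_python_fallback(args)
#
# Either way, baseset(revs).sort() normalizes the result to an ascending
# smartset before it is returned.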

elements = {
    # token-type: binding-strength, primary, prefix, infix, suffix
    "(": (21, None, ("group", 1, ")"), ("func", 1, ")"), None),
    "##": (20, None, None, ("_concat", 20), None),
    "~": (18, None, None, ("ancestor", 18), None),
    "^": (18, None, None, ("parent", 18), ("parentpost", 18)),
    "-": (5, None, ("negate", 19), ("minus", 5), None),
    "::": (17, None, ("dagrangepre", 17), ("dagrange", 17),
           ("dagrangepost", 17)),
    "..": (17, None, ("dagrangepre", 17), ("dagrange", 17),
           ("dagrangepost", 17)),
    ":": (15, "rangeall", ("rangepre", 15), ("range", 15), ("rangepost", 15)),
    "not": (10, None, ("not", 10), None, None),
    "!": (10, None, ("not", 10), None, None),
    "and": (5, None, None, ("and", 5), None),
    "&": (5, None, None, ("and", 5), None),
    "%": (5, None, None, ("only", 5), ("onlypost", 5)),
    "or": (4, None, None, ("or", 4), None),
    "|": (4, None, None, ("or", 4), None),
    "+": (4, None, None, ("or", 4), None),
    "=": (3, None, None, ("keyvalue", 3), None),
    ",": (2, None, None, ("list", 2), None),
    ")": (0, None, None, None, None),
    "symbol": (0, "symbol", None, None, None),
    "string": (0, "string", None, None, None),
    "end": (0, None, None, None, None),
}
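
# Illustration (not part of the original module): each entry above is
# (binding strength, primary, prefix, infix, suffix), and higher strengths
# bind tighter. For example the expression
#
#   a or b and c::d
#
# groups as  a or (b and (c::d))  because '::' (17) binds tighter than
# 'and' (5), which in turn binds tighter than 'or' (4).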

keywords = set(['and', 'or', 'not'])

# default set of valid characters for the initial letter of symbols
_syminitletters = set(c for c in [chr(i) for i in xrange(256)]
                      if c.isalnum() or c in '._@' or ord(c) > 127)

# default set of valid characters for non-initial letters of symbols
_symletters = set(c for c in [chr(i) for i in xrange(256)]
                  if c.isalnum() or c in '-._/@' or ord(c) > 127)

def tokenize(program, lookup=None, syminitletters=None, symletters=None):
    '''
    Parse a revset statement into a stream of tokens

    ``syminitletters`` is the set of valid characters for the initial
    letter of symbols.

    By default, character ``c`` is recognized as valid for initial
    letter of symbols, if ``c.isalnum() or c in '._@' or ord(c) > 127``.

    ``symletters`` is the set of valid characters for non-initial
    letters of symbols.

    By default, character ``c`` is recognized as valid for non-initial
    letters of symbols, if ``c.isalnum() or c in '-._/@' or ord(c) > 127``.

    Check that @ is a valid unquoted token character (issue3686):
    >>> list(tokenize("@::"))
    [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]

    '''
    if syminitletters is None:
        syminitletters = _syminitletters
    if symletters is None:
        symletters = _symletters

    if program and lookup:
        # attempt to parse old-style ranges first to deal with
        # things like old-tag which contain query metacharacters
        parts = program.split(':', 1)
        if all(lookup(sym) for sym in parts if sym):
            if parts[0]:
                yield ('symbol', parts[0], 0)
            if len(parts) > 1:
                s = len(parts[0])
                yield (':', None, s)
                if parts[1]:
                    yield ('symbol', parts[1], s + 1)
            yield ('end', None, len(program))
            return

    pos, l = 0, len(program)
    while pos < l:
        c = program[pos]
        if c.isspace(): # skip inter-token whitespace
            pass
        elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
            yield ('::', None, pos)
            pos += 1 # skip ahead
        elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
            yield ('..', None, pos)
            pos += 1 # skip ahead
        elif c == '#' and program[pos:pos + 2] == '##': # look ahead carefully
            yield ('##', None, pos)
            pos += 1 # skip ahead
        elif c in "():=,-|&+!~^%": # handle simple operators
            yield (c, None, pos)
        elif (c in '"\'' or c == 'r' and
              program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
            if c == 'r':
                pos += 1
                c = program[pos]
                decode = lambda x: x
            else:
                decode = parser.unescapestr
            pos += 1
            s = pos
            while pos < l: # find closing quote
                d = program[pos]
                if d == '\\': # skip over escaped characters
                    pos += 2
                    continue
                if d == c:
                    yield ('string', decode(program[s:pos]), s)
                    break
                pos += 1
            else:
                raise error.ParseError(_("unterminated string"), s)
        # gather up a symbol/keyword
        elif c in syminitletters:
            s = pos
            pos += 1
            while pos < l: # find end of symbol
                d = program[pos]
                if d not in symletters:
                    break
                if d == '.' and program[pos - 1] == '.': # special case for ..
                    pos -= 1
                    break
                pos += 1
            sym = program[s:pos]
            if sym in keywords: # operator keywords
                yield (sym, None, s)
            elif '-' in sym:
                # some jerk gave us foo-bar-baz, try to check if it's a symbol
                if lookup and lookup(sym):
                    # looks like a real symbol
                    yield ('symbol', sym, s)
                else:
                    # looks like an expression
                    parts = sym.split('-')
                    for p in parts[:-1]:
                        if p: # possible consecutive -
                            yield ('symbol', p, s)
                            s += len(p)
                        yield ('-', None, pos)
                        s += 1
                    if parts[-1]: # possible trailing -
                        yield ('symbol', parts[-1], s)
            else:
                yield ('symbol', sym, s)
            pos -= 1
        else:
            raise error.ParseError(_("syntax error in revset '%s'") %
                                   program, pos)
        pos += 1
    yield ('end', None, pos)
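
# Illustration (not part of the original module): a couple more
# doctest-style samples of the token stream, in the same spirit as the
# issue3686 check in the docstring above:
#
#   >>> list(tokenize("2:5"))
#   [('symbol', '2', 0), (':', None, 1), ('symbol', '5', 2), ('end', None, 3)]
#   >>> list(tokenize("'foo bar'"))
#   [('string', 'foo bar', 1), ('end', None, 9)]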

# helpers

def getstring(x, err):
    if x and (x[0] == 'string' or x[0] == 'symbol'):
        return x[1]
    raise error.ParseError(err)

def getlist(x):
    if not x:
        return []
    if x[0] == 'list':
        return list(x[1:])
    return [x]

def getargs(x, min, max, err):
    l = getlist(x)
    if len(l) < min or (max >= 0 and len(l) > max):
        raise error.ParseError(err)
    return l

def getargsdict(x, funcname, keys):
    return parser.buildargsdict(getlist(x), funcname, keys.split(),
                                keyvaluenode='keyvalue', keynode='symbol')

def getset(repo, subset, x):
    if not x:
        raise error.ParseError(_("missing argument"))
    s = methods[x[0]](repo, subset, *x[1:])
    if util.safehasattr(s, 'isascending'):
        return s
    # else case should not happen, because all non-func are internal,
    # ignoring for now.
    if x[0] == 'func' and x[1][0] == 'symbol' and x[1][1] in symbols:
        repo.ui.deprecwarn('revset "%s" uses list instead of smartset'
                           % x[1][1],
                           '3.9')
    return baseset(s)
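
# Illustration (not part of the original module): getset() dispatches on
# the type of the parse-tree node. For a revset such as 'a and b' the
# parser produces roughly
#
#   ('and', ('symbol', 'a'), ('symbol', 'b'))
#
# so getset() looks up methods['and'] (andset below) and recurses into the
# operands, threading the current subset through each step.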

def _getrevsource(repo, r):
    extra = repo[r].extra()
    for label in ('source', 'transplant_source', 'rebase_source'):
        if label in extra:
            try:
                return repo[extra[label]].rev()
            except error.RepoLookupError:
                pass
    return None

# operator methods

def stringset(repo, subset, x):
    x = repo[x].rev()
    if (x in subset
        or x == node.nullrev and isinstance(subset, fullreposet)):
        return baseset([x])
    return baseset()

def rangeset(repo, subset, x, y):
    m = getset(repo, fullreposet(repo), x)
    n = getset(repo, fullreposet(repo), y)

    if not m or not n:
        return baseset()
    m, n = m.first(), n.last()

    if m == n:
        r = baseset([m])
    elif n == node.wdirrev:
        r = spanset(repo, m, len(repo)) + baseset([n])
    elif m == node.wdirrev:
        r = baseset([m]) + spanset(repo, len(repo) - 1, n - 1)
    elif m < n:
        r = spanset(repo, m, n + 1)
    else:
        r = spanset(repo, m, n - 1)
    # XXX We should combine with subset first: 'subset & baseset(...)'. This is
    # necessary to ensure we preserve the order in subset.
    #
    # This has performance implication, carrying the sorting over when possible
    # would be more efficient.
    return r & subset

def dagrange(repo, subset, x, y):
    r = fullreposet(repo)
    xs = reachableroots(repo, getset(repo, r, x), getset(repo, r, y),
                        includepath=True)
    return subset & xs

def andset(repo, subset, x, y):
    return getset(repo, getset(repo, subset, x), y)

def differenceset(repo, subset, x, y):
    return getset(repo, subset, x) - getset(repo, subset, y)

def orset(repo, subset, *xs):
    assert xs
    if len(xs) == 1:
        return getset(repo, subset, xs[0])
    p = len(xs) // 2
    a = orset(repo, subset, *xs[:p])
    b = orset(repo, subset, *xs[p:])
    return a + b
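
# Illustration (not part of the original module): orset() splits its
# operands in half and recurses, so four operands combine as a balanced
# tree rather than a left-leaning chain:
#
#   orset(repo, subset, a, b, c, d)  ->  (a + b) + (c + d)
#
# which keeps the resulting addset nesting shallow when a revset ors
# together many operands.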

def notset(repo, subset, x):
    return subset - getset(repo, subset, x)

def listset(repo, subset, *xs):
    raise error.ParseError(_("can't use a list in this context"),
                           hint=_('see hg help "revsets.x or y"'))

def keyvaluepair(repo, subset, k, v):
    raise error.ParseError(_("can't use a key-value pair in this context"))

def func(repo, subset, a, b):
    if a[0] == 'symbol' and a[1] in symbols:
        return symbols[a[1]](repo, subset, b)

    keep = lambda fn: getattr(fn, '__doc__', None) is not None

    syms = [s for (s, fn) in symbols.items() if keep(fn)]
    raise error.UnknownIdentifier(a[1], syms)

# functions

# symbols are callables like:
#   fn(repo, subset, x)
# with:
#   repo - current repository instance
#   subset - of revisions to be examined
#   x - argument in tree form
symbols = {}

# symbols which can't be used for a DoS attack for any given input
# (e.g. those which accept regexes as plain strings shouldn't be included)
# functions that just return a lot of changesets (like all) don't count here
safesymbols = set()

predicate = registrar.revsetpredicate()

@predicate('_destupdate')
def _destupdate(repo, subset, x):
    # experimental revset for update destination
    args = getargsdict(x, 'limit', 'clean check')
    return subset & baseset([destutil.destupdate(repo, **args)[0]])

@predicate('_destmerge')
def _destmerge(repo, subset, x):
    # experimental revset for merge destination
    sourceset = None
    if x is not None:
        sourceset = getset(repo, fullreposet(repo), x)
    return subset & baseset([destutil.destmerge(repo, sourceset=sourceset)])

@predicate('adds(pattern)', safe=True)
def adds(repo, subset, x):
    """Changesets that add a file matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "adds" is a keyword
    pat = getstring(x, _("adds requires a pattern"))
    return checkstatus(repo, subset, pat, 1)

@predicate('ancestor(*changeset)', safe=True)
def ancestor(repo, subset, x):
    """A greatest common ancestor of the changesets.

    Accepts 0 or more changesets.
    Will return empty list when passed no args.
    Greatest common ancestor of a single changeset is that changeset.
    """
    # i18n: "ancestor" is a keyword
    l = getlist(x)
    rl = fullreposet(repo)
    anc = None

    # (getset(repo, rl, i) for i in l) generates a list of lists
    for revs in (getset(repo, rl, i) for i in l):
        for r in revs:
            if anc is None:
                anc = repo[r]
            else:
                anc = anc.ancestor(repo[r])

    if anc is not None and anc.rev() in subset:
        return baseset([anc.rev()])
    return baseset()

def _ancestors(repo, subset, x, followfirst=False):
    heads = getset(repo, fullreposet(repo), x)
    if not heads:
        return baseset()
    s = _revancestors(repo, heads, followfirst)
    return subset & s

@predicate('ancestors(set)', safe=True)
def ancestors(repo, subset, x):
    """Changesets that are ancestors of a changeset in set.
    """
    return _ancestors(repo, subset, x)

@predicate('_firstancestors', safe=True)
def _firstancestors(repo, subset, x):
    # ``_firstancestors(set)``
    # Like ``ancestors(set)`` but follows only the first parents.
    return _ancestors(repo, subset, x, followfirst=True)

def ancestorspec(repo, subset, x, n):
    """``set~n``
    Changesets that are the Nth ancestor (first parents only) of a changeset
    in set.
    """
    try:
        n = int(n[1])
    except (TypeError, ValueError):
        raise error.ParseError(_("~ expects a number"))
    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        for i in range(n):
            r = cl.parentrevs(r)[0]
        ps.add(r)
    return subset & ps
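
# Illustration (not part of the original module): 'x~n' walks n steps of
# first parents, so with a linear history 0 -> 1 -> 2 -> 3 the revset
# '3~2' selects revision 1, while '3~0' is revision 3 itself (range(0)
# yields nothing, so the starting revision is added unchanged).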

@predicate('author(string)', safe=True)
def author(repo, subset, x):
    """Alias for ``user(string)``.
    """
    # i18n: "author" is a keyword
    n = encoding.lower(getstring(x, _("author requires a string")))
    kind, pattern, matcher = _substringmatcher(n)
    return subset.filter(lambda x: matcher(encoding.lower(repo[x].user())),
                         condrepr=('<user %r>', n))

@predicate('bisect(string)', safe=True)
def bisect(repo, subset, x):
    """Changesets marked in the specified bisect status:

    - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
    - ``goods``, ``bads`` : csets topologically good/bad
    - ``range`` : csets taking part in the bisection
    - ``pruned`` : csets that are goods, bads or skipped
    - ``untested`` : csets whose fate is yet unknown
    - ``ignored`` : csets ignored due to DAG topology
    - ``current`` : the cset currently being bisected
    """
    # i18n: "bisect" is a keyword
    status = getstring(x, _("bisect requires a string")).lower()
    state = set(hbisect.get(repo, status))
    return subset & state

# Backward-compatibility
# - no help entry so that we do not advertise it any more
@predicate('bisected', safe=True)
def bisected(repo, subset, x):
    return bisect(repo, subset, x)

@predicate('bookmark([name])', safe=True)
def bookmark(repo, subset, x):
    """The named bookmark or all bookmarks.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a bookmark that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "bookmark" is a keyword
    args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
    if args:
        bm = getstring(args[0],
                       # i18n: "bookmark" is a keyword
                       _('the argument to bookmark must be a string'))
        kind, pattern, matcher = util.stringmatcher(bm)
        bms = set()
        if kind == 'literal':
            bmrev = repo._bookmarks.get(pattern, None)
            if not bmrev:
                raise error.RepoLookupError(_("bookmark '%s' does not exist")
                                            % pattern)
            bms.add(repo[bmrev].rev())
        else:
            matchrevs = set()
            for name, bmrev in repo._bookmarks.iteritems():
                if matcher(name):
                    matchrevs.add(bmrev)
            if not matchrevs:
                raise error.RepoLookupError(_("no bookmarks exist"
                                              " that match '%s'") % pattern)
            for bmrev in matchrevs:
                bms.add(repo[bmrev].rev())
    else:
        bms = set([repo[r].rev()
                   for r in repo._bookmarks.values()])
    bms -= set([node.nullrev])
    return subset & bms

@predicate('branch(string or set)', safe=True)
def branch(repo, subset, x):
    """
    All changesets belonging to the given branch or the branches of the given
    changesets.

    If `string` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a branch that actually starts with `re:`,
    use the prefix `literal:`.
    """
    getbi = repo.revbranchcache().branchinfo

    try:
        b = getstring(x, '')
    except error.ParseError:
        # not a string, but another revspec, e.g. tip()
        pass
    else:
        kind, pattern, matcher = util.stringmatcher(b)
        if kind == 'literal':
            # note: falls through to the revspec case if no branch with
            # this name exists and pattern kind is not specified explicitly
            if pattern in repo.branchmap():
                return subset.filter(lambda r: matcher(getbi(r)[0]),
                                     condrepr=('<branch %r>', b))
            if b.startswith('literal:'):
                raise error.RepoLookupError(_("branch '%s' does not exist")
                                            % pattern)
        else:
            return subset.filter(lambda r: matcher(getbi(r)[0]),
                                 condrepr=('<branch %r>', b))

    s = getset(repo, fullreposet(repo), x)
    b = set()
    for r in s:
        b.add(getbi(r)[0])
    c = s.__contains__
    return subset.filter(lambda r: c(r) or getbi(r)[0] in b,
                         condrepr=lambda: '<branch %r>' % sorted(b))

@predicate('bumped()', safe=True)
def bumped(repo, subset, x):
    """Mutable changesets marked as successors of public changesets.

    Only non-public and non-obsolete changesets can be `bumped`.
    """
    # i18n: "bumped" is a keyword
    getargs(x, 0, 0, _("bumped takes no arguments"))
    bumped = obsmod.getrevs(repo, 'bumped')
    return subset & bumped

@predicate('bundle()', safe=True)
def bundle(repo, subset, x):
    """Changesets in the bundle.

    Bundle must be specified by the -R option."""

    try:
        bundlerevs = repo.changelog.bundlerevs
    except AttributeError:
        raise error.Abort(_("no bundle provided - specify with -R"))
    return subset & bundlerevs

def checkstatus(repo, subset, pat, field):
    hasset = matchmod.patkind(pat) == 'set'

    mcache = [None]
    def matches(x):
        c = repo[x]
        if not mcache[0] or hasset:
            mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
        m = mcache[0]
        fname = None
        if not m.anypats() and len(m.files()) == 1:
            fname = m.files()[0]
        if fname is not None:
            if fname not in c.files():
                return False
        else:
            for f in c.files():
                if m(f):
                    break
            else:
                return False
        files = repo.status(c.p1().node(), c.node())[field]
        if fname is not None:
            if fname in files:
                return True
        else:
            for f in files:
                if m(f):
                    return True

    return subset.filter(matches, condrepr=('<status[%r] %r>', field, pat))
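
# Note (illustrative, not part of the original module): 'field' indexes
# the tuple returned by repo.status(), where index 0 is the list of
# modified files, 1 the added files and 2 the removed files -- hence
# adds() above calls checkstatus(repo, subset, pat, 1).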

def _children(repo, narrow, parentset):
    if not parentset:
        return baseset()
    cs = set()
    pr = repo.changelog.parentrevs
    minrev = parentset.min()
    for r in narrow:
        if r <= minrev:
            continue
        for p in pr(r):
            if p in parentset:
                cs.add(r)
    # XXX using a set to feed the baseset is wrong. Sets are not ordered.
    # This does not break because of other fullreposet misbehavior.
    return baseset(cs)
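
# Note (illustrative, not part of the original module): the 'r <= minrev'
# shortcut above relies on revision numbers growing topologically -- a
# child always has a higher number than its parents -- so nothing at or
# below the smallest candidate parent can possibly be a child of the set.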

@predicate('children(set)', safe=True)
def children(repo, subset, x):
    """Child changesets of changesets in set.
    """
    s = getset(repo, fullreposet(repo), x)
    cs = _children(repo, subset, s)
    return subset & cs

@predicate('closed()', safe=True)
def closed(repo, subset, x):
    """Changeset is closed.
    """
    # i18n: "closed" is a keyword
    getargs(x, 0, 0, _("closed takes no arguments"))
    return subset.filter(lambda r: repo[r].closesbranch(),
                         condrepr='<branch closed>')

@predicate('contains(pattern)')
def contains(repo, subset, x):
    """The revision's manifest contains a file matching pattern (but might not
    modify it). See :hg:`help patterns` for information about file patterns.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.
    """
    # i18n: "contains" is a keyword
    pat = getstring(x, _("contains requires a pattern"))

    def matches(x):
        if not matchmod.patkind(pat):
            pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
            if pats in repo[x]:
                return True
        else:
            c = repo[x]
            m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
            for f in c.manifest():
                if m(f):
                    return True
        return False

    return subset.filter(matches, condrepr=('<contains %r>', pat))

@predicate('converted([id])', safe=True)
def converted(repo, subset, x):
    """Changesets converted from the given identifier in the old repository if
    present, or all converted changesets if no identifier is specified.
    """

    # There is exactly no chance of resolving the revision, so do a simple
    # string compare and hope for the best

    rev = None
    # i18n: "converted" is a keyword
    l = getargs(x, 0, 1, _('converted takes one or no arguments'))
    if l:
        # i18n: "converted" is a keyword
        rev = getstring(l[0], _('converted requires a revision'))

    def _matchvalue(r):
        source = repo[r].extra().get('convert_revision', None)
        return source is not None and (rev is None or source.startswith(rev))

    return subset.filter(lambda r: _matchvalue(r),
                         condrepr=('<converted %r>', rev))

@predicate('date(interval)', safe=True)
def date(repo, subset, x):
    """Changesets within the interval, see :hg:`help dates`.
    """
    # i18n: "date" is a keyword
    ds = getstring(x, _("date requires a string"))
    dm = util.matchdate(ds)
    return subset.filter(lambda x: dm(repo[x].date()[0]),
                         condrepr=('<date %r>', ds))

@predicate('desc(string)', safe=True)
def desc(repo, subset, x):
    """Search commit message for string. The match is case-insensitive.
    """
    # i18n: "desc" is a keyword
    ds = encoding.lower(getstring(x, _("desc requires a string")))

    def matches(x):
        c = repo[x]
        return ds in encoding.lower(c.description())

    return subset.filter(matches, condrepr=('<desc %r>', ds))

def _descendants(repo, subset, x, followfirst=False):
    roots = getset(repo, fullreposet(repo), x)
    if not roots:
        return baseset()
    s = _revdescendants(repo, roots, followfirst)

    # Both sets need to be ascending in order to lazily return the union
    # in the correct order.
    base = subset & roots
    desc = subset & s
    result = base + desc
    if subset.isascending():
        result.sort()
    elif subset.isdescending():
        result.sort(reverse=True)
    else:
        result = subset & result
    return result

@predicate('descendants(set)', safe=True)
def descendants(repo, subset, x):
    """Changesets which are descendants of changesets in set.
    """
    return _descendants(repo, subset, x)

@predicate('_firstdescendants', safe=True)
def _firstdescendants(repo, subset, x):
    # ``_firstdescendants(set)``
    # Like ``descendants(set)`` but follows only the first parents.
    return _descendants(repo, subset, x, followfirst=True)

@predicate('destination([set])', safe=True)
def destination(repo, subset, x):
    """Changesets that were created by a graft, transplant or rebase operation,
    with the given revisions specified as the source. Omitting the optional set
    is the same as passing all().
    """
    if x is not None:
        sources = getset(repo, fullreposet(repo), x)
    else:
        sources = fullreposet(repo)

    dests = set()

    # subset contains all of the possible destinations that can be returned, so
    # iterate over them and see if their source(s) were provided in the arg set.
    # Even if the immediate src of r is not in the arg set, src's source (or
    # further back) may be. Scanning back further than the immediate src allows
    # transitive transplants and rebases to yield the same results as transitive
    # grafts.
    for r in subset:
        src = _getrevsource(repo, r)
        lineage = None

        while src is not None:
            if lineage is None:
                lineage = list()

            lineage.append(r)

            # The visited lineage is a match if the current source is in the arg
            # set. Since every candidate dest is visited by way of iterating
            # subset, any dests further back in the lineage will be tested by a
            # different iteration over subset. Likewise, if the src was already
            # selected, the current lineage can be selected without going back
            # further.
            if src in sources or src in dests:
                dests.update(lineage)
                break

            r = src
            src = _getrevsource(repo, r)

    return subset.filter(dests.__contains__,
                         condrepr=lambda: '<destination %r>' % sorted(dests))
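
# Illustration (not part of the original module): if changeset A was
# grafted as B and B was later grafted as C, destination(A) matches both
# B and C -- when the loop above reaches C it walks C -> B -> A through
# the recorded sources and selects the whole visited lineage.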
876
876
877 @predicate('divergent()', safe=True)
877 @predicate('divergent()', safe=True)
878 def divergent(repo, subset, x):
878 def divergent(repo, subset, x):
879 """
879 """
880 Final successors of changesets with an alternative set of final successors.
880 Final successors of changesets with an alternative set of final successors.
881 """
881 """
882 # i18n: "divergent" is a keyword
882 # i18n: "divergent" is a keyword
883 getargs(x, 0, 0, _("divergent takes no arguments"))
883 getargs(x, 0, 0, _("divergent takes no arguments"))
884 divergent = obsmod.getrevs(repo, 'divergent')
884 divergent = obsmod.getrevs(repo, 'divergent')
885 return subset & divergent
885 return subset & divergent
886
886
887 @predicate('extinct()', safe=True)
887 @predicate('extinct()', safe=True)
888 def extinct(repo, subset, x):
888 def extinct(repo, subset, x):
889 """Obsolete changesets with obsolete descendants only.
889 """Obsolete changesets with obsolete descendants only.
890 """
890 """
891 # i18n: "extinct" is a keyword
891 # i18n: "extinct" is a keyword
892 getargs(x, 0, 0, _("extinct takes no arguments"))
892 getargs(x, 0, 0, _("extinct takes no arguments"))
893 extincts = obsmod.getrevs(repo, 'extinct')
893 extincts = obsmod.getrevs(repo, 'extinct')
894 return subset & extincts
894 return subset & extincts
895
895
896 @predicate('extra(label, [value])', safe=True)
896 @predicate('extra(label, [value])', safe=True)
897 def extra(repo, subset, x):
897 def extra(repo, subset, x):
898 """Changesets with the given label in the extra metadata, with the given
898 """Changesets with the given label in the extra metadata, with the given
899 optional value.
899 optional value.
900
900
901 If `value` starts with `re:`, the remainder of the value is treated as
901 If `value` starts with `re:`, the remainder of the value is treated as
902 a regular expression. To match a value that actually starts with `re:`,
902 a regular expression. To match a value that actually starts with `re:`,
903 use the prefix `literal:`.
903 use the prefix `literal:`.
904 """
904 """
905 args = getargsdict(x, 'extra', 'label value')
905 args = getargsdict(x, 'extra', 'label value')
906 if 'label' not in args:
906 if 'label' not in args:
907 # i18n: "extra" is a keyword
907 # i18n: "extra" is a keyword
908 raise error.ParseError(_('extra takes at least 1 argument'))
908 raise error.ParseError(_('extra takes at least 1 argument'))
909 # i18n: "extra" is a keyword
909 # i18n: "extra" is a keyword
910 label = getstring(args['label'], _('first argument to extra must be '
910 label = getstring(args['label'], _('first argument to extra must be '
911 'a string'))
911 'a string'))
912 value = None
912 value = None
913
913
914 if 'value' in args:
914 if 'value' in args:
915 # i18n: "extra" is a keyword
915 # i18n: "extra" is a keyword
916 value = getstring(args['value'], _('second argument to extra must be '
916 value = getstring(args['value'], _('second argument to extra must be '
917 'a string'))
917 'a string'))
918 kind, value, matcher = util.stringmatcher(value)
918 kind, value, matcher = util.stringmatcher(value)
919
919
920 def _matchvalue(r):
920 def _matchvalue(r):
921 extra = repo[r].extra()
921 extra = repo[r].extra()
922 return label in extra and (value is None or matcher(extra[label]))
922 return label in extra and (value is None or matcher(extra[label]))
923
923
924 return subset.filter(lambda r: _matchvalue(r),
924 return subset.filter(lambda r: _matchvalue(r),
925 condrepr=('<extra[%r] %r>', label, value))
925 condrepr=('<extra[%r] %r>', label, value))
926
926
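extra() delegates the re:/literal: prefix handling described in its docstring to util.stringmatcher, which returns a (kind, pattern, matcher) triple. The sketch below is not the real helper, only a rough stand-alone approximation of that prefix convention; simplestringmatcher and the sample value are made up.

import re

def simplestringmatcher(pattern):
    """Approximate re:/literal: prefix handling, returning a
    (kind, pattern, matcher) triple like util.stringmatcher does."""
    if pattern.startswith('re:'):
        pattern = pattern[3:]
        regex = re.compile(pattern)
        return 're', pattern, lambda s: regex.search(s) is not None
    if pattern.startswith('literal:'):
        pattern = pattern[8:]
    return 'literal', pattern, lambda s: s == pattern

kind, pat, matcher = simplestringmatcher('re:^rebase_source$')
print(matcher('rebase_source'))    # True
print(matcher('rebase_source2'))   # False, the anchors must match

With the matcher in hand, extra() only has to check that the label exists and, when a value was given, that the matcher accepts it.
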
927 @predicate('filelog(pattern)', safe=True)
927 @predicate('filelog(pattern)', safe=True)
928 def filelog(repo, subset, x):
928 def filelog(repo, subset, x):
929 """Changesets connected to the specified filelog.
929 """Changesets connected to the specified filelog.
930
930
931 For performance reasons, visits only revisions mentioned in the file-level
931 For performance reasons, visits only revisions mentioned in the file-level
932 filelog, rather than filtering through all changesets (much faster, but
932 filelog, rather than filtering through all changesets (much faster, but
933 doesn't include deletes or duplicate changes). For a slower, more accurate
933 doesn't include deletes or duplicate changes). For a slower, more accurate
934 result, use ``file()``.
934 result, use ``file()``.
935
935
936 The pattern without explicit kind like ``glob:`` is expected to be
936 The pattern without explicit kind like ``glob:`` is expected to be
937 relative to the current directory and match against a file exactly
937 relative to the current directory and match against a file exactly
938 for efficiency.
938 for efficiency.
939
939
940 If some linkrev points to revisions filtered by the current repoview, we'll
940 If some linkrev points to revisions filtered by the current repoview, we'll
941 work around it to return a non-filtered value.
941 work around it to return a non-filtered value.
942 """
942 """
943
943
944 # i18n: "filelog" is a keyword
944 # i18n: "filelog" is a keyword
945 pat = getstring(x, _("filelog requires a pattern"))
945 pat = getstring(x, _("filelog requires a pattern"))
946 s = set()
946 s = set()
947 cl = repo.changelog
947 cl = repo.changelog
948
948
949 if not matchmod.patkind(pat):
949 if not matchmod.patkind(pat):
950 f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
950 f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
951 files = [f]
951 files = [f]
952 else:
952 else:
953 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
953 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
954 files = (f for f in repo[None] if m(f))
954 files = (f for f in repo[None] if m(f))
955
955
956 for f in files:
956 for f in files:
957 fl = repo.file(f)
957 fl = repo.file(f)
958 known = {}
958 known = {}
959 scanpos = 0
959 scanpos = 0
960 for fr in list(fl):
960 for fr in list(fl):
961 fn = fl.node(fr)
961 fn = fl.node(fr)
962 if fn in known:
962 if fn in known:
963 s.add(known[fn])
963 s.add(known[fn])
964 continue
964 continue
965
965
966 lr = fl.linkrev(fr)
966 lr = fl.linkrev(fr)
967 if lr in cl:
967 if lr in cl:
968 s.add(lr)
968 s.add(lr)
969 elif scanpos is not None:
969 elif scanpos is not None:
970 # lowest matching changeset is filtered, scan further
970 # lowest matching changeset is filtered, scan further
971 # ahead in changelog
971 # ahead in changelog
972 start = max(lr, scanpos) + 1
972 start = max(lr, scanpos) + 1
973 scanpos = None
973 scanpos = None
974 for r in cl.revs(start):
974 for r in cl.revs(start):
975 # minimize parsing of non-matching entries
975 # minimize parsing of non-matching entries
976 if f in cl.revision(r) and f in cl.readfiles(r):
976 if f in cl.revision(r) and f in cl.readfiles(r):
977 try:
977 try:
978 # try to use manifest delta fastpath
978 # try to use manifest delta fastpath
979 n = repo[r].filenode(f)
979 n = repo[r].filenode(f)
980 if n not in known:
980 if n not in known:
981 if n == fn:
981 if n == fn:
982 s.add(r)
982 s.add(r)
983 scanpos = r
983 scanpos = r
984 break
984 break
985 else:
985 else:
986 known[n] = r
986 known[n] = r
987 except error.ManifestLookupError:
987 except error.ManifestLookupError:
988 # deletion in changelog
988 # deletion in changelog
989 continue
989 continue
990
990
991 return subset & s
991 return subset & s
992
992
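The docstring above notes that a linkrev may point at a changeset hidden by the current repoview, in which case filelog() scans further ahead in the changelog for another changeset introducing the same file node. The snippet below is a much-simplified, self-contained illustration of that recovery step; visiblelinkrev, visible and introducers are invented stand-ins for the changelog and manifest lookups.

def visiblelinkrev(linkrev, visible, introducers):
    """Map a filelog-recorded linkrev to a visible changelog revision.

    visible is the set of unfiltered changelog revisions, and introducers
    maps a changelog rev to the file node it introduces.
    """
    if linkrev in visible:
        return linkrev
    # the recorded introducer is filtered: scan forward for another
    # changeset introducing the same file node
    wanted = introducers.get(linkrev)
    for r in sorted(rev for rev in visible if rev > linkrev):
        if introducers.get(r) == wanted:
            return r
    return None

intro = {4: 'f1', 7: 'f1'}      # rev 7 reintroduces the same file node
print(visiblelinkrev(4, visible={5, 6, 7}, introducers=intro))   # 7
print(visiblelinkrev(6, visible={5, 6, 7}, introducers=intro))   # 6
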
993 @predicate('first(set, [n])', safe=True)
993 @predicate('first(set, [n])', safe=True)
994 def first(repo, subset, x):
994 def first(repo, subset, x):
995 """An alias for limit().
995 """An alias for limit().
996 """
996 """
997 return limit(repo, subset, x)
997 return limit(repo, subset, x)
998
998
999 def _follow(repo, subset, x, name, followfirst=False):
999 def _follow(repo, subset, x, name, followfirst=False):
1000 l = getargs(x, 0, 1, _("%s takes no arguments or a pattern") % name)
1000 l = getargs(x, 0, 1, _("%s takes no arguments or a pattern") % name)
1001 c = repo['.']
1001 c = repo['.']
1002 if l:
1002 if l:
1003 x = getstring(l[0], _("%s expected a pattern") % name)
1003 x = getstring(l[0], _("%s expected a pattern") % name)
1004 matcher = matchmod.match(repo.root, repo.getcwd(), [x],
1004 matcher = matchmod.match(repo.root, repo.getcwd(), [x],
1005 ctx=repo[None], default='path')
1005 ctx=repo[None], default='path')
1006
1006
1007 files = c.manifest().walk(matcher)
1007 files = c.manifest().walk(matcher)
1008
1008
1009 s = set()
1009 s = set()
1010 for fname in files:
1010 for fname in files:
1011 fctx = c[fname]
1011 fctx = c[fname]
1012 s = s.union(set(c.rev() for c in fctx.ancestors(followfirst)))
1012 s = s.union(set(c.rev() for c in fctx.ancestors(followfirst)))
1013 # include the revision responsible for the most recent version
1013 # include the revision responsible for the most recent version
1014 s.add(fctx.introrev())
1014 s.add(fctx.introrev())
1015 else:
1015 else:
1016 s = _revancestors(repo, baseset([c.rev()]), followfirst)
1016 s = _revancestors(repo, baseset([c.rev()]), followfirst)
1017
1017
1018 return subset & s
1018 return subset & s
1019
1019
1020 @predicate('follow([pattern])', safe=True)
1020 @predicate('follow([pattern])', safe=True)
1021 def follow(repo, subset, x):
1021 def follow(repo, subset, x):
1022 """
1022 """
1023 An alias for ``::.`` (ancestors of the working directory's first parent).
1023 An alias for ``::.`` (ancestors of the working directory's first parent).
1024 If a pattern is specified, the histories of files matching the given
1024 If a pattern is specified, the histories of files matching the given
1025 pattern are followed, including copies.
1025 pattern are followed, including copies.
1026 """
1026 """
1027 return _follow(repo, subset, x, 'follow')
1027 return _follow(repo, subset, x, 'follow')
1028
1028
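When follow() is given a pattern, the history of each matching file is walked back through its ancestors, and renames or copies are followed across file boundaries. The snippet below is only a toy model of that "including copies" behaviour: followfile, filehistory and copies are invented structures, not the real filectx ancestry API.

def followfile(path, rev, filehistory, copies):
    """Collect (path, rev) pairs reachable by walking a file back in time.

    filehistory[path] lists, newest first, the revisions touching path;
    copies maps a (path, rev) introduction point to the (oldpath, oldrev)
    it was copied or renamed from.
    """
    result = []
    while path is not None:
        revs = [r for r in filehistory.get(path, []) if r <= rev]
        result.extend((path, r) for r in revs)
        # jump across a rename/copy recorded at the oldest visited revision
        oldest = revs[-1] if revs else rev
        path, rev = copies.get((path, oldest), (None, None))
    return result

history = {'b.txt': [5, 4], 'a.txt': [2, 1]}
copymap = {('b.txt', 4): ('a.txt', 2)}   # b.txt was copied from a.txt in rev 4
print(followfile('b.txt', 5, history, copymap))
# [('b.txt', 5), ('b.txt', 4), ('a.txt', 2), ('a.txt', 1)]
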
1029 @predicate('_followfirst', safe=True)
1029 @predicate('_followfirst', safe=True)
1030 def _followfirst(repo, subset, x):
1030 def _followfirst(repo, subset, x):
1031 # ``followfirst([pattern])``
1031 # ``followfirst([pattern])``
1032 # Like ``follow([pattern])`` but follows only the first parent of
1032 # Like ``follow([pattern])`` but follows only the first parent of
1033 # every revision or file revision.
1033 # every revision or file revision.
1034 return _follow(repo, subset, x, '_followfirst', followfirst=True)
1034 return _follow(repo, subset, x, '_followfirst', followfirst=True)
1035
1035
1036 @predicate('all()', safe=True)
1036 @predicate('all()', safe=True)
1037 def getall(repo, subset, x):
1037 def getall(repo, subset, x):
1038 """All changesets, the same as ``0:tip``.
1038 """All changesets, the same as ``0:tip``.
1039 """
1039 """
1040 # i18n: "all" is a keyword
1040 # i18n: "all" is a keyword
1041 getargs(x, 0, 0, _("all takes no arguments"))
1041 getargs(x, 0, 0, _("all takes no arguments"))
1042 return subset & spanset(repo) # drop "null" if any
1042 return subset & spanset(repo) # drop "null" if any
1043
1043
1044 @predicate('grep(regex)')
1044 @predicate('grep(regex)')
1045 def grep(repo, subset, x):
1045 def grep(repo, subset, x):
1046 """Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
1046 """Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
1047 to ensure special escape characters are handled correctly. Unlike
1047 to ensure special escape characters are handled correctly. Unlike
1048 ``keyword(string)``, the match is case-sensitive.
1048 ``keyword(string)``, the match is case-sensitive.
1049 """
1049 """
1050 try:
1050 try:
1051 # i18n: "grep" is a keyword
1051 # i18n: "grep" is a keyword
1052 gr = re.compile(getstring(x, _("grep requires a string")))
1052 gr = re.compile(getstring(x, _("grep requires a string")))
1053 except re.error as e:
1053 except re.error as e:
1054 raise error.ParseError(_('invalid match pattern: %s') % e)
1054 raise error.ParseError(_('invalid match pattern: %s') % e)
1055
1055
1056 def matches(x):
1056 def matches(x):
1057 c = repo[x]
1057 c = repo[x]
1058 for e in c.files() + [c.user(), c.description()]:
1058 for e in c.files() + [c.user(), c.description()]:
1059 if gr.search(e):
1059 if gr.search(e):
1060 return True
1060 return True
1061 return False
1061 return False
1062
1062
1063 return subset.filter(matches, condrepr=('<grep %r>', gr.pattern))
1063 return subset.filter(matches, condrepr=('<grep %r>', gr.pattern))
1064
1064
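grep() differs from keyword() in the two ways spelled out in the docstring: the argument is a regular expression and the match is case-sensitive, checked against the changed files, the user and the description. A tiny stand-alone equivalent of that check follows; grepmatch and the sample fields are made up.

import re

def grepmatch(pattern, fields):
    """Case-sensitive regex search across a changeset's text fields."""
    rx = re.compile(pattern)
    return any(rx.search(field) for field in fields)

fields = ['src/parser.py', 'Alice <alice@example.com>', 'Fix issue 42 in tokenizer']
print(grepmatch(r'issue \d+', fields))   # True
print(grepmatch(r'ISSUE', fields))       # False, unlike a case-insensitive keyword search
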
1065 @predicate('_matchfiles', safe=True)
1065 @predicate('_matchfiles', safe=True)
1066 def _matchfiles(repo, subset, x):
1066 def _matchfiles(repo, subset, x):
1067 # _matchfiles takes a revset list of prefixed arguments:
1067 # _matchfiles takes a revset list of prefixed arguments:
1068 #
1068 #
1069 # [p:foo, i:bar, x:baz]
1069 # [p:foo, i:bar, x:baz]
1070 #
1070 #
1071 # builds a match object from them and filters subset. Allowed
1071 # builds a match object from them and filters subset. Allowed
1072 # prefixes are 'p:' for regular patterns, 'i:' for include
1072 # prefixes are 'p:' for regular patterns, 'i:' for include
1073 # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
1073 # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
1074 # a revision identifier, or the empty string to reference the
1074 # a revision identifier, or the empty string to reference the
1075 # working directory, from which the match object is
1075 # working directory, from which the match object is
1076 # initialized. Use 'd:' to set the default matching mode, defaulting
1076 # initialized. Use 'd:' to set the default matching mode, defaulting
1077 # to 'glob'. At most one 'r:' and one 'd:' argument can be passed.
1077 # to 'glob'. At most one 'r:' and one 'd:' argument can be passed.
1078
1078
1079 l = getargs(x, 1, -1, "_matchfiles requires at least one argument")
1079 l = getargs(x, 1, -1, "_matchfiles requires at least one argument")
1080 pats, inc, exc = [], [], []
1080 pats, inc, exc = [], [], []
1081 rev, default = None, None
1081 rev, default = None, None
1082 for arg in l:
1082 for arg in l:
1083 s = getstring(arg, "_matchfiles requires string arguments")
1083 s = getstring(arg, "_matchfiles requires string arguments")
1084 prefix, value = s[:2], s[2:]
1084 prefix, value = s[:2], s[2:]
1085 if prefix == 'p:':
1085 if prefix == 'p:':
1086 pats.append(value)
1086 pats.append(value)
1087 elif prefix == 'i:':
1087 elif prefix == 'i:':
1088 inc.append(value)
1088 inc.append(value)
1089 elif prefix == 'x:':
1089 elif prefix == 'x:':
1090 exc.append(value)
1090 exc.append(value)
1091 elif prefix == 'r:':
1091 elif prefix == 'r:':
1092 if rev is not None:
1092 if rev is not None:
1093 raise error.ParseError('_matchfiles expected at most one '
1093 raise error.ParseError('_matchfiles expected at most one '
1094 'revision')
1094 'revision')
1095 if value != '': # empty means working directory; leave rev as None
1095 if value != '': # empty means working directory; leave rev as None
1096 rev = value
1096 rev = value
1097 elif prefix == 'd:':
1097 elif prefix == 'd:':
1098 if default is not None:
1098 if default is not None:
1099 raise error.ParseError('_matchfiles expected at most one '
1099 raise error.ParseError('_matchfiles expected at most one '
1100 'default mode')
1100 'default mode')
1101 default = value
1101 default = value
1102 else:
1102 else:
1103 raise error.ParseError('invalid _matchfiles prefix: %s' % prefix)
1103 raise error.ParseError('invalid _matchfiles prefix: %s' % prefix)
1104 if not default:
1104 if not default:
1105 default = 'glob'
1105 default = 'glob'
1106
1106
1107 m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
1107 m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
1108 exclude=exc, ctx=repo[rev], default=default)
1108 exclude=exc, ctx=repo[rev], default=default)
1109
1109
1110 # This directly reads the changelog data, as creating a changectx for
1110 # This directly reads the changelog data, as creating a changectx for
1111 # every revision is quite expensive.
1111 # every revision is quite expensive.
1112 getfiles = repo.changelog.readfiles
1112 getfiles = repo.changelog.readfiles
1113 wdirrev = node.wdirrev
1113 wdirrev = node.wdirrev
1114 def matches(x):
1114 def matches(x):
1115 if x == wdirrev:
1115 if x == wdirrev:
1116 files = repo[x].files()
1116 files = repo[x].files()
1117 else:
1117 else:
1118 files = getfiles(x)
1118 files = getfiles(x)
1119 for f in files:
1119 for f in files:
1120 if m(f):
1120 if m(f):
1121 return True
1121 return True
1122 return False
1122 return False
1123
1123
1124 return subset.filter(matches,
1124 return subset.filter(matches,
1125 condrepr=('<matchfiles patterns=%r, include=%r '
1125 condrepr=('<matchfiles patterns=%r, include=%r '
1126 'exclude=%r, default=%r, rev=%r>',
1126 'exclude=%r, default=%r, rev=%r>',
1127 pats, inc, exc, default, rev))
1127 pats, inc, exc, default, rev))
1128
1128
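The comment block above defines a small convention: every argument to _matchfiles is a string whose two-character prefix says which bucket it belongs to. The parser below restates just that convention in isolation; parseprefixedargs is an invented name, the duplicate 'r:'/'d:' checks are omitted, and no real match object is built.

def parseprefixedargs(args):
    """Sort 'p:'/'i:'/'x:'/'r:'/'d:' prefixed strings into their buckets."""
    pats, inc, exc = [], [], []
    rev = default = None
    for s in args:
        prefix, value = s[:2], s[2:]
        if prefix == 'p:':
            pats.append(value)
        elif prefix == 'i:':
            inc.append(value)
        elif prefix == 'x:':
            exc.append(value)
        elif prefix == 'r:':
            rev = value or None      # empty string means the working directory
        elif prefix == 'd:':
            default = value
        else:
            raise ValueError('invalid prefix: %s' % prefix)
    return pats, inc, exc, rev, default or 'glob'

print(parseprefixedargs(['p:src/**', 'x:**/*.orig', 'd:glob']))
# (['src/**'], [], ['**/*.orig'], None, 'glob')
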
1129 @predicate('file(pattern)', safe=True)
1129 @predicate('file(pattern)', safe=True)
1130 def hasfile(repo, subset, x):
1130 def hasfile(repo, subset, x):
1131 """Changesets affecting files matched by pattern.
1131 """Changesets affecting files matched by pattern.
1132
1132
1133 For a faster but less accurate result, consider using ``filelog()``
1133 For a faster but less accurate result, consider using ``filelog()``
1134 instead.
1134 instead.
1135
1135
1136 This predicate uses ``glob:`` as the default kind of pattern.
1136 This predicate uses ``glob:`` as the default kind of pattern.
1137 """
1137 """
1138 # i18n: "file" is a keyword
1138 # i18n: "file" is a keyword
1139 pat = getstring(x, _("file requires a pattern"))
1139 pat = getstring(x, _("file requires a pattern"))
1140 return _matchfiles(repo, subset, ('string', 'p:' + pat))
1140 return _matchfiles(repo, subset, ('string', 'p:' + pat))
1141
1141
1142 @predicate('head()', safe=True)
1142 @predicate('head()', safe=True)
1143 def head(repo, subset, x):
1143 def head(repo, subset, x):
1144 """Changeset is a named branch head.
1144 """Changeset is a named branch head.
1145 """
1145 """
1146 # i18n: "head" is a keyword
1146 # i18n: "head" is a keyword
1147 getargs(x, 0, 0, _("head takes no arguments"))
1147 getargs(x, 0, 0, _("head takes no arguments"))
1148 hs = set()
1148 hs = set()
1149 cl = repo.changelog
1149 cl = repo.changelog
1150 for b, ls in repo.branchmap().iteritems():
1150 for b, ls in repo.branchmap().iteritems():
1151 hs.update(cl.rev(h) for h in ls)
1151 hs.update(cl.rev(h) for h in ls)
1152 # XXX using a set to feed the baseset is wrong. Sets are not ordered.
1152 # XXX using a set to feed the baseset is wrong. Sets are not ordered.
1153 # This does not break because of other fullreposet misbehavior.
1153 # This does not break because of other fullreposet misbehavior.
1154 # XXX We should combine with subset first: 'subset & baseset(...)'. This is
1154 # XXX We should combine with subset first: 'subset & baseset(...)'. This is
1155 # necessary to ensure we preserve the order in subset.
1155 # necessary to ensure we preserve the order in subset.
1156 return baseset(hs) & subset
1156 return baseset(hs) & subset
1157
1157
1158 @predicate('heads(set)', safe=True)
1158 @predicate('heads(set)', safe=True)
1159 def heads(repo, subset, x):
1159 def heads(repo, subset, x):
1160 """Members of set with no children in set.
1160 """Members of set with no children in set.
1161 """
1161 """
1162 s = getset(repo, subset, x)
1162 s = getset(repo, subset, x)
1163 ps = parents(repo, subset, x)
1163 ps = parents(repo, subset, x)
1164 return s - ps
1164 return s - ps
1165
1165
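heads(set) is computed as the set minus the parents of the set: a member survives only if no other member names it as a parent. The same trick in miniature; headsof, dag and parentsof are illustrative names.

def headsof(revs, parentsof):
    """Members of revs that are not a parent of another member."""
    revs = set(revs)
    parents = set()
    for r in revs:
        parents.update(p for p in parentsof(r) if p in revs)
    return revs - parents

dag = {3: [1], 2: [1], 1: [0], 0: []}
print(sorted(headsof({0, 1, 2, 3}, dag.__getitem__)))   # [2, 3]
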
1166 @predicate('hidden()', safe=True)
1166 @predicate('hidden()', safe=True)
1167 def hidden(repo, subset, x):
1167 def hidden(repo, subset, x):
1168 """Hidden changesets.
1168 """Hidden changesets.
1169 """
1169 """
1170 # i18n: "hidden" is a keyword
1170 # i18n: "hidden" is a keyword
1171 getargs(x, 0, 0, _("hidden takes no arguments"))
1171 getargs(x, 0, 0, _("hidden takes no arguments"))
1172 hiddenrevs = repoview.filterrevs(repo, 'visible')
1172 hiddenrevs = repoview.filterrevs(repo, 'visible')
1173 return subset & hiddenrevs
1173 return subset & hiddenrevs
1174
1174
1175 @predicate('keyword(string)', safe=True)
1175 @predicate('keyword(string)', safe=True)
1176 def keyword(repo, subset, x):
1176 def keyword(repo, subset, x):
1177 """Search commit message, user name, and names of changed files for
1177 """Search commit message, user name, and names of changed files for
1178 string. The match is case-insensitive.
1178 string. The match is case-insensitive.
1179 """
1179 """
1180 # i18n: "keyword" is a keyword
1180 # i18n: "keyword" is a keyword
1181 kw = encoding.lower(getstring(x, _("keyword requires a string")))
1181 kw = encoding.lower(getstring(x, _("keyword requires a string")))
1182
1182
1183 def matches(r):
1183 def matches(r):
1184 c = repo[r]
1184 c = repo[r]
1185 return any(kw in encoding.lower(t)
1185 return any(kw in encoding.lower(t)
1186 for t in c.files() + [c.user(), c.description()])
1186 for t in c.files() + [c.user(), c.description()])
1187
1187
1188 return subset.filter(matches, condrepr=('<keyword %r>', kw))
1188 return subset.filter(matches, condrepr=('<keyword %r>', kw))
1189
1189
1190 @predicate('limit(set[, n[, offset]])', safe=True)
1190 @predicate('limit(set[, n[, offset]])', safe=True)
1191 def limit(repo, subset, x):
1191 def limit(repo, subset, x):
1192 """First n members of set, defaulting to 1, starting from offset.
1192 """First n members of set, defaulting to 1, starting from offset.
1193 """
1193 """
1194 args = getargsdict(x, 'limit', 'set n offset')
1194 args = getargsdict(x, 'limit', 'set n offset')
1195 if 'set' not in args:
1195 if 'set' not in args:
1196 # i18n: "limit" is a keyword
1196 # i18n: "limit" is a keyword
1197 raise error.ParseError(_("limit requires one to three arguments"))
1197 raise error.ParseError(_("limit requires one to three arguments"))
1198 try:
1198 try:
1199 lim, ofs = 1, 0
1199 lim, ofs = 1, 0
1200 if 'n' in args:
1200 if 'n' in args:
1201 # i18n: "limit" is a keyword
1201 # i18n: "limit" is a keyword
1202 lim = int(getstring(args['n'], _("limit requires a number")))
1202 lim = int(getstring(args['n'], _("limit requires a number")))
1203 if 'offset' in args:
1203 if 'offset' in args:
1204 # i18n: "limit" is a keyword
1204 # i18n: "limit" is a keyword
1205 ofs = int(getstring(args['offset'], _("limit requires a number")))
1205 ofs = int(getstring(args['offset'], _("limit requires a number")))
1206 if ofs < 0:
1206 if ofs < 0:
1207 raise error.ParseError(_("negative offset"))
1207 raise error.ParseError(_("negative offset"))
1208 except (TypeError, ValueError):
1208 except (TypeError, ValueError):
1209 # i18n: "limit" is a keyword
1209 # i18n: "limit" is a keyword
1210 raise error.ParseError(_("limit expects a number"))
1210 raise error.ParseError(_("limit expects a number"))
1211 os = getset(repo, fullreposet(repo), args['set'])
1211 os = getset(repo, fullreposet(repo), args['set'])
1212 result = []
1212 result = []
1213 it = iter(os)
1213 it = iter(os)
1214 for x in xrange(ofs):
1214 for x in xrange(ofs):
1215 y = next(it, None)
1215 y = next(it, None)
1216 if y is None:
1216 if y is None:
1217 break
1217 break
1218 for x in xrange(lim):
1218 for x in xrange(lim):
1219 y = next(it, None)
1219 y = next(it, None)
1220 if y is None:
1220 if y is None:
1221 break
1221 break
1222 elif y in subset:
1222 elif y in subset:
1223 result.append(y)
1223 result.append(y)
1224 return baseset(result, datarepr=('<limit n=%d, offset=%d, %r, %r>',
1224 return baseset(result, datarepr=('<limit n=%d, offset=%d, %r, %r>',
1225 lim, ofs, subset, os))
1225 lim, ofs, subset, os))
1226
1226
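limit() first skips `offset` entries of the evaluated set and then keeps at most `n` of the following entries, dropping any that are not in the current subset. The two xrange loops amount to an islice over the iterator, as in this small sketch; limitrevs and the sample values are made up.

from itertools import islice

def limitrevs(revs, subset, n=1, offset=0):
    """First n items of revs after skipping offset, filtered by subset."""
    window = islice(iter(revs), offset, offset + n)
    return [r for r in window if r in subset]

print(limitrevs([10, 11, 12, 13, 14], subset={12, 14}, n=2, offset=1))   # [12]

Note that, as in the loops above, the subset membership filter is applied after the window is taken, so fewer than n revisions may come back.
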
1227 @predicate('last(set, [n])', safe=True)
1227 @predicate('last(set, [n])', safe=True)
1228 def last(repo, subset, x):
1228 def last(repo, subset, x):
1229 """Last n members of set, defaulting to 1.
1229 """Last n members of set, defaulting to 1.
1230 """
1230 """
1231 # i18n: "last" is a keyword
1231 # i18n: "last" is a keyword
1232 l = getargs(x, 1, 2, _("last requires one or two arguments"))
1232 l = getargs(x, 1, 2, _("last requires one or two arguments"))
1233 try:
1233 try:
1234 lim = 1
1234 lim = 1
1235 if len(l) == 2:
1235 if len(l) == 2:
1236 # i18n: "last" is a keyword
1236 # i18n: "last" is a keyword
1237 lim = int(getstring(l[1], _("last requires a number")))
1237 lim = int(getstring(l[1], _("last requires a number")))
1238 except (TypeError, ValueError):
1238 except (TypeError, ValueError):
1239 # i18n: "last" is a keyword
1239 # i18n: "last" is a keyword
1240 raise error.ParseError(_("last expects a number"))
1240 raise error.ParseError(_("last expects a number"))
1241 os = getset(repo, fullreposet(repo), l[0])
1241 os = getset(repo, fullreposet(repo), l[0])
1242 os.reverse()
1242 os.reverse()
1243 result = []
1243 result = []
1244 it = iter(os)
1244 it = iter(os)
1245 for x in xrange(lim):
1245 for x in xrange(lim):
1246 y = next(it, None)
1246 y = next(it, None)
1247 if y is None:
1247 if y is None:
1248 break
1248 break
1249 elif y in subset:
1249 elif y in subset:
1250 result.append(y)
1250 result.append(y)
1251 return baseset(result, datarepr=('<last n=%d, %r, %r>', lim, subset, os))
1251 return baseset(result, datarepr=('<last n=%d, %r, %r>', lim, subset, os))
1252
1252
1253 @predicate('max(set)', safe=True)
1253 @predicate('max(set)', safe=True)
1254 def maxrev(repo, subset, x):
1254 def maxrev(repo, subset, x):
1255 """Changeset with highest revision number in set.
1255 """Changeset with highest revision number in set.
1256 """
1256 """
1257 os = getset(repo, fullreposet(repo), x)
1257 os = getset(repo, fullreposet(repo), x)
1258 try:
1258 try:
1259 m = os.max()
1259 m = os.max()
1260 if m in subset:
1260 if m in subset:
1261 return baseset([m], datarepr=('<max %r, %r>', subset, os))
1261 return baseset([m], datarepr=('<max %r, %r>', subset, os))
1262 except ValueError:
1262 except ValueError:
1263 # os.max() throws a ValueError when the collection is empty.
1263 # os.max() throws a ValueError when the collection is empty.
1264 # Same as python's max().
1264 # Same as python's max().
1265 pass
1265 pass
1266 return baseset(datarepr=('<max %r, %r>', subset, os))
1266 return baseset(datarepr=('<max %r, %r>', subset, os))
1267
1267
1268 @predicate('merge()', safe=True)
1268 @predicate('merge()', safe=True)
1269 def merge(repo, subset, x):
1269 def merge(repo, subset, x):
1270 """Changeset is a merge changeset.
1270 """Changeset is a merge changeset.
1271 """
1271 """
1272 # i18n: "merge" is a keyword
1272 # i18n: "merge" is a keyword
1273 getargs(x, 0, 0, _("merge takes no arguments"))
1273 getargs(x, 0, 0, _("merge takes no arguments"))
1274 cl = repo.changelog
1274 cl = repo.changelog
1275 return subset.filter(lambda r: cl.parentrevs(r)[1] != -1,
1275 return subset.filter(lambda r: cl.parentrevs(r)[1] != -1,
1276 condrepr='<merge>')
1276 condrepr='<merge>')
1277
1277
1278 @predicate('branchpoint()', safe=True)
1278 @predicate('branchpoint()', safe=True)
1279 def branchpoint(repo, subset, x):
1279 def branchpoint(repo, subset, x):
1280 """Changesets with more than one child.
1280 """Changesets with more than one child.
1281 """
1281 """
1282 # i18n: "branchpoint" is a keyword
1282 # i18n: "branchpoint" is a keyword
1283 getargs(x, 0, 0, _("branchpoint takes no arguments"))
1283 getargs(x, 0, 0, _("branchpoint takes no arguments"))
1284 cl = repo.changelog
1284 cl = repo.changelog
1285 if not subset:
1285 if not subset:
1286 return baseset()
1286 return baseset()
1287 # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
1287 # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
1288 # (and if it is not, it should.)
1288 # (and if it is not, it should.)
1289 baserev = min(subset)
1289 baserev = min(subset)
1290 parentscount = [0]*(len(repo) - baserev)
1290 parentscount = [0]*(len(repo) - baserev)
1291 for r in cl.revs(start=baserev + 1):
1291 for r in cl.revs(start=baserev + 1):
1292 for p in cl.parentrevs(r):
1292 for p in cl.parentrevs(r):
1293 if p >= baserev:
1293 if p >= baserev:
1294 parentscount[p - baserev] += 1
1294 parentscount[p - baserev] += 1
1295 return subset.filter(lambda r: parentscount[r - baserev] > 1,
1295 return subset.filter(lambda r: parentscount[r - baserev] > 1,
1296 condrepr='<branchpoint>')
1296 condrepr='<branchpoint>')
1297
1297
1298 @predicate('min(set)', safe=True)
1298 @predicate('min(set)', safe=True)
1299 def minrev(repo, subset, x):
1299 def minrev(repo, subset, x):
1300 """Changeset with lowest revision number in set.
1300 """Changeset with lowest revision number in set.
1301 """
1301 """
1302 os = getset(repo, fullreposet(repo), x)
1302 os = getset(repo, fullreposet(repo), x)
1303 try:
1303 try:
1304 m = os.min()
1304 m = os.min()
1305 if m in subset:
1305 if m in subset:
1306 return baseset([m], datarepr=('<min %r, %r>', subset, os))
1306 return baseset([m], datarepr=('<min %r, %r>', subset, os))
1307 except ValueError:
1307 except ValueError:
1308 # os.min() throws a ValueError when the collection is empty.
1308 # os.min() throws a ValueError when the collection is empty.
1309 # Same as python's min().
1309 # Same as python's min().
1310 pass
1310 pass
1311 return baseset(datarepr=('<min %r, %r>', subset, os))
1311 return baseset(datarepr=('<min %r, %r>', subset, os))
1312
1312
1313 @predicate('modifies(pattern)', safe=True)
1313 @predicate('modifies(pattern)', safe=True)
1314 def modifies(repo, subset, x):
1314 def modifies(repo, subset, x):
1315 """Changesets modifying files matched by pattern.
1315 """Changesets modifying files matched by pattern.
1316
1316
1317 The pattern without explicit kind like ``glob:`` is expected to be
1317 The pattern without explicit kind like ``glob:`` is expected to be
1318 relative to the current directory and match against a file or a
1318 relative to the current directory and match against a file or a
1319 directory.
1319 directory.
1320 """
1320 """
1321 # i18n: "modifies" is a keyword
1321 # i18n: "modifies" is a keyword
1322 pat = getstring(x, _("modifies requires a pattern"))
1322 pat = getstring(x, _("modifies requires a pattern"))
1323 return checkstatus(repo, subset, pat, 0)
1323 return checkstatus(repo, subset, pat, 0)
1324
1324
1325 @predicate('named(namespace)')
1325 @predicate('named(namespace)')
1326 def named(repo, subset, x):
1326 def named(repo, subset, x):
1327 """The changesets in a given namespace.
1327 """The changesets in a given namespace.
1328
1328
1329 If `namespace` starts with `re:`, the remainder of the string is treated as
1329 If `namespace` starts with `re:`, the remainder of the string is treated as
1330 a regular expression. To match a namespace that actually starts with `re:`,
1330 a regular expression. To match a namespace that actually starts with `re:`,
1331 use the prefix `literal:`.
1331 use the prefix `literal:`.
1332 """
1332 """
1333 # i18n: "named" is a keyword
1333 # i18n: "named" is a keyword
1334 args = getargs(x, 1, 1, _('named requires a namespace argument'))
1334 args = getargs(x, 1, 1, _('named requires a namespace argument'))
1335
1335
1336 ns = getstring(args[0],
1336 ns = getstring(args[0],
1337 # i18n: "named" is a keyword
1337 # i18n: "named" is a keyword
1338 _('the argument to named must be a string'))
1338 _('the argument to named must be a string'))
1339 kind, pattern, matcher = util.stringmatcher(ns)
1339 kind, pattern, matcher = util.stringmatcher(ns)
1340 namespaces = set()
1340 namespaces = set()
1341 if kind == 'literal':
1341 if kind == 'literal':
1342 if pattern not in repo.names:
1342 if pattern not in repo.names:
1343 raise error.RepoLookupError(_("namespace '%s' does not exist")
1343 raise error.RepoLookupError(_("namespace '%s' does not exist")
1344 % ns)
1344 % ns)
1345 namespaces.add(repo.names[pattern])
1345 namespaces.add(repo.names[pattern])
1346 else:
1346 else:
1347 for name, ns in repo.names.iteritems():
1347 for name, ns in repo.names.iteritems():
1348 if matcher(name):
1348 if matcher(name):
1349 namespaces.add(ns)
1349 namespaces.add(ns)
1350 if not namespaces:
1350 if not namespaces:
1351 raise error.RepoLookupError(_("no namespace exists"
1351 raise error.RepoLookupError(_("no namespace exists"
1352 " that match '%s'") % pattern)
1352 " that match '%s'") % pattern)
1353
1353
1354 names = set()
1354 names = set()
1355 for ns in namespaces:
1355 for ns in namespaces:
1356 for name in ns.listnames(repo):
1356 for name in ns.listnames(repo):
1357 if name not in ns.deprecated:
1357 if name not in ns.deprecated:
1358 names.update(repo[n].rev() for n in ns.nodes(repo, name))
1358 names.update(repo[n].rev() for n in ns.nodes(repo, name))
1359
1359
1360 names -= set([node.nullrev])
1360 names -= set([node.nullrev])
1361 return subset & names
1361 return subset & names
1362
1362
1363 @predicate('id(string)', safe=True)
1363 @predicate('id(string)', safe=True)
1364 def node_(repo, subset, x):
1364 def node_(repo, subset, x):
1365 """Revision non-ambiguously specified by the given hex string prefix.
1365 """Revision non-ambiguously specified by the given hex string prefix.
1366 """
1366 """
1367 # i18n: "id" is a keyword
1367 # i18n: "id" is a keyword
1368 l = getargs(x, 1, 1, _("id requires one argument"))
1368 l = getargs(x, 1, 1, _("id requires one argument"))
1369 # i18n: "id" is a keyword
1369 # i18n: "id" is a keyword
1370 n = getstring(l[0], _("id requires a string"))
1370 n = getstring(l[0], _("id requires a string"))
1371 if len(n) == 40:
1371 if len(n) == 40:
1372 try:
1372 try:
1373 rn = repo.changelog.rev(node.bin(n))
1373 rn = repo.changelog.rev(node.bin(n))
1374 except (LookupError, TypeError):
1374 except (LookupError, TypeError):
1375 rn = None
1375 rn = None
1376 else:
1376 else:
1377 rn = None
1377 rn = None
1378 pm = repo.changelog._partialmatch(n)
1378 pm = repo.changelog._partialmatch(n)
1379 if pm is not None:
1379 if pm is not None:
1380 rn = repo.changelog.rev(pm)
1380 rn = repo.changelog.rev(pm)
1381
1381
1382 if rn is None:
1382 if rn is None:
1383 return baseset()
1383 return baseset()
1384 result = baseset([rn])
1384 result = baseset([rn])
1385 return result & subset
1385 return result & subset
1386
1386
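id(string) accepts either a full 40-character hex node, resolved directly, or a shorter prefix, resolved through the changelog's partial-match lookup. A rough stand-alone version of that split follows; resolvehexid and the nodes dict are invented, and ambiguity handling is simplified to returning None.

def resolvehexid(hexid, nodes):
    """Resolve a full or partial hex id against nodes, a mapping of
    full 40-char hex node -> revision number."""
    if len(hexid) == 40:
        return nodes.get(hexid)
    matches = [rev for full, rev in nodes.items() if full.startswith(hexid)]
    return matches[0] if len(matches) == 1 else None

nodes = {'ab' * 20: 0, 'cd' * 20: 1}
print(resolvehexid('ab' * 20, nodes))   # 0
print(resolvehexid('cdcd', nodes))      # 1
print(resolvehexid('ee', nodes))        # None (unknown or ambiguous)
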
1387 @predicate('obsolete()', safe=True)
1387 @predicate('obsolete()', safe=True)
1388 def obsolete(repo, subset, x):
1388 def obsolete(repo, subset, x):
1389 """Mutable changeset with a newer version."""
1389 """Mutable changeset with a newer version."""
1390 # i18n: "obsolete" is a keyword
1390 # i18n: "obsolete" is a keyword
1391 getargs(x, 0, 0, _("obsolete takes no arguments"))
1391 getargs(x, 0, 0, _("obsolete takes no arguments"))
1392 obsoletes = obsmod.getrevs(repo, 'obsolete')
1392 obsoletes = obsmod.getrevs(repo, 'obsolete')
1393 return subset & obsoletes
1393 return subset & obsoletes
1394
1394
1395 @predicate('only(set, [set])', safe=True)
1395 @predicate('only(set, [set])', safe=True)
1396 def only(repo, subset, x):
1396 def only(repo, subset, x):
1397 """Changesets that are ancestors of the first set that are not ancestors
1397 """Changesets that are ancestors of the first set that are not ancestors
1398 of any other head in the repo. If a second set is specified, the result
1398 of any other head in the repo. If a second set is specified, the result
1399 is ancestors of the first set that are not ancestors of the second set
1399 is ancestors of the first set that are not ancestors of the second set
1400 (i.e. ::<set1> - ::<set2>).
1400 (i.e. ::<set1> - ::<set2>).
1401 """
1401 """
1402 cl = repo.changelog
1402 cl = repo.changelog
1403 # i18n: "only" is a keyword
1403 # i18n: "only" is a keyword
1404 args = getargs(x, 1, 2, _('only takes one or two arguments'))
1404 args = getargs(x, 1, 2, _('only takes one or two arguments'))
1405 include = getset(repo, fullreposet(repo), args[0])
1405 include = getset(repo, fullreposet(repo), args[0])
1406 if len(args) == 1:
1406 if len(args) == 1:
1407 if not include:
1407 if not include:
1408 return baseset()
1408 return baseset()
1409
1409
1410 descendants = set(_revdescendants(repo, include, False))
1410 descendants = set(_revdescendants(repo, include, False))
1411 exclude = [rev for rev in cl.headrevs()
1411 exclude = [rev for rev in cl.headrevs()
1412 if rev not in descendants and rev not in include]
1412 if rev not in descendants and rev not in include]
1413 else:
1413 else:
1414 exclude = getset(repo, fullreposet(repo), args[1])
1414 exclude = getset(repo, fullreposet(repo), args[1])
1415
1415
1416 results = set(cl.findmissingrevs(common=exclude, heads=include))
1416 results = set(cl.findmissingrevs(common=exclude, heads=include))
1417 # XXX we should turn this into a baseset instead of a set, smartset may do
1417 # XXX we should turn this into a baseset instead of a set, smartset may do
1418 # some optimisations from the fact this is a baseset.
1418 # some optimisations from the fact this is a baseset.
1419 return subset & results
1419 return subset & results
1420
1420
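As the docstring says, only(a, b) is ::a - ::b: ancestors of the first set that are not ancestors of the second. A direct, unoptimised reading of that definition; onlyrevs, dag and parentsof are illustrative, with -1 standing for the null parent.

def onlyrevs(include, exclude, parentsof):
    """Ancestors of include (inclusive) that are not ancestors of exclude."""
    def ancestorset(revs):
        seen = set()
        stack = list(revs)
        while stack:
            r = stack.pop()
            if r in seen or r < 0:
                continue
            seen.add(r)
            stack.extend(parentsof(r))
        return seen
    return ancestorset(include) - ancestorset(exclude)

dag = {0: [-1], 1: [0], 2: [1], 3: [1], 4: [3]}
print(sorted(onlyrevs({4}, {2}, dag.__getitem__)))   # [3, 4]

The predicate above asks the changelog's findmissingrevs(common=exclude, heads=include) for essentially the same difference instead of materialising both ancestor sets.
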
1421 @predicate('origin([set])', safe=True)
1421 @predicate('origin([set])', safe=True)
1422 def origin(repo, subset, x):
1422 def origin(repo, subset, x):
1423 """
1423 """
1424 Changesets that were specified as a source for the grafts, transplants or
1424 Changesets that were specified as a source for the grafts, transplants or
1425 rebases that created the given revisions. Omitting the optional set is the
1425 rebases that created the given revisions. Omitting the optional set is the
1426 same as passing all(). If a changeset created by these operations is itself
1426 same as passing all(). If a changeset created by these operations is itself
1427 specified as a source for one of these operations, only the source changeset
1427 specified as a source for one of these operations, only the source changeset
1428 for the first operation is selected.
1428 for the first operation is selected.
1429 """
1429 """
1430 if x is not None:
1430 if x is not None:
1431 dests = getset(repo, fullreposet(repo), x)
1431 dests = getset(repo, fullreposet(repo), x)
1432 else:
1432 else:
1433 dests = fullreposet(repo)
1433 dests = fullreposet(repo)
1434
1434
1435 def _firstsrc(rev):
1435 def _firstsrc(rev):
1436 src = _getrevsource(repo, rev)
1436 src = _getrevsource(repo, rev)
1437 if src is None:
1437 if src is None:
1438 return None
1438 return None
1439
1439
1440 while True:
1440 while True:
1441 prev = _getrevsource(repo, src)
1441 prev = _getrevsource(repo, src)
1442
1442
1443 if prev is None:
1443 if prev is None:
1444 return src
1444 return src
1445 src = prev
1445 src = prev
1446
1446
1447 o = set([_firstsrc(r) for r in dests])
1447 o = set([_firstsrc(r) for r in dests])
1448 o -= set([None])
1448 o -= set([None])
1449 # XXX we should turn this into a baseset instead of a set, smartset may do
1449 # XXX we should turn this into a baseset instead of a set, smartset may do
1450 # some optimisations from the fact this is a baseset.
1450 # some optimisations from the fact this is a baseset.
1451 return subset & o
1451 return subset & o
1452
1452
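The _firstsrc() helper above keeps following the recorded graft/transplant/rebase source of a revision until it finds one with no recorded source, i.e. the original changeset. The same walk with a dict in place of _getrevsource; firstsource and chain are invented names.

def firstsource(rev, getsource):
    """Follow recorded sources back to the original changeset, or return
    None when rev was not created by a graft/transplant/rebase."""
    src = getsource(rev)
    if src is None:
        return None
    while True:
        prev = getsource(src)
        if prev is None:
            return src
        src = prev

chain = {7: 5, 5: 2}                 # 7 grafted from 5, which came from 2
print(firstsource(7, chain.get))     # 2
print(firstsource(3, chain.get))     # None
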
1453 @predicate('outgoing([path])', safe=True)
1453 @predicate('outgoing([path])', safe=True)
1454 def outgoing(repo, subset, x):
1454 def outgoing(repo, subset, x):
1455 """Changesets not found in the specified destination repository, or the
1455 """Changesets not found in the specified destination repository, or the
1456 default push location.
1456 default push location.
1457 """
1457 """
1458 # Avoid cycles.
1458 # Avoid cycles.
1459 from . import (
1459 from . import (
1460 discovery,
1460 discovery,
1461 hg,
1461 hg,
1462 )
1462 )
1463 # i18n: "outgoing" is a keyword
1463 # i18n: "outgoing" is a keyword
1464 l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
1464 l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
1465 # i18n: "outgoing" is a keyword
1465 # i18n: "outgoing" is a keyword
1466 dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
1466 dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
1467 dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
1467 dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
1468 dest, branches = hg.parseurl(dest)
1468 dest, branches = hg.parseurl(dest)
1469 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1469 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1470 if revs:
1470 if revs:
1471 revs = [repo.lookup(rev) for rev in revs]
1471 revs = [repo.lookup(rev) for rev in revs]
1472 other = hg.peer(repo, {}, dest)
1472 other = hg.peer(repo, {}, dest)
1473 repo.ui.pushbuffer()
1473 repo.ui.pushbuffer()
1474 outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
1474 outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
1475 repo.ui.popbuffer()
1475 repo.ui.popbuffer()
1476 cl = repo.changelog
1476 cl = repo.changelog
1477 o = set([cl.rev(r) for r in outgoing.missing])
1477 o = set([cl.rev(r) for r in outgoing.missing])
1478 return subset & o
1478 return subset & o
1479
1479
1480 @predicate('p1([set])', safe=True)
1480 @predicate('p1([set])', safe=True)
1481 def p1(repo, subset, x):
1481 def p1(repo, subset, x):
1482 """First parent of changesets in set, or the working directory.
1482 """First parent of changesets in set, or the working directory.
1483 """
1483 """
1484 if x is None:
1484 if x is None:
1485 p = repo[x].p1().rev()
1485 p = repo[x].p1().rev()
1486 if p >= 0:
1486 if p >= 0:
1487 return subset & baseset([p])
1487 return subset & baseset([p])
1488 return baseset()
1488 return baseset()
1489
1489
1490 ps = set()
1490 ps = set()
1491 cl = repo.changelog
1491 cl = repo.changelog
1492 for r in getset(repo, fullreposet(repo), x):
1492 for r in getset(repo, fullreposet(repo), x):
1493 ps.add(cl.parentrevs(r)[0])
1493 ps.add(cl.parentrevs(r)[0])
1494 ps -= set([node.nullrev])
1494 ps -= set([node.nullrev])
1495 # XXX we should turn this into a baseset instead of a set, smartset may do
1495 # XXX we should turn this into a baseset instead of a set, smartset may do
1496 # some optimisations from the fact this is a baseset.
1496 # some optimisations from the fact this is a baseset.
1497 return subset & ps
1497 return subset & ps
1498
1498
1499 @predicate('p2([set])', safe=True)
1499 @predicate('p2([set])', safe=True)
1500 def p2(repo, subset, x):
1500 def p2(repo, subset, x):
1501 """Second parent of changesets in set, or the working directory.
1501 """Second parent of changesets in set, or the working directory.
1502 """
1502 """
1503 if x is None:
1503 if x is None:
1504 ps = repo[x].parents()
1504 ps = repo[x].parents()
1505 try:
1505 try:
1506 p = ps[1].rev()
1506 p = ps[1].rev()
1507 if p >= 0:
1507 if p >= 0:
1508 return subset & baseset([p])
1508 return subset & baseset([p])
1509 return baseset()
1509 return baseset()
1510 except IndexError:
1510 except IndexError:
1511 return baseset()
1511 return baseset()
1512
1512
1513 ps = set()
1513 ps = set()
1514 cl = repo.changelog
1514 cl = repo.changelog
1515 for r in getset(repo, fullreposet(repo), x):
1515 for r in getset(repo, fullreposet(repo), x):
1516 ps.add(cl.parentrevs(r)[1])
1516 ps.add(cl.parentrevs(r)[1])
1517 ps -= set([node.nullrev])
1517 ps -= set([node.nullrev])
1518 # XXX we should turn this into a baseset instead of a set, smartset may do
1518 # XXX we should turn this into a baseset instead of a set, smartset may do
1519 # some optimisations from the fact this is a baseset.
1519 # some optimisations from the fact this is a baseset.
1520 return subset & ps
1520 return subset & ps
1521
1521
1522 @predicate('parents([set])', safe=True)
1522 @predicate('parents([set])', safe=True)
1523 def parents(repo, subset, x):
1523 def parents(repo, subset, x):
1524 """
1524 """
1525 The set of all parents for all changesets in set, or the working directory.
1525 The set of all parents for all changesets in set, or the working directory.
1526 """
1526 """
1527 if x is None:
1527 if x is None:
1528 ps = set(p.rev() for p in repo[x].parents())
1528 ps = set(p.rev() for p in repo[x].parents())
1529 else:
1529 else:
1530 ps = set()
1530 ps = set()
1531 cl = repo.changelog
1531 cl = repo.changelog
1532 up = ps.update
1532 up = ps.update
1533 parentrevs = cl.parentrevs
1533 parentrevs = cl.parentrevs
1534 for r in getset(repo, fullreposet(repo), x):
1534 for r in getset(repo, fullreposet(repo), x):
1535 if r == node.wdirrev:
1535 if r == node.wdirrev:
1536 up(p.rev() for p in repo[r].parents())
1536 up(p.rev() for p in repo[r].parents())
1537 else:
1537 else:
1538 up(parentrevs(r))
1538 up(parentrevs(r))
1539 ps -= set([node.nullrev])
1539 ps -= set([node.nullrev])
1540 return subset & ps
1540 return subset & ps
1541
1541
1542 def _phase(repo, subset, target):
1542 def _phase(repo, subset, target):
1543 """helper to select all rev in phase <target>"""
1543 """helper to select all rev in phase <target>"""
1544 repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
1544 repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
1545 if repo._phasecache._phasesets:
1545 if repo._phasecache._phasesets:
1546 s = repo._phasecache._phasesets[target] - repo.changelog.filteredrevs
1546 s = repo._phasecache._phasesets[target] - repo.changelog.filteredrevs
1547 s = baseset(s)
1547 s = baseset(s)
1548 s.sort() # sets are unordered, so we enforce ascending order
1548 s.sort() # sets are unordered, so we enforce ascending order
1549 return subset & s
1549 return subset & s
1550 else:
1550 else:
1551 phase = repo._phasecache.phase
1551 phase = repo._phasecache.phase
1552 condition = lambda r: phase(repo, r) == target
1552 condition = lambda r: phase(repo, r) == target
1553 return subset.filter(condition, condrepr=('<phase %r>', target),
1553 return subset.filter(condition, condrepr=('<phase %r>', target),
1554 cache=False)
1554 cache=False)
1555
1555
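_phase() picks one of two strategies: intersect with the precomputed per-phase revision sets when the phase cache has them, otherwise fall back to asking the phase of each revision in the subset. A compact illustration of that choice; revsinphase, phasesets and phaseof are invented stand-ins for the phase cache.

def revsinphase(subset, target, phasesets=None, phaseof=None):
    """Revisions of subset in phase target, preferring precomputed sets."""
    if phasesets is not None and target in phasesets:
        return sorted(set(subset) & phasesets[target])
    return [r for r in subset if phaseof(r) == target]

DRAFT = 1
print(revsinphase([3, 4, 5, 6, 7], DRAFT, phasesets={DRAFT: {4, 5, 7}}))       # [4, 5, 7]
print(revsinphase([3, 4, 5], DRAFT, phaseof=lambda r: DRAFT if r > 3 else 0))  # [4, 5]

The precomputed branch in the real helper also has to drop filtered revisions and re-sort, which is what the filteredrevs subtraction and s.sort() above take care of.
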
1556 @predicate('draft()', safe=True)
1556 @predicate('draft()', safe=True)
1557 def draft(repo, subset, x):
1557 def draft(repo, subset, x):
1558 """Changeset in draft phase."""
1558 """Changeset in draft phase."""
1559 # i18n: "draft" is a keyword
1559 # i18n: "draft" is a keyword
1560 getargs(x, 0, 0, _("draft takes no arguments"))
1560 getargs(x, 0, 0, _("draft takes no arguments"))
1561 target = phases.draft
1561 target = phases.draft
1562 return _phase(repo, subset, target)
1562 return _phase(repo, subset, target)
1563
1563
1564 @predicate('secret()', safe=True)
1564 @predicate('secret()', safe=True)
1565 def secret(repo, subset, x):
1565 def secret(repo, subset, x):
1566 """Changeset in secret phase."""
1566 """Changeset in secret phase."""
1567 # i18n: "secret" is a keyword
1567 # i18n: "secret" is a keyword
1568 getargs(x, 0, 0, _("secret takes no arguments"))
1568 getargs(x, 0, 0, _("secret takes no arguments"))
1569 target = phases.secret
1569 target = phases.secret
1570 return _phase(repo, subset, target)
1570 return _phase(repo, subset, target)
1571
1571
1572 def parentspec(repo, subset, x, n):
1572 def parentspec(repo, subset, x, n):
1573 """``set^0``
1573 """``set^0``
1574 The set.
1574 The set.
1575 ``set^1`` (or ``set^``), ``set^2``
1575 ``set^1`` (or ``set^``), ``set^2``
1576 First or second parent, respectively, of all changesets in set.
1576 First or second parent, respectively, of all changesets in set.
1577 """
1577 """
1578 try:
1578 try:
1579 n = int(n[1])
1579 n = int(n[1])
1580 if n not in (0, 1, 2):
1580 if n not in (0, 1, 2):
1581 raise ValueError
1581 raise ValueError
1582 except (TypeError, ValueError):
1582 except (TypeError, ValueError):
1583 raise error.ParseError(_("^ expects a number 0, 1, or 2"))
1583 raise error.ParseError(_("^ expects a number 0, 1, or 2"))
1584 ps = set()
1584 ps = set()
1585 cl = repo.changelog
1585 cl = repo.changelog
1586 for r in getset(repo, fullreposet(repo), x):
1586 for r in getset(repo, fullreposet(repo), x):
1587 if n == 0:
1587 if n == 0:
1588 ps.add(r)
1588 ps.add(r)
1589 elif n == 1:
1589 elif n == 1:
1590 ps.add(cl.parentrevs(r)[0])
1590 ps.add(cl.parentrevs(r)[0])
1591 elif n == 2:
1591 elif n == 2:
1592 parents = cl.parentrevs(r)
1592 parents = cl.parentrevs(r)
1593 if len(parents) > 1:
1593 if len(parents) > 1:
1594 ps.add(parents[1])
1594 ps.add(parents[1])
1595 return subset & ps
1595 return subset & ps
1596
1596
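parentspec() implements the ^n operator documented in its docstring: ^0 is the set itself, ^1 the first parents, and ^2 the second parents, which only exist for merges. A simplified stand-alone version of that mapping; nthparents and the parents table are invented, and parentsof(rev) returns a (p1, p2) pair with -1 for a missing parent.

def nthparents(revs, n, parentsof):
    """Evaluate set^n for n in (0, 1, 2)."""
    if n not in (0, 1, 2):
        raise ValueError('^ expects a number 0, 1, or 2')
    out = set()
    for r in revs:
        if n == 0:
            out.add(r)
        else:
            p = parentsof(r)[n - 1]
            if p != -1:              # merges only, for n == 2
                out.add(p)
    return out

parents = {6: (4, 5), 5: (3, -1)}
print(sorted(nthparents({5, 6}, 1, parents.__getitem__)))   # [3, 4]
print(sorted(nthparents({5, 6}, 2, parents.__getitem__)))   # [5]
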
1597 @predicate('present(set)', safe=True)
1597 @predicate('present(set)', safe=True)
1598 def present(repo, subset, x):
1598 def present(repo, subset, x):
1599 """An empty set, if any revision in set isn't found; otherwise,
1599 """An empty set, if any revision in set isn't found; otherwise,
1600 all revisions in set.
1600 all revisions in set.
1601
1601
1602 If any of the specified revisions is not present in the local repository,
1602 If any of the specified revisions is not present in the local repository,
1603 the query is normally aborted. But this predicate allows the query
1603 the query is normally aborted. But this predicate allows the query
1604 to continue even in such cases.
1604 to continue even in such cases.
1605 """
1605 """
1606 try:
1606 try:
1607 return getset(repo, subset, x)
1607 return getset(repo, subset, x)
1608 except error.RepoLookupError:
1608 except error.RepoLookupError:
1609 return baseset()
1609 return baseset()
1610
1610
1611 # for internal use
1611 # for internal use
1612 @predicate('_notpublic', safe=True)
1612 @predicate('_notpublic', safe=True)
1613 def _notpublic(repo, subset, x):
1613 def _notpublic(repo, subset, x):
1614 getargs(x, 0, 0, "_notpublic takes no arguments")
1614 getargs(x, 0, 0, "_notpublic takes no arguments")
1615 repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
1615 repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
1616 if repo._phasecache._phasesets:
1616 if repo._phasecache._phasesets:
1617 s = set()
1617 s = set()
1618 for u in repo._phasecache._phasesets[1:]:
1618 for u in repo._phasecache._phasesets[1:]:
1619 s.update(u)
1619 s.update(u)
1620 s = baseset(s - repo.changelog.filteredrevs)
1620 s = baseset(s - repo.changelog.filteredrevs)
1621 s.sort()
1621 s.sort()
1622 return subset & s
1622 return subset & s
1623 else:
1623 else:
1624 phase = repo._phasecache.phase
1624 phase = repo._phasecache.phase
1625 target = phases.public
1625 target = phases.public
1626 condition = lambda r: phase(repo, r) != target
1626 condition = lambda r: phase(repo, r) != target
1627 return subset.filter(condition, condrepr=('<phase %r>', target),
1627 return subset.filter(condition, condrepr=('<phase %r>', target),
1628 cache=False)
1628 cache=False)
1629
1629
1630 @predicate('public()', safe=True)
1630 @predicate('public()', safe=True)
1631 def public(repo, subset, x):
1631 def public(repo, subset, x):
1632 """Changeset in public phase."""
1632 """Changeset in public phase."""
1633 # i18n: "public" is a keyword
1633 # i18n: "public" is a keyword
1634 getargs(x, 0, 0, _("public takes no arguments"))
1634 getargs(x, 0, 0, _("public takes no arguments"))
1635 phase = repo._phasecache.phase
1635 phase = repo._phasecache.phase
1636 target = phases.public
1636 target = phases.public
1637 condition = lambda r: phase(repo, r) == target
1637 condition = lambda r: phase(repo, r) == target
1638 return subset.filter(condition, condrepr=('<phase %r>', target),
1638 return subset.filter(condition, condrepr=('<phase %r>', target),
1639 cache=False)
1639 cache=False)
1640
1640
1641 @predicate('remote([id [,path]])', safe=True)
1641 @predicate('remote([id [,path]])', safe=True)
1642 def remote(repo, subset, x):
1642 def remote(repo, subset, x):
1643 """Local revision that corresponds to the given identifier in a
1643 """Local revision that corresponds to the given identifier in a
1644 remote repository, if present. Here, the '.' identifier is a
1644 remote repository, if present. Here, the '.' identifier is a
1645 synonym for the current local branch.
1645 synonym for the current local branch.
1646 """
1646 """
1647
1647
1648 from . import hg # avoid start-up nasties
1648 from . import hg # avoid start-up nasties
1649 # i18n: "remote" is a keyword
1649 # i18n: "remote" is a keyword
1650 l = getargs(x, 0, 2, _("remote takes zero, one, or two arguments"))
1650 l = getargs(x, 0, 2, _("remote takes zero, one, or two arguments"))
1651
1651
1652 q = '.'
1652 q = '.'
1653 if len(l) > 0:
1653 if len(l) > 0:
1654 # i18n: "remote" is a keyword
1654 # i18n: "remote" is a keyword
1655 q = getstring(l[0], _("remote requires a string id"))
1655 q = getstring(l[0], _("remote requires a string id"))
1656 if q == '.':
1656 if q == '.':
1657 q = repo['.'].branch()
1657 q = repo['.'].branch()
1658
1658
1659 dest = ''
1659 dest = ''
1660 if len(l) > 1:
1660 if len(l) > 1:
1661 # i18n: "remote" is a keyword
1661 # i18n: "remote" is a keyword
1662 dest = getstring(l[1], _("remote requires a repository path"))
1662 dest = getstring(l[1], _("remote requires a repository path"))
1663 dest = repo.ui.expandpath(dest or 'default')
1663 dest = repo.ui.expandpath(dest or 'default')
1664 dest, branches = hg.parseurl(dest)
1664 dest, branches = hg.parseurl(dest)
1665 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1665 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1666 if revs:
1666 if revs:
1667 revs = [repo.lookup(rev) for rev in revs]
1667 revs = [repo.lookup(rev) for rev in revs]
1668 other = hg.peer(repo, {}, dest)
1668 other = hg.peer(repo, {}, dest)
1669 n = other.lookup(q)
1669 n = other.lookup(q)
1670 if n in repo:
1670 if n in repo:
1671 r = repo[n].rev()
1671 r = repo[n].rev()
1672 if r in subset:
1672 if r in subset:
1673 return baseset([r])
1673 return baseset([r])
1674 return baseset()
1674 return baseset()
1675
1675
1676 @predicate('removes(pattern)', safe=True)
1676 @predicate('removes(pattern)', safe=True)
1677 def removes(repo, subset, x):
1677 def removes(repo, subset, x):
1678 """Changesets which remove files matching pattern.
1678 """Changesets which remove files matching pattern.
1679
1679
1680 The pattern without explicit kind like ``glob:`` is expected to be
1680 The pattern without explicit kind like ``glob:`` is expected to be
1681 relative to the current directory and match against a file or a
1681 relative to the current directory and match against a file or a
1682 directory.
1682 directory.
1683 """
1683 """
1684 # i18n: "removes" is a keyword
1684 # i18n: "removes" is a keyword
1685 pat = getstring(x, _("removes requires a pattern"))
1685 pat = getstring(x, _("removes requires a pattern"))
1686 return checkstatus(repo, subset, pat, 2)
1686 return checkstatus(repo, subset, pat, 2)
1687
1687
1688 @predicate('rev(number)', safe=True)
1688 @predicate('rev(number)', safe=True)
1689 def rev(repo, subset, x):
1689 def rev(repo, subset, x):
1690 """Revision with the given numeric identifier.
1690 """Revision with the given numeric identifier.
1691 """
1691 """
1692 # i18n: "rev" is a keyword
1692 # i18n: "rev" is a keyword
1693 l = getargs(x, 1, 1, _("rev requires one argument"))
1693 l = getargs(x, 1, 1, _("rev requires one argument"))
1694 try:
1694 try:
1695 # i18n: "rev" is a keyword
1695 # i18n: "rev" is a keyword
1696 l = int(getstring(l[0], _("rev requires a number")))
1696 l = int(getstring(l[0], _("rev requires a number")))
1697 except (TypeError, ValueError):
1697 except (TypeError, ValueError):
1698 # i18n: "rev" is a keyword
1698 # i18n: "rev" is a keyword
1699 raise error.ParseError(_("rev expects a number"))
1699 raise error.ParseError(_("rev expects a number"))
1700 if l not in repo.changelog and l != node.nullrev:
1700 if l not in repo.changelog and l != node.nullrev:
1701 return baseset()
1701 return baseset()
1702 return subset & baseset([l])
1702 return subset & baseset([l])
1703
1703
1704 @predicate('matching(revision [, field])', safe=True)
1704 @predicate('matching(revision [, field])', safe=True)
1705 def matching(repo, subset, x):
1705 def matching(repo, subset, x):
1706 """Changesets in which a given set of fields match the set of fields in the
1706 """Changesets in which a given set of fields match the set of fields in the
1707 selected revision or set.
1707 selected revision or set.
1708
1708
1709 To match more than one field pass the list of fields to match separated
1709 To match more than one field pass the list of fields to match separated
1710 by spaces (e.g. ``author description``).
1710 by spaces (e.g. ``author description``).
1711
1711
1712 Valid fields are most regular revision fields and some special fields.
1712 Valid fields are most regular revision fields and some special fields.
1713
1713
1714 Regular revision fields are ``description``, ``author``, ``branch``,
1714 Regular revision fields are ``description``, ``author``, ``branch``,
1715 ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
1715 ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
1716 and ``diff``.
1716 and ``diff``.
1717 Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
1717 Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
1718 contents of the revision. Two revisions matching their ``diff`` will
1718 contents of the revision. Two revisions matching their ``diff`` will
1719 also match their ``files``.
1719 also match their ``files``.
1720
1720
1721 Special fields are ``summary`` and ``metadata``:
1721 Special fields are ``summary`` and ``metadata``:
1722 ``summary`` matches the first line of the description.
1722 ``summary`` matches the first line of the description.
1723 ``metadata`` is equivalent to matching ``description user date``
1723 ``metadata`` is equivalent to matching ``description user date``
1724 (i.e. it matches the main metadata fields).
1724 (i.e. it matches the main metadata fields).
1725
1725
1726 ``metadata`` is the default field which is used when no fields are
1726 ``metadata`` is the default field which is used when no fields are
1727 specified. You can match more than one field at a time.
1727 specified. You can match more than one field at a time.
1728 """
1728 """
1729 # i18n: "matching" is a keyword
1729 # i18n: "matching" is a keyword
1730 l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))
1730 l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))
1731
1731
1732 revs = getset(repo, fullreposet(repo), l[0])
1732 revs = getset(repo, fullreposet(repo), l[0])
1733
1733
1734 fieldlist = ['metadata']
1734 fieldlist = ['metadata']
1735 if len(l) > 1:
1735 if len(l) > 1:
1736 fieldlist = getstring(l[1],
1736 fieldlist = getstring(l[1],
1737 # i18n: "matching" is a keyword
1737 # i18n: "matching" is a keyword
1738 _("matching requires a string "
1738 _("matching requires a string "
1739 "as its second argument")).split()
1739 "as its second argument")).split()
1740
1740
1741 # Make sure that there are no repeated fields,
1741 # Make sure that there are no repeated fields,
1742 # expand the 'special' 'metadata' field type
1742 # expand the 'special' 'metadata' field type
1743 # and check the 'files' whenever we check the 'diff'
1743 # and check the 'files' whenever we check the 'diff'
1744 fields = []
1744 fields = []
1745 for field in fieldlist:
1745 for field in fieldlist:
1746 if field == 'metadata':
1746 if field == 'metadata':
1747 fields += ['user', 'description', 'date']
1747 fields += ['user', 'description', 'date']
1748 elif field == 'diff':
1748 elif field == 'diff':
1749 # a revision matching the diff must also match the files
1749 # a revision matching the diff must also match the files
1750 # since matching the diff is very costly, make sure to
1750 # since matching the diff is very costly, make sure to
1751 # also match the files first
1751 # also match the files first
1752 fields += ['files', 'diff']
1752 fields += ['files', 'diff']
1753 else:
1753 else:
1754 if field == 'author':
1754 if field == 'author':
1755 field = 'user'
1755 field = 'user'
1756 fields.append(field)
1756 fields.append(field)
1757 fields = set(fields)
1757 fields = set(fields)
1758 if 'summary' in fields and 'description' in fields:
1758 if 'summary' in fields and 'description' in fields:
1759 # If a revision matches its description it also matches its summary
1759 # If a revision matches its description it also matches its summary
1760 fields.discard('summary')
1760 fields.discard('summary')
1761
1761
1762 # We may want to match more than one field
1762 # We may want to match more than one field
1763 # Not all fields take the same amount of time to be matched
1763 # Not all fields take the same amount of time to be matched
1764 # Sort the selected fields in order of increasing matching cost
1764 # Sort the selected fields in order of increasing matching cost
1765 fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
1765 fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
1766 'files', 'description', 'substate', 'diff']
1766 'files', 'description', 'substate', 'diff']
1767 def fieldkeyfunc(f):
1767 def fieldkeyfunc(f):
1768 try:
1768 try:
1769 return fieldorder.index(f)
1769 return fieldorder.index(f)
1770 except ValueError:
1770 except ValueError:
1771 # assume an unknown field is very costly
1771 # assume an unknown field is very costly
1772 return len(fieldorder)
1772 return len(fieldorder)
1773 fields = list(fields)
1773 fields = list(fields)
1774 fields.sort(key=fieldkeyfunc)
1774 fields.sort(key=fieldkeyfunc)
1775
1775
1776 # Each field will be matched with its own "getfield" function
1776 # Each field will be matched with its own "getfield" function
1777 # which will be added to the getfieldfuncs array of functions
1777 # which will be added to the getfieldfuncs array of functions
1778 getfieldfuncs = []
1778 getfieldfuncs = []
1779 _funcs = {
1779 _funcs = {
1780 'user': lambda r: repo[r].user(),
1780 'user': lambda r: repo[r].user(),
1781 'branch': lambda r: repo[r].branch(),
1781 'branch': lambda r: repo[r].branch(),
1782 'date': lambda r: repo[r].date(),
1782 'date': lambda r: repo[r].date(),
1783 'description': lambda r: repo[r].description(),
1783 'description': lambda r: repo[r].description(),
1784 'files': lambda r: repo[r].files(),
1784 'files': lambda r: repo[r].files(),
1785 'parents': lambda r: repo[r].parents(),
1785 'parents': lambda r: repo[r].parents(),
1786 'phase': lambda r: repo[r].phase(),
1786 'phase': lambda r: repo[r].phase(),
1787 'substate': lambda r: repo[r].substate,
1787 'substate': lambda r: repo[r].substate,
1788 'summary': lambda r: repo[r].description().splitlines()[0],
1788 'summary': lambda r: repo[r].description().splitlines()[0],
1789 'diff': lambda r: list(repo[r].diff(git=True),)
1789 'diff': lambda r: list(repo[r].diff(git=True),)
1790 }
1790 }
1791 for info in fields:
1791 for info in fields:
1792 getfield = _funcs.get(info, None)
1792 getfield = _funcs.get(info, None)
1793 if getfield is None:
1793 if getfield is None:
1794 raise error.ParseError(
1794 raise error.ParseError(
1795 # i18n: "matching" is a keyword
1795 # i18n: "matching" is a keyword
1796 _("unexpected field name passed to matching: %s") % info)
1796 _("unexpected field name passed to matching: %s") % info)
1797 getfieldfuncs.append(getfield)
1797 getfieldfuncs.append(getfield)
1798 # convert the getfield array of functions into a "getinfo" function
1798 # convert the getfield array of functions into a "getinfo" function
1799 # which returns an array of field values (or a single value if there
1799 # which returns an array of field values (or a single value if there
1800 # is only one field to match)
1800 # is only one field to match)
1801 getinfo = lambda r: [f(r) for f in getfieldfuncs]
1801 getinfo = lambda r: [f(r) for f in getfieldfuncs]
1802
1802
1803 def matches(x):
1803 def matches(x):
1804 for rev in revs:
1804 for rev in revs:
1805 target = getinfo(rev)
1805 target = getinfo(rev)
1806 match = True
1806 match = True
1807 for n, f in enumerate(getfieldfuncs):
1807 for n, f in enumerate(getfieldfuncs):
1808 if target[n] != f(x):
1808 if target[n] != f(x):
1809 match = False
1809 match = False
1810 if match:
1810 if match:
1811 return True
1811 return True
1812 return False
1812 return False
1813
1813
1814 return subset.filter(matches, condrepr=('<matching%r %r>', fields, revs))
1814 return subset.filter(matches, condrepr=('<matching%r %r>', fields, revs))
1815
1815
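# --- illustrative sketch (not part of revset.py) ---
# A minimal, self-contained illustration of the strategy matching() uses:
# order the requested fields by how costly they are to compute and stop at
# the first mismatch, so expensive fields such as 'diff' are rarely
# evaluated. The field names, cost table and sample "revisions" below are
# invented for this sketch; only the ordering/short-circuit idea is taken
# from the code above.
FIELD_ORDER = ['phase', 'user', 'date', 'files', 'description', 'diff']

def sort_by_cost(fields):
    # unknown fields are assumed to be the most expensive
    return sorted(fields, key=lambda f: (FIELD_ORDER.index(f)
                                         if f in FIELD_ORDER
                                         else len(FIELD_ORDER)))

def fieldsmatch(target, candidate, fields):
    for f in sort_by_cost(fields):
        if target[f] != candidate[f]:
            return False  # cheap mismatch found, costly fields never computed
    return True

target = {'user': 'alice', 'date': 1, 'description': 'fix bug'}
candidate = {'user': 'alice', 'date': 2, 'description': 'fix bug'}
print(fieldsmatch(target, candidate, ['description', 'date', 'user']))  # False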
1816 @predicate('reverse(set)', safe=True)
1816 @predicate('reverse(set)', safe=True)
1817 def reverse(repo, subset, x):
1817 def reverse(repo, subset, x):
1818 """Reverse order of set.
1818 """Reverse order of set.
1819 """
1819 """
1820 l = getset(repo, subset, x)
1820 l = getset(repo, subset, x)
1821 l.reverse()
1821 l.reverse()
1822 return l
1822 return l
1823
1823
1824 @predicate('roots(set)', safe=True)
1824 @predicate('roots(set)', safe=True)
1825 def roots(repo, subset, x):
1825 def roots(repo, subset, x):
1826 """Changesets in set with no parent changeset in set.
1826 """Changesets in set with no parent changeset in set.
1827 """
1827 """
1828 s = getset(repo, fullreposet(repo), x)
1828 s = getset(repo, fullreposet(repo), x)
1829 parents = repo.changelog.parentrevs
1829 parents = repo.changelog.parentrevs
1830 def filter(r):
1830 def filter(r):
1831 for p in parents(r):
1831 for p in parents(r):
1832 if 0 <= p and p in s:
1832 if 0 <= p and p in s:
1833 return False
1833 return False
1834 return True
1834 return True
1835 return subset & s.filter(filter, condrepr='<roots>')
1835 return subset & s.filter(filter, condrepr='<roots>')
1836
1836
1837 _sortkeyfuncs = {
1837 _sortkeyfuncs = {
1838 'rev': lambda c: c.rev(),
1838 'rev': lambda c: c.rev(),
1839 'branch': lambda c: c.branch(),
1839 'branch': lambda c: c.branch(),
1840 'desc': lambda c: c.description(),
1840 'desc': lambda c: c.description(),
1841 'user': lambda c: c.user(),
1841 'user': lambda c: c.user(),
1842 'author': lambda c: c.user(),
1842 'author': lambda c: c.user(),
1843 'date': lambda c: c.date()[0],
1843 'date': lambda c: c.date()[0],
1844 }
1844 }
1845
1845
1846 @predicate('sort(set[, [-]key... [, ...]])', safe=True)
1846 @predicate('sort(set[, [-]key... [, ...]])', safe=True)
1847 def sort(repo, subset, x):
1847 def sort(repo, subset, x):
1848 """Sort set by keys. The default sort order is ascending, specify a key
1848 """Sort set by keys. The default sort order is ascending, specify a key
1849 as ``-key`` to sort in descending order.
1849 as ``-key`` to sort in descending order.
1850
1850
1851 The keys can be:
1851 The keys can be:
1852
1852
1853 - ``rev`` for the revision number,
1853 - ``rev`` for the revision number,
1854 - ``branch`` for the branch name,
1854 - ``branch`` for the branch name,
1855 - ``desc`` for the commit message (description),
1855 - ``desc`` for the commit message (description),
1856 - ``user`` for user name (``author`` can be used as an alias),
1856 - ``user`` for user name (``author`` can be used as an alias),
1857 - ``date`` for the commit date,
1857 - ``date`` for the commit date,
1858 - ``topo`` for a reverse topological sort
1858 - ``topo`` for a reverse topological sort
1859
1859
1860 The ``topo`` sort order cannot be combined with other sort keys. This sort
1860 The ``topo`` sort order cannot be combined with other sort keys. This sort
1861 takes one optional argument, ``topo.firstbranch``, which takes a revset that
1861 takes one optional argument, ``topo.firstbranch``, which takes a revset that
1862 specifies what topological branches to prioritize in the sort.
1862 specifies what topological branches to prioritize in the sort.
1863
1863
1864 """
1864 """
1865 args = getargsdict(x, 'sort', 'set keys topo.firstbranch')
1865 args = getargsdict(x, 'sort', 'set keys topo.firstbranch')
1866 if 'set' not in args:
1866 if 'set' not in args:
1867 # i18n: "sort" is a keyword
1867 # i18n: "sort" is a keyword
1868 raise error.ParseError(_('sort requires one or two arguments'))
1868 raise error.ParseError(_('sort requires one or two arguments'))
1869 keys = "rev"
1869 keys = "rev"
1870 if 'keys' in args:
1870 if 'keys' in args:
1871 # i18n: "sort" is a keyword
1871 # i18n: "sort" is a keyword
1872 keys = getstring(args['keys'], _("sort spec must be a string"))
1872 keys = getstring(args['keys'], _("sort spec must be a string"))
1873
1873
1874 keyflags = []
1875 for k in keys.split():
1876 fk = k
1877 reverse = (k[0] == '-')
1878 if reverse:
1879 k = k[1:]
1880 if k not in _sortkeyfuncs and k != 'topo':
1881 raise error.ParseError(_("unknown sort key %r") % fk)
1882 keyflags.append((k, reverse))
1883
1874 s = args['set']
1884 s = args['set']
1875 keys = keys.split()
1876 revs = getset(repo, subset, s)
1885 revs = getset(repo, subset, s)
1877
1886
1878 if len(keys) > 1 and any(k.lstrip('-') == 'topo' for k in keys):
1887 if len(keyflags) > 1 and any(k == 'topo' for k, reverse in keyflags):
1879 # i18n: "topo" is a keyword
1888 # i18n: "topo" is a keyword
1880 raise error.ParseError(_(
1889 raise error.ParseError(_(
1881 'topo sort order cannot be combined with other sort keys'))
1890 'topo sort order cannot be combined with other sort keys'))
1882
1891
1883 firstbranch = ()
1892 firstbranch = ()
1884 if 'topo.firstbranch' in args:
1893 if 'topo.firstbranch' in args:
1885 if any(k.lstrip('-') == 'topo' for k in keys):
1894 if any(k == 'topo' for k, reverse in keyflags):
1886 firstbranch = getset(repo, subset, args['topo.firstbranch'])
1895 firstbranch = getset(repo, subset, args['topo.firstbranch'])
1887 else:
1896 else:
1888 # i18n: "topo" and "topo.firstbranch" are keywords
1897 # i18n: "topo" and "topo.firstbranch" are keywords
1889 raise error.ParseError(_(
1898 raise error.ParseError(_(
1890 'topo.firstbranch can only be used when using the topo sort '
1899 'topo.firstbranch can only be used when using the topo sort '
1891 'key'))
1900 'key'))
1892
1901
1893 if not keys:
1902 if not keyflags:
1894 return revs
1895 if keys == ["rev"]:
1896 revs.sort()
1897 return revs
1903 return revs
1898 elif keys == ["-rev"]:
1904 if len(keyflags) == 1 and keyflags[0][0] == "rev":
1899 revs.sort(reverse=True)
1905 revs.sort(reverse=keyflags[0][1])
1900 return revs
1906 return revs
1901 elif keys[0] in ("topo", "-topo"):
1907 elif keyflags[0][0] == "topo":
1902 revs = baseset(_toposort(revs, repo.changelog.parentrevs, firstbranch),
1908 revs = baseset(_toposort(revs, repo.changelog.parentrevs, firstbranch),
1903 istopo=True)
1909 istopo=True)
1904 if keys[0][0] == '-':
1910 if keyflags[0][1]:
1905 revs.reverse()
1911 revs.reverse()
1906 return revs
1912 return revs
1907
1913
1908 # sort() is guaranteed to be stable
1914 # sort() is guaranteed to be stable
1909 ctxs = [repo[r] for r in revs]
1915 ctxs = [repo[r] for r in revs]
1910 for k in reversed(keys):
1916 for k, reverse in reversed(keyflags):
1911 fk = k
1917 ctxs.sort(key=_sortkeyfuncs[k], reverse=reverse)
1912 reverse = (k[0] == '-')
1913 if reverse:
1914 k = k[1:]
1915 try:
1916 ctxs.sort(key=_sortkeyfuncs[k], reverse=reverse)
1917 except KeyError:
1918 raise error.ParseError(_("unknown sort key %r") % fk)
1919 return baseset([c.rev() for c in ctxs])
1918 return baseset([c.rev() for c in ctxs])
1920
1919
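# --- illustrative sketch (not part of revset.py) ---
# A standalone sketch of the "(key, reverse) pairs + stable sort" approach
# used by sort() above: parse a spec such as "-date user" into pairs once,
# then run one stable list.sort() per key from the last key to the first,
# so earlier keys take precedence. The sample records and key functions
# are invented for this sketch.
keyfuncs = {
    'rev': lambda c: c['rev'],
    'user': lambda c: c['user'],
    'date': lambda c: c['date'],
}

def parsekeys(spec):
    keyflags = []
    for k in spec.split():
        reverse = k.startswith('-')
        if reverse:
            k = k[1:]
        if k not in keyfuncs:
            raise ValueError('unknown sort key %r' % k)
        keyflags.append((k, reverse))
    return keyflags

def sortrevs(ctxs, spec):
    for k, reverse in reversed(parsekeys(spec)):
        ctxs.sort(key=keyfuncs[k], reverse=reverse)
    return ctxs

ctxs = [{'rev': 0, 'user': 'bob', 'date': 30},
        {'rev': 1, 'user': 'alice', 'date': 20},
        {'rev': 2, 'user': 'alice', 'date': 10}]
print([c['rev'] for c in sortrevs(ctxs, 'user -date')])  # [1, 2, 0]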
1921 def _toposort(revs, parentsfunc, firstbranch=()):
1920 def _toposort(revs, parentsfunc, firstbranch=()):
1922 """Yield revisions from heads to roots one (topo) branch at a time.
1921 """Yield revisions from heads to roots one (topo) branch at a time.
1923
1922
1924 This function aims to be used by a graph generator that wishes to minimize
1923 This function aims to be used by a graph generator that wishes to minimize
1925 the number of parallel branches and their interleaving.
1924 the number of parallel branches and their interleaving.
1926
1925
1927 Example iteration order (numbers show the "true" order in a changelog):
1926 Example iteration order (numbers show the "true" order in a changelog):
1928
1927
1929 o 4
1928 o 4
1930 |
1929 |
1931 o 1
1930 o 1
1932 |
1931 |
1933 | o 3
1932 | o 3
1934 | |
1933 | |
1935 | o 2
1934 | o 2
1936 |/
1935 |/
1937 o 0
1936 o 0
1938
1937
1939 Note that the ancestors of merges are understood by the current
1938 Note that the ancestors of merges are understood by the current
1940 algorithm to be on the same branch. This means no reordering will
1939 algorithm to be on the same branch. This means no reordering will
1941 occur behind a merge.
1940 occur behind a merge.
1942 """
1941 """
1943
1942
1944 ### Quick summary of the algorithm
1943 ### Quick summary of the algorithm
1945 #
1944 #
1946 # This function is based around a "retention" principle. We keep revisions
1945 # This function is based around a "retention" principle. We keep revisions
1947 # in memory until we are ready to emit a whole branch that immediately
1946 # in memory until we are ready to emit a whole branch that immediately
1948 # "merges" into an existing one. This reduces the number of parallel
1947 # "merges" into an existing one. This reduces the number of parallel
1949 # branches with interleaved revisions.
1948 # branches with interleaved revisions.
1950 #
1949 #
1951 # During iteration revs are split into two groups:
1950 # During iteration revs are split into two groups:
1952 # A) revisions already emitted
1951 # A) revisions already emitted
1953 # B) revisions in "retention". They are stored as different subgroups.
1952 # B) revisions in "retention". They are stored as different subgroups.
1954 #
1953 #
1955 # for each REV, we do the following logic:
1954 # for each REV, we do the following logic:
1956 #
1955 #
1957 # 1) if REV is a parent of (A), we will emit it. If there is a
1956 # 1) if REV is a parent of (A), we will emit it. If there is a
1958 # retention group ((B) above) that is blocked on REV being
1957 # retention group ((B) above) that is blocked on REV being
1959 # available, we emit all the revisions out of that retention
1958 # available, we emit all the revisions out of that retention
1960 # group first.
1959 # group first.
1961 #
1960 #
1962 # 2) otherwise, we search for a subgroup in (B) waiting for REV to become
1961 # 2) otherwise, we search for a subgroup in (B) waiting for REV to become
1963 # available; if such a subgroup exists, we add REV to it and the subgroup is
1962 # available; if such a subgroup exists, we add REV to it and the subgroup is
1964 # now waiting for REV.parents() to become available.
1963 # now waiting for REV.parents() to become available.
1965 #
1964 #
1966 # 3) finally if no such group existed in (B), we create a new subgroup.
1965 # 3) finally if no such group existed in (B), we create a new subgroup.
1967 #
1966 #
1968 #
1967 #
1969 # To bootstrap the algorithm, we emit the tipmost revision (which
1968 # To bootstrap the algorithm, we emit the tipmost revision (which
1970 # puts it in group (A) from above).
1969 # puts it in group (A) from above).
1971
1970
1972 revs.sort(reverse=True)
1971 revs.sort(reverse=True)
1973
1972
1974 # Set of parents of revisions that have been emitted. They can be considered
1973 # Set of parents of revisions that have been emitted. They can be considered
1975 # unblocked as the graph generator is already aware of them so there is no
1974 # unblocked as the graph generator is already aware of them so there is no
1976 # need to delay the revisions that reference them.
1975 # need to delay the revisions that reference them.
1977 #
1976 #
1978 # If someone wants to prioritize a branch over the others, pre-filling this
1977 # If someone wants to prioritize a branch over the others, pre-filling this
1979 # set will force all other branches to wait until this branch is ready to be
1978 # set will force all other branches to wait until this branch is ready to be
1980 # emitted.
1979 # emitted.
1981 unblocked = set(firstbranch)
1980 unblocked = set(firstbranch)
1982
1981
1983 # list of groups waiting to be displayed, each group is defined by:
1982 # list of groups waiting to be displayed, each group is defined by:
1984 #
1983 #
1985 # (revs: lists of revs waiting to be displayed,
1984 # (revs: lists of revs waiting to be displayed,
1986 # blocked: set of revs that cannot be displayed before those in 'revs')
1985 # blocked: set of revs that cannot be displayed before those in 'revs')
1987 #
1986 #
1988 # The second value ('blocked') corresponds to parents of any revision in the
1987 # The second value ('blocked') corresponds to parents of any revision in the
1989 # group ('revs') that is not itself contained in the group. The main idea
1988 # group ('revs') that is not itself contained in the group. The main idea
1990 # of this algorithm is to delay as much as possible the emission of any
1989 # of this algorithm is to delay as much as possible the emission of any
1991 # revision. This means waiting for the moment we are about to display
1990 # revision. This means waiting for the moment we are about to display
1992 # these parents to display the revs in a group.
1991 # these parents to display the revs in a group.
1993 #
1992 #
1994 # This first implementation is smart until it encounters a merge: it will
1993 # This first implementation is smart until it encounters a merge: it will
1995 # emit revs as soon as any parent is about to be emitted and can grow an
1994 # emit revs as soon as any parent is about to be emitted and can grow an
1996 # arbitrary number of revs in 'blocked'. In practice this means we properly
1995 # arbitrary number of revs in 'blocked'. In practice this means we properly
1997 # retain new branches but give up on any special ordering for ancestors
1996 # retain new branches but give up on any special ordering for ancestors
1998 # of merges. The implementation can be improved to handle this better.
1997 # of merges. The implementation can be improved to handle this better.
1999 #
1998 #
2000 # The first subgroup is special. It corresponds to all the revisions that
1999 # The first subgroup is special. It corresponds to all the revisions that
2001 # were already emitted. Its 'revs' list is expected to be empty and its
2000 # were already emitted. Its 'revs' list is expected to be empty and its
2002 # 'blocked' set contains the parent revisions of already emitted revisions.
2001 # 'blocked' set contains the parent revisions of already emitted revisions.
2003 #
2002 #
2004 # You could pre-seed the <parents> set of groups[0] with specific
2003 # You could pre-seed the <parents> set of groups[0] with specific
2005 # changesets to select what the first emitted branch should be.
2004 # changesets to select what the first emitted branch should be.
2006 groups = [([], unblocked)]
2005 groups = [([], unblocked)]
2007 pendingheap = []
2006 pendingheap = []
2008 pendingset = set()
2007 pendingset = set()
2009
2008
2010 heapq.heapify(pendingheap)
2009 heapq.heapify(pendingheap)
2011 heappop = heapq.heappop
2010 heappop = heapq.heappop
2012 heappush = heapq.heappush
2011 heappush = heapq.heappush
2013 for currentrev in revs:
2012 for currentrev in revs:
2014 # Heap works with smallest element, we want highest so we invert
2013 # Heap works with smallest element, we want highest so we invert
2015 if currentrev not in pendingset:
2014 if currentrev not in pendingset:
2016 heappush(pendingheap, -currentrev)
2015 heappush(pendingheap, -currentrev)
2017 pendingset.add(currentrev)
2016 pendingset.add(currentrev)
2018 # iterate on pending revs until the current rev has been
2017 # iterate on pending revs until the current rev has been
2019 # processed.
2018 # processed.
2020 rev = None
2019 rev = None
2021 while rev != currentrev:
2020 while rev != currentrev:
2022 rev = -heappop(pendingheap)
2021 rev = -heappop(pendingheap)
2023 pendingset.remove(rev)
2022 pendingset.remove(rev)
2024
2023
2025 # Look for a blocked subgroup waiting for the current revision.
2024 # Look for a blocked subgroup waiting for the current revision.
2026 matching = [i for i, g in enumerate(groups) if rev in g[1]]
2025 matching = [i for i, g in enumerate(groups) if rev in g[1]]
2027
2026
2028 if matching:
2027 if matching:
2029 # The main idea is to gather together all sets that are blocked
2028 # The main idea is to gather together all sets that are blocked
2030 # on the same revision.
2029 # on the same revision.
2031 #
2030 #
2032 # Groups are merged when a common blocking ancestor is
2031 # Groups are merged when a common blocking ancestor is
2033 # observed. For example, given two groups:
2032 # observed. For example, given two groups:
2034 #
2033 #
2035 # revs [5, 4] waiting for 1
2034 # revs [5, 4] waiting for 1
2036 # revs [3, 2] waiting for 1
2035 # revs [3, 2] waiting for 1
2037 #
2036 #
2038 # These two groups will be merged when we process
2037 # These two groups will be merged when we process
2039 # 1. In theory, we could have merged the groups when
2038 # 1. In theory, we could have merged the groups when
2040 # we added 2 to the group it is now in (we could have
2039 # we added 2 to the group it is now in (we could have
2041 # noticed the groups were both blocked on 1 then), but
2040 # noticed the groups were both blocked on 1 then), but
2042 # the way it works now makes the algorithm simpler.
2041 # the way it works now makes the algorithm simpler.
2043 #
2042 #
2044 # We also always keep the oldest subgroup first. We can
2043 # We also always keep the oldest subgroup first. We can
2045 # probably improve the behavior by having the longest set
2044 # probably improve the behavior by having the longest set
2046 # first. That way, graph algorithms could minimise the length
2045 # first. That way, graph algorithms could minimise the length
2047 # of parallel lines in their drawing. This is currently not done.
2046 # of parallel lines in their drawing. This is currently not done.
2048 targetidx = matching.pop(0)
2047 targetidx = matching.pop(0)
2049 trevs, tparents = groups[targetidx]
2048 trevs, tparents = groups[targetidx]
2050 for i in matching:
2049 for i in matching:
2051 gr = groups[i]
2050 gr = groups[i]
2052 trevs.extend(gr[0])
2051 trevs.extend(gr[0])
2053 tparents |= gr[1]
2052 tparents |= gr[1]
2054 # delete all merged subgroups (except the one we kept)
2053 # delete all merged subgroups (except the one we kept)
2055 # (starting from the last subgroup for performance and
2054 # (starting from the last subgroup for performance and
2056 # sanity reasons)
2055 # sanity reasons)
2057 for i in reversed(matching):
2056 for i in reversed(matching):
2058 del groups[i]
2057 del groups[i]
2059 else:
2058 else:
2060 # This is a new head. We create a new subgroup for it.
2059 # This is a new head. We create a new subgroup for it.
2061 targetidx = len(groups)
2060 targetidx = len(groups)
2062 groups.append(([], set([rev])))
2061 groups.append(([], set([rev])))
2063
2062
2064 gr = groups[targetidx]
2063 gr = groups[targetidx]
2065
2064
2066 # We now add the current nodes to this subgroup. This is done
2065 # We now add the current nodes to this subgroup. This is done
2067 # after the subgroup merging because all elements from a subgroup
2066 # after the subgroup merging because all elements from a subgroup
2068 # that relied on this rev must precede it.
2067 # that relied on this rev must precede it.
2069 #
2068 #
2070 # we also update the <parents> set to include the parents of the
2069 # we also update the <parents> set to include the parents of the
2071 # new nodes.
2070 # new nodes.
2072 if rev == currentrev: # only display stuff in rev
2071 if rev == currentrev: # only display stuff in rev
2073 gr[0].append(rev)
2072 gr[0].append(rev)
2074 gr[1].remove(rev)
2073 gr[1].remove(rev)
2075 parents = [p for p in parentsfunc(rev) if p > node.nullrev]
2074 parents = [p for p in parentsfunc(rev) if p > node.nullrev]
2076 gr[1].update(parents)
2075 gr[1].update(parents)
2077 for p in parents:
2076 for p in parents:
2078 if p not in pendingset:
2077 if p not in pendingset:
2079 pendingset.add(p)
2078 pendingset.add(p)
2080 heappush(pendingheap, -p)
2079 heappush(pendingheap, -p)
2081
2080
2082 # Look for a subgroup to display
2081 # Look for a subgroup to display
2083 #
2082 #
2084 # When unblocked is empty (if clause), we were not waiting for any
2083 # When unblocked is empty (if clause), we were not waiting for any
2085 # revisions during the first iteration (if no priority was given) or
2084 # revisions during the first iteration (if no priority was given) or
2086 # if we emitted a whole disconnected set of the graph (reached a
2085 # if we emitted a whole disconnected set of the graph (reached a
2087 # root). In that case we arbitrarily take the oldest known
2086 # root). In that case we arbitrarily take the oldest known
2088 # subgroup. The heuristic could probably be better.
2087 # subgroup. The heuristic could probably be better.
2089 #
2088 #
2090 # Otherwise (elif clause) if the subgroup is blocked on
2089 # Otherwise (elif clause) if the subgroup is blocked on
2091 # a revision we just emitted, we can safely emit it as
2090 # a revision we just emitted, we can safely emit it as
2092 # well.
2091 # well.
2093 if not unblocked:
2092 if not unblocked:
2094 if len(groups) > 1: # display other subset
2093 if len(groups) > 1: # display other subset
2095 targetidx = 1
2094 targetidx = 1
2096 gr = groups[1]
2095 gr = groups[1]
2097 elif not gr[1] & unblocked:
2096 elif not gr[1] & unblocked:
2098 gr = None
2097 gr = None
2099
2098
2100 if gr is not None:
2099 if gr is not None:
2101 # update the set of awaited revisions with the one from the
2100 # update the set of awaited revisions with the one from the
2102 # subgroup
2101 # subgroup
2103 unblocked |= gr[1]
2102 unblocked |= gr[1]
2104 # output all revisions in the subgroup
2103 # output all revisions in the subgroup
2105 for r in gr[0]:
2104 for r in gr[0]:
2106 yield r
2105 yield r
2107 # delete the subgroup that you just output
2106 # delete the subgroup that you just output
2108 # unless it is groups[0] in which case you just empty it.
2107 # unless it is groups[0] in which case you just empty it.
2109 if targetidx:
2108 if targetidx:
2110 del groups[targetidx]
2109 del groups[targetidx]
2111 else:
2110 else:
2112 gr[0][:] = []
2111 gr[0][:] = []
2113 # Check if we have some subgroup waiting for revisions we are not going to
2112 # Check if we have some subgroup waiting for revisions we are not going to
2114 # iterate over
2113 # iterate over
2115 for g in groups:
2114 for g in groups:
2116 for r in g[0]:
2115 for r in g[0]:
2117 yield r
2116 yield r
2118
2117
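# --- illustrative sketch (not part of revset.py) ---
# A self-contained, simplified illustration of the goal stated above
# (emit revisions from heads to roots, finishing one linear branch before
# starting the next). It is NOT the retention-based algorithm implemented
# in _toposort(); it only delays a revision until all of its children in
# the set have been emitted. The toy graph mirrors the docstring example,
# and 'parentmap' is an invented stand-in for parentsfunc.
def simpletoposort(revs, parentsfunc):
    revs = sorted(revs, reverse=True)
    revset = set(revs)
    nchildren = dict.fromkeys(revs, 0)  # children of each rev inside the set
    for r in revs:
        for p in parentsfunc(r):
            if p in revset:
                nchildren[p] += 1
    emitted = set()
    for head in revs:
        if head in emitted or nchildren[head]:
            continue
        stack = [head]  # walk this branch down toward its root
        while stack:
            r = stack.pop()
            emitted.add(r)
            yield r
            for p in parentsfunc(r):
                if p in revset:
                    nchildren[p] -= 1
                    if not nchildren[p]:
                        stack.append(p)

parentmap = {4: [1], 3: [2], 2: [0], 1: [0], 0: []}
print(list(simpletoposort([0, 1, 2, 3, 4], parentmap.get)))
# [4, 1, 3, 2, 0] -- the same order as the docstring example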
2119 @predicate('subrepo([pattern])')
2118 @predicate('subrepo([pattern])')
2120 def subrepo(repo, subset, x):
2119 def subrepo(repo, subset, x):
2121 """Changesets that add, modify or remove the given subrepo. If no subrepo
2120 """Changesets that add, modify or remove the given subrepo. If no subrepo
2122 pattern is named, any subrepo changes are returned.
2121 pattern is named, any subrepo changes are returned.
2123 """
2122 """
2124 # i18n: "subrepo" is a keyword
2123 # i18n: "subrepo" is a keyword
2125 args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
2124 args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
2126 pat = None
2125 pat = None
2127 if len(args) != 0:
2126 if len(args) != 0:
2128 pat = getstring(args[0], _("subrepo requires a pattern"))
2127 pat = getstring(args[0], _("subrepo requires a pattern"))
2129
2128
2130 m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])
2129 m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])
2131
2130
2132 def submatches(names):
2131 def submatches(names):
2133 k, p, m = util.stringmatcher(pat)
2132 k, p, m = util.stringmatcher(pat)
2134 for name in names:
2133 for name in names:
2135 if m(name):
2134 if m(name):
2136 yield name
2135 yield name
2137
2136
2138 def matches(x):
2137 def matches(x):
2139 c = repo[x]
2138 c = repo[x]
2140 s = repo.status(c.p1().node(), c.node(), match=m)
2139 s = repo.status(c.p1().node(), c.node(), match=m)
2141
2140
2142 if pat is None:
2141 if pat is None:
2143 return s.added or s.modified or s.removed
2142 return s.added or s.modified or s.removed
2144
2143
2145 if s.added:
2144 if s.added:
2146 return any(submatches(c.substate.keys()))
2145 return any(submatches(c.substate.keys()))
2147
2146
2148 if s.modified:
2147 if s.modified:
2149 subs = set(c.p1().substate.keys())
2148 subs = set(c.p1().substate.keys())
2150 subs.update(c.substate.keys())
2149 subs.update(c.substate.keys())
2151
2150
2152 for path in submatches(subs):
2151 for path in submatches(subs):
2153 if c.p1().substate.get(path) != c.substate.get(path):
2152 if c.p1().substate.get(path) != c.substate.get(path):
2154 return True
2153 return True
2155
2154
2156 if s.removed:
2155 if s.removed:
2157 return any(submatches(c.p1().substate.keys()))
2156 return any(submatches(c.p1().substate.keys()))
2158
2157
2159 return False
2158 return False
2160
2159
2161 return subset.filter(matches, condrepr=('<subrepo %r>', pat))
2160 return subset.filter(matches, condrepr=('<subrepo %r>', pat))
2162
2161
2163 def _substringmatcher(pattern):
2162 def _substringmatcher(pattern):
2164 kind, pattern, matcher = util.stringmatcher(pattern)
2163 kind, pattern, matcher = util.stringmatcher(pattern)
2165 if kind == 'literal':
2164 if kind == 'literal':
2166 matcher = lambda s: pattern in s
2165 matcher = lambda s: pattern in s
2167 return kind, pattern, matcher
2166 return kind, pattern, matcher
2168
2167
2169 @predicate('tag([name])', safe=True)
2168 @predicate('tag([name])', safe=True)
2170 def tag(repo, subset, x):
2169 def tag(repo, subset, x):
2171 """The specified tag by name, or all tagged revisions if no name is given.
2170 """The specified tag by name, or all tagged revisions if no name is given.
2172
2171
2173 If `name` starts with `re:`, the remainder of the name is treated as
2172 If `name` starts with `re:`, the remainder of the name is treated as
2174 a regular expression. To match a tag that actually starts with `re:`,
2173 a regular expression. To match a tag that actually starts with `re:`,
2175 use the prefix `literal:`.
2174 use the prefix `literal:`.
2176 """
2175 """
2177 # i18n: "tag" is a keyword
2176 # i18n: "tag" is a keyword
2178 args = getargs(x, 0, 1, _("tag takes one or no arguments"))
2177 args = getargs(x, 0, 1, _("tag takes one or no arguments"))
2179 cl = repo.changelog
2178 cl = repo.changelog
2180 if args:
2179 if args:
2181 pattern = getstring(args[0],
2180 pattern = getstring(args[0],
2182 # i18n: "tag" is a keyword
2181 # i18n: "tag" is a keyword
2183 _('the argument to tag must be a string'))
2182 _('the argument to tag must be a string'))
2184 kind, pattern, matcher = util.stringmatcher(pattern)
2183 kind, pattern, matcher = util.stringmatcher(pattern)
2185 if kind == 'literal':
2184 if kind == 'literal':
2186 # avoid resolving all tags
2185 # avoid resolving all tags
2187 tn = repo._tagscache.tags.get(pattern, None)
2186 tn = repo._tagscache.tags.get(pattern, None)
2188 if tn is None:
2187 if tn is None:
2189 raise error.RepoLookupError(_("tag '%s' does not exist")
2188 raise error.RepoLookupError(_("tag '%s' does not exist")
2190 % pattern)
2189 % pattern)
2191 s = set([repo[tn].rev()])
2190 s = set([repo[tn].rev()])
2192 else:
2191 else:
2193 s = set([cl.rev(n) for t, n in repo.tagslist() if matcher(t)])
2192 s = set([cl.rev(n) for t, n in repo.tagslist() if matcher(t)])
2194 else:
2193 else:
2195 s = set([cl.rev(n) for t, n in repo.tagslist() if t != 'tip'])
2194 s = set([cl.rev(n) for t, n in repo.tagslist() if t != 'tip'])
2196 return subset & s
2195 return subset & s
2197
2196
2198 @predicate('tagged', safe=True)
2197 @predicate('tagged', safe=True)
2199 def tagged(repo, subset, x):
2198 def tagged(repo, subset, x):
2200 return tag(repo, subset, x)
2199 return tag(repo, subset, x)
2201
2200
2202 @predicate('unstable()', safe=True)
2201 @predicate('unstable()', safe=True)
2203 def unstable(repo, subset, x):
2202 def unstable(repo, subset, x):
2204 """Non-obsolete changesets with obsolete ancestors.
2203 """Non-obsolete changesets with obsolete ancestors.
2205 """
2204 """
2206 # i18n: "unstable" is a keyword
2205 # i18n: "unstable" is a keyword
2207 getargs(x, 0, 0, _("unstable takes no arguments"))
2206 getargs(x, 0, 0, _("unstable takes no arguments"))
2208 unstables = obsmod.getrevs(repo, 'unstable')
2207 unstables = obsmod.getrevs(repo, 'unstable')
2209 return subset & unstables
2208 return subset & unstables
2210
2209
2211
2210
2212 @predicate('user(string)', safe=True)
2211 @predicate('user(string)', safe=True)
2213 def user(repo, subset, x):
2212 def user(repo, subset, x):
2214 """User name contains string. The match is case-insensitive.
2213 """User name contains string. The match is case-insensitive.
2215
2214
2216 If `string` starts with `re:`, the remainder of the string is treated as
2215 If `string` starts with `re:`, the remainder of the string is treated as
2217 a regular expression. To match a user that actually contains `re:`, use
2216 a regular expression. To match a user that actually contains `re:`, use
2218 the prefix `literal:`.
2217 the prefix `literal:`.
2219 """
2218 """
2220 return author(repo, subset, x)
2219 return author(repo, subset, x)
2221
2220
2222 # experimental
2221 # experimental
2223 @predicate('wdir', safe=True)
2222 @predicate('wdir', safe=True)
2224 def wdir(repo, subset, x):
2223 def wdir(repo, subset, x):
2225 # i18n: "wdir" is a keyword
2224 # i18n: "wdir" is a keyword
2226 getargs(x, 0, 0, _("wdir takes no arguments"))
2225 getargs(x, 0, 0, _("wdir takes no arguments"))
2227 if node.wdirrev in subset or isinstance(subset, fullreposet):
2226 if node.wdirrev in subset or isinstance(subset, fullreposet):
2228 return baseset([node.wdirrev])
2227 return baseset([node.wdirrev])
2229 return baseset()
2228 return baseset()
2230
2229
2231 # for internal use
2230 # for internal use
2232 @predicate('_list', safe=True)
2231 @predicate('_list', safe=True)
2233 def _list(repo, subset, x):
2232 def _list(repo, subset, x):
2234 s = getstring(x, "internal error")
2233 s = getstring(x, "internal error")
2235 if not s:
2234 if not s:
2236 return baseset()
2235 return baseset()
2237 # remove duplicates here. it's difficult for the caller to deduplicate sets
2236 # remove duplicates here. it's difficult for the caller to deduplicate sets
2238 # because different symbols can point to the same rev.
2237 # because different symbols can point to the same rev.
2239 cl = repo.changelog
2238 cl = repo.changelog
2240 ls = []
2239 ls = []
2241 seen = set()
2240 seen = set()
2242 for t in s.split('\0'):
2241 for t in s.split('\0'):
2243 try:
2242 try:
2244 # fast path for integer revision
2243 # fast path for integer revision
2245 r = int(t)
2244 r = int(t)
2246 if str(r) != t or r not in cl:
2245 if str(r) != t or r not in cl:
2247 raise ValueError
2246 raise ValueError
2248 revs = [r]
2247 revs = [r]
2249 except ValueError:
2248 except ValueError:
2250 revs = stringset(repo, subset, t)
2249 revs = stringset(repo, subset, t)
2251
2250
2252 for r in revs:
2251 for r in revs:
2253 if r in seen:
2252 if r in seen:
2254 continue
2253 continue
2255 if (r in subset
2254 if (r in subset
2256 or r == node.nullrev and isinstance(subset, fullreposet)):
2255 or r == node.nullrev and isinstance(subset, fullreposet)):
2257 ls.append(r)
2256 ls.append(r)
2258 seen.add(r)
2257 seen.add(r)
2259 return baseset(ls)
2258 return baseset(ls)
2260
2259
2261 # for internal use
2260 # for internal use
2262 @predicate('_intlist', safe=True)
2261 @predicate('_intlist', safe=True)
2263 def _intlist(repo, subset, x):
2262 def _intlist(repo, subset, x):
2264 s = getstring(x, "internal error")
2263 s = getstring(x, "internal error")
2265 if not s:
2264 if not s:
2266 return baseset()
2265 return baseset()
2267 ls = [int(r) for r in s.split('\0')]
2266 ls = [int(r) for r in s.split('\0')]
2268 s = subset
2267 s = subset
2269 return baseset([r for r in ls if r in s])
2268 return baseset([r for r in ls if r in s])
2270
2269
2271 # for internal use
2270 # for internal use
2272 @predicate('_hexlist', safe=True)
2271 @predicate('_hexlist', safe=True)
2273 def _hexlist(repo, subset, x):
2272 def _hexlist(repo, subset, x):
2274 s = getstring(x, "internal error")
2273 s = getstring(x, "internal error")
2275 if not s:
2274 if not s:
2276 return baseset()
2275 return baseset()
2277 cl = repo.changelog
2276 cl = repo.changelog
2278 ls = [cl.rev(node.bin(r)) for r in s.split('\0')]
2277 ls = [cl.rev(node.bin(r)) for r in s.split('\0')]
2279 s = subset
2278 s = subset
2280 return baseset([r for r in ls if r in s])
2279 return baseset([r for r in ls if r in s])
2281
2280
2282 methods = {
2281 methods = {
2283 "range": rangeset,
2282 "range": rangeset,
2284 "dagrange": dagrange,
2283 "dagrange": dagrange,
2285 "string": stringset,
2284 "string": stringset,
2286 "symbol": stringset,
2285 "symbol": stringset,
2287 "and": andset,
2286 "and": andset,
2288 "or": orset,
2287 "or": orset,
2289 "not": notset,
2288 "not": notset,
2290 "difference": differenceset,
2289 "difference": differenceset,
2291 "list": listset,
2290 "list": listset,
2292 "keyvalue": keyvaluepair,
2291 "keyvalue": keyvaluepair,
2293 "func": func,
2292 "func": func,
2294 "ancestor": ancestorspec,
2293 "ancestor": ancestorspec,
2295 "parent": parentspec,
2294 "parent": parentspec,
2296 "parentpost": p1,
2295 "parentpost": p1,
2297 }
2296 }
2298
2297
2299 def _matchonly(revs, bases):
2298 def _matchonly(revs, bases):
2300 """
2299 """
2301 >>> f = lambda *args: _matchonly(*map(parse, args))
2300 >>> f = lambda *args: _matchonly(*map(parse, args))
2302 >>> f('ancestors(A)', 'not ancestors(B)')
2301 >>> f('ancestors(A)', 'not ancestors(B)')
2303 ('list', ('symbol', 'A'), ('symbol', 'B'))
2302 ('list', ('symbol', 'A'), ('symbol', 'B'))
2304 """
2303 """
2305 if (revs is not None
2304 if (revs is not None
2306 and revs[0] == 'func'
2305 and revs[0] == 'func'
2307 and getstring(revs[1], _('not a symbol')) == 'ancestors'
2306 and getstring(revs[1], _('not a symbol')) == 'ancestors'
2308 and bases is not None
2307 and bases is not None
2309 and bases[0] == 'not'
2308 and bases[0] == 'not'
2310 and bases[1][0] == 'func'
2309 and bases[1][0] == 'func'
2311 and getstring(bases[1][1], _('not a symbol')) == 'ancestors'):
2310 and getstring(bases[1][1], _('not a symbol')) == 'ancestors'):
2312 return ('list', revs[2], bases[1][2])
2311 return ('list', revs[2], bases[1][2])
2313
2312
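# --- illustrative sketch (not part of revset.py) ---
# Hand-written parse trees showing the shape _matchonly() recognizes:
# "ancestors(A) and not ancestors(B)" becomes the two trees below, and the
# rewrite keeps only the two argument subtrees so the caller can emit
# only(A, B). These tuples are stand-ins for real parse() output.
revs = ('func', ('symbol', 'ancestors'), ('symbol', 'A'))
bases = ('not', ('func', ('symbol', 'ancestors'), ('symbol', 'B')))

def is_ancestors_call(tree):
    # structural check comparable to the getstring() tests above
    return (tree is not None and tree[0] == 'func'
            and tree[1] == ('symbol', 'ancestors'))

if is_ancestors_call(revs) and bases[0] == 'not' and is_ancestors_call(bases[1]):
    print(('list', revs[2], bases[1][2]))
    # ('list', ('symbol', 'A'), ('symbol', 'B'))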
2314 def _optimize(x, small):
2313 def _optimize(x, small):
2315 if x is None:
2314 if x is None:
2316 return 0, x
2315 return 0, x
2317
2316
2318 smallbonus = 1
2317 smallbonus = 1
2319 if small:
2318 if small:
2320 smallbonus = .5
2319 smallbonus = .5
2321
2320
2322 op = x[0]
2321 op = x[0]
2323 if op == 'minus':
2322 if op == 'minus':
2324 return _optimize(('and', x[1], ('not', x[2])), small)
2323 return _optimize(('and', x[1], ('not', x[2])), small)
2325 elif op == 'only':
2324 elif op == 'only':
2326 t = ('func', ('symbol', 'only'), ('list', x[1], x[2]))
2325 t = ('func', ('symbol', 'only'), ('list', x[1], x[2]))
2327 return _optimize(t, small)
2326 return _optimize(t, small)
2328 elif op == 'onlypost':
2327 elif op == 'onlypost':
2329 return _optimize(('func', ('symbol', 'only'), x[1]), small)
2328 return _optimize(('func', ('symbol', 'only'), x[1]), small)
2330 elif op == 'dagrangepre':
2329 elif op == 'dagrangepre':
2331 return _optimize(('func', ('symbol', 'ancestors'), x[1]), small)
2330 return _optimize(('func', ('symbol', 'ancestors'), x[1]), small)
2332 elif op == 'dagrangepost':
2331 elif op == 'dagrangepost':
2333 return _optimize(('func', ('symbol', 'descendants'), x[1]), small)
2332 return _optimize(('func', ('symbol', 'descendants'), x[1]), small)
2334 elif op == 'rangeall':
2333 elif op == 'rangeall':
2335 return _optimize(('range', ('string', '0'), ('string', 'tip')), small)
2334 return _optimize(('range', ('string', '0'), ('string', 'tip')), small)
2336 elif op == 'rangepre':
2335 elif op == 'rangepre':
2337 return _optimize(('range', ('string', '0'), x[1]), small)
2336 return _optimize(('range', ('string', '0'), x[1]), small)
2338 elif op == 'rangepost':
2337 elif op == 'rangepost':
2339 return _optimize(('range', x[1], ('string', 'tip')), small)
2338 return _optimize(('range', x[1], ('string', 'tip')), small)
2340 elif op == 'negate':
2339 elif op == 'negate':
2341 s = getstring(x[1], _("can't negate that"))
2340 s = getstring(x[1], _("can't negate that"))
2342 return _optimize(('string', '-' + s), small)
2341 return _optimize(('string', '-' + s), small)
2343 elif op in 'string symbol negate':
2342 elif op in 'string symbol negate':
2344 return smallbonus, x # single revisions are small
2343 return smallbonus, x # single revisions are small
2345 elif op == 'and':
2344 elif op == 'and':
2346 wa, ta = _optimize(x[1], True)
2345 wa, ta = _optimize(x[1], True)
2347 wb, tb = _optimize(x[2], True)
2346 wb, tb = _optimize(x[2], True)
2348 w = min(wa, wb)
2347 w = min(wa, wb)
2349
2348
2350 # (::x and not ::y)/(not ::y and ::x) have a fast path
2349 # (::x and not ::y)/(not ::y and ::x) have a fast path
2351 tm = _matchonly(ta, tb) or _matchonly(tb, ta)
2350 tm = _matchonly(ta, tb) or _matchonly(tb, ta)
2352 if tm:
2351 if tm:
2353 return w, ('func', ('symbol', 'only'), tm)
2352 return w, ('func', ('symbol', 'only'), tm)
2354
2353
2355 if tb is not None and tb[0] == 'not':
2354 if tb is not None and tb[0] == 'not':
2356 return wa, ('difference', ta, tb[1])
2355 return wa, ('difference', ta, tb[1])
2357
2356
2358 if wa > wb:
2357 if wa > wb:
2359 return w, (op, tb, ta)
2358 return w, (op, tb, ta)
2360 return w, (op, ta, tb)
2359 return w, (op, ta, tb)
2361 elif op == 'or':
2360 elif op == 'or':
2362 # fast path for machine-generated expressions, which are likely to have
2361 # fast path for machine-generated expressions, which are likely to have
2363 # lots of trivial revisions: 'a + b + c()' to '_list(a b) + c()'
2362 # lots of trivial revisions: 'a + b + c()' to '_list(a b) + c()'
2364 ws, ts, ss = [], [], []
2363 ws, ts, ss = [], [], []
2365 def flushss():
2364 def flushss():
2366 if not ss:
2365 if not ss:
2367 return
2366 return
2368 if len(ss) == 1:
2367 if len(ss) == 1:
2369 w, t = ss[0]
2368 w, t = ss[0]
2370 else:
2369 else:
2371 s = '\0'.join(t[1] for w, t in ss)
2370 s = '\0'.join(t[1] for w, t in ss)
2372 y = ('func', ('symbol', '_list'), ('string', s))
2371 y = ('func', ('symbol', '_list'), ('string', s))
2373 w, t = _optimize(y, False)
2372 w, t = _optimize(y, False)
2374 ws.append(w)
2373 ws.append(w)
2375 ts.append(t)
2374 ts.append(t)
2376 del ss[:]
2375 del ss[:]
2377 for y in x[1:]:
2376 for y in x[1:]:
2378 w, t = _optimize(y, False)
2377 w, t = _optimize(y, False)
2379 if t is not None and (t[0] == 'string' or t[0] == 'symbol'):
2378 if t is not None and (t[0] == 'string' or t[0] == 'symbol'):
2380 ss.append((w, t))
2379 ss.append((w, t))
2381 continue
2380 continue
2382 flushss()
2381 flushss()
2383 ws.append(w)
2382 ws.append(w)
2384 ts.append(t)
2383 ts.append(t)
2385 flushss()
2384 flushss()
2386 if len(ts) == 1:
2385 if len(ts) == 1:
2387 return ws[0], ts[0] # 'or' operation is fully optimized out
2386 return ws[0], ts[0] # 'or' operation is fully optimized out
2388 # we can't reorder trees by weight because it would change the order.
2387 # we can't reorder trees by weight because it would change the order.
2389 # ("sort(a + b)" == "sort(b + a)", but "a + b" != "b + a")
2388 # ("sort(a + b)" == "sort(b + a)", but "a + b" != "b + a")
2390 # ts = tuple(t for w, t in sorted(zip(ws, ts), key=lambda wt: wt[0]))
2389 # ts = tuple(t for w, t in sorted(zip(ws, ts), key=lambda wt: wt[0]))
2391 return max(ws), (op,) + tuple(ts)
2390 return max(ws), (op,) + tuple(ts)
2392 elif op == 'not':
2391 elif op == 'not':
2393 # Optimize not public() to _notpublic() because we have a fast version
2392 # Optimize not public() to _notpublic() because we have a fast version
2394 if x[1] == ('func', ('symbol', 'public'), None):
2393 if x[1] == ('func', ('symbol', 'public'), None):
2395 newsym = ('func', ('symbol', '_notpublic'), None)
2394 newsym = ('func', ('symbol', '_notpublic'), None)
2396 o = _optimize(newsym, not small)
2395 o = _optimize(newsym, not small)
2397 return o[0], o[1]
2396 return o[0], o[1]
2398 else:
2397 else:
2399 o = _optimize(x[1], not small)
2398 o = _optimize(x[1], not small)
2400 return o[0], (op, o[1])
2399 return o[0], (op, o[1])
2401 elif op == 'parentpost':
2400 elif op == 'parentpost':
2402 o = _optimize(x[1], small)
2401 o = _optimize(x[1], small)
2403 return o[0], (op, o[1])
2402 return o[0], (op, o[1])
2404 elif op == 'group':
2403 elif op == 'group':
2405 return _optimize(x[1], small)
2404 return _optimize(x[1], small)
2406 elif op in 'dagrange range parent ancestorspec':
2405 elif op in 'dagrange range parent ancestorspec':
2407 if op == 'parent':
2406 if op == 'parent':
2408 # x^:y means (x^) : y, not x ^ (:y)
2407 # x^:y means (x^) : y, not x ^ (:y)
2409 post = ('parentpost', x[1])
2408 post = ('parentpost', x[1])
2410 if x[2][0] == 'dagrangepre':
2409 if x[2][0] == 'dagrangepre':
2411 return _optimize(('dagrange', post, x[2][1]), small)
2410 return _optimize(('dagrange', post, x[2][1]), small)
2412 elif x[2][0] == 'rangepre':
2411 elif x[2][0] == 'rangepre':
2413 return _optimize(('range', post, x[2][1]), small)
2412 return _optimize(('range', post, x[2][1]), small)
2414
2413
2415 wa, ta = _optimize(x[1], small)
2414 wa, ta = _optimize(x[1], small)
2416 wb, tb = _optimize(x[2], small)
2415 wb, tb = _optimize(x[2], small)
2417 return wa + wb, (op, ta, tb)
2416 return wa + wb, (op, ta, tb)
2418 elif op == 'list':
2417 elif op == 'list':
2419 ws, ts = zip(*(_optimize(y, small) for y in x[1:]))
2418 ws, ts = zip(*(_optimize(y, small) for y in x[1:]))
2420 return sum(ws), (op,) + ts
2419 return sum(ws), (op,) + ts
2421 elif op == 'func':
2420 elif op == 'func':
2422 f = getstring(x[1], _("not a symbol"))
2421 f = getstring(x[1], _("not a symbol"))
2423 wa, ta = _optimize(x[2], small)
2422 wa, ta = _optimize(x[2], small)
2424 if f in ("author branch closed date desc file grep keyword "
2423 if f in ("author branch closed date desc file grep keyword "
2425 "outgoing user"):
2424 "outgoing user"):
2426 w = 10 # slow
2425 w = 10 # slow
2427 elif f in "modifies adds removes":
2426 elif f in "modifies adds removes":
2428 w = 30 # slower
2427 w = 30 # slower
2429 elif f == "contains":
2428 elif f == "contains":
2430 w = 100 # very slow
2429 w = 100 # very slow
2431 elif f == "ancestor":
2430 elif f == "ancestor":
2432 w = 1 * smallbonus
2431 w = 1 * smallbonus
2433 elif f in "reverse limit first _intlist":
2432 elif f in "reverse limit first _intlist":
2434 w = 0
2433 w = 0
2435 elif f in "sort":
2434 elif f in "sort":
2436 w = 10 # assume most sorts look at changelog
2435 w = 10 # assume most sorts look at changelog
2437 else:
2436 else:
2438 w = 1
2437 w = 1
2439 return w + wa, (op, x[1], ta)
2438 return w + wa, (op, x[1], ta)
2440 return 1, x
2439 return 1, x
2441
2440
2442 def optimize(tree):
2441 def optimize(tree):
2443 _weight, newtree = _optimize(tree, small=True)
2442 _weight, newtree = _optimize(tree, small=True)
2444 return newtree
2443 return newtree
2445
2444
2446 # the set of valid characters for the initial letter of symbols in
2445 # the set of valid characters for the initial letter of symbols in
2447 # alias declarations and definitions
2446 # alias declarations and definitions
2448 _aliassyminitletters = set(c for c in [chr(i) for i in xrange(256)]
2447 _aliassyminitletters = set(c for c in [chr(i) for i in xrange(256)]
2449 if c.isalnum() or c in '._@$' or ord(c) > 127)
2448 if c.isalnum() or c in '._@$' or ord(c) > 127)
2450
2449
2451 def _parsewith(spec, lookup=None, syminitletters=None):
2450 def _parsewith(spec, lookup=None, syminitletters=None):
2452 """Generate a parse tree of given spec with given tokenizing options
2451 """Generate a parse tree of given spec with given tokenizing options
2453
2452
2454 >>> _parsewith('foo($1)', syminitletters=_aliassyminitletters)
2453 >>> _parsewith('foo($1)', syminitletters=_aliassyminitletters)
2455 ('func', ('symbol', 'foo'), ('symbol', '$1'))
2454 ('func', ('symbol', 'foo'), ('symbol', '$1'))
2456 >>> _parsewith('$1')
2455 >>> _parsewith('$1')
2457 Traceback (most recent call last):
2456 Traceback (most recent call last):
2458 ...
2457 ...
2459 ParseError: ("syntax error in revset '$1'", 0)
2458 ParseError: ("syntax error in revset '$1'", 0)
2460 >>> _parsewith('foo bar')
2459 >>> _parsewith('foo bar')
2461 Traceback (most recent call last):
2460 Traceback (most recent call last):
2462 ...
2461 ...
2463 ParseError: ('invalid token', 4)
2462 ParseError: ('invalid token', 4)
2464 """
2463 """
2465 p = parser.parser(elements)
2464 p = parser.parser(elements)
2466 tree, pos = p.parse(tokenize(spec, lookup=lookup,
2465 tree, pos = p.parse(tokenize(spec, lookup=lookup,
2467 syminitletters=syminitletters))
2466 syminitletters=syminitletters))
2468 if pos != len(spec):
2467 if pos != len(spec):
2469 raise error.ParseError(_('invalid token'), pos)
2468 raise error.ParseError(_('invalid token'), pos)
2470 return parser.simplifyinfixops(tree, ('list', 'or'))
2469 return parser.simplifyinfixops(tree, ('list', 'or'))
2471
2470
2472 class _aliasrules(parser.basealiasrules):
2471 class _aliasrules(parser.basealiasrules):
2473 """Parsing and expansion rule set of revset aliases"""
2472 """Parsing and expansion rule set of revset aliases"""
2474 _section = _('revset alias')
2473 _section = _('revset alias')
2475
2474
2476 @staticmethod
2475 @staticmethod
2477 def _parse(spec):
2476 def _parse(spec):
2478 """Parse alias declaration/definition ``spec``
2477 """Parse alias declaration/definition ``spec``
2479
2478
2480 This also allows symbol names to use ``$`` as an initial letter
2479 This also allows symbol names to use ``$`` as an initial letter
2481 (for backward compatibility), and callers of this function should
2480 (for backward compatibility), and callers of this function should
2482 examine whether ``$`` is also used for unexpected symbols.
2481 examine whether ``$`` is also used for unexpected symbols.
2483 """
2482 """
2484 return _parsewith(spec, syminitletters=_aliassyminitletters)
2483 return _parsewith(spec, syminitletters=_aliassyminitletters)
2485
2484
2486 @staticmethod
2485 @staticmethod
2487 def _trygetfunc(tree):
2486 def _trygetfunc(tree):
2488 if tree[0] == 'func' and tree[1][0] == 'symbol':
2487 if tree[0] == 'func' and tree[1][0] == 'symbol':
2489 return tree[1][1], getlist(tree[2])
2488 return tree[1][1], getlist(tree[2])
2490
2489
2491 def expandaliases(ui, tree, showwarning=None):
2490 def expandaliases(ui, tree, showwarning=None):
2492 aliases = _aliasrules.buildmap(ui.configitems('revsetalias'))
2491 aliases = _aliasrules.buildmap(ui.configitems('revsetalias'))
2493 tree = _aliasrules.expand(aliases, tree)
2492 tree = _aliasrules.expand(aliases, tree)
2494 if showwarning:
2493 if showwarning:
2494 # warn about problematic (but not referenced) aliases
2493 # warn about problematic (but not referenced) aliases
2496 for name, alias in sorted(aliases.iteritems()):
2495 for name, alias in sorted(aliases.iteritems()):
2497 if alias.error and not alias.warned:
2496 if alias.error and not alias.warned:
2498 showwarning(_('warning: %s\n') % (alias.error))
2497 showwarning(_('warning: %s\n') % (alias.error))
2499 alias.warned = True
2498 alias.warned = True
2500 return tree
2499 return tree
2501
2500
2502 def foldconcat(tree):
2501 def foldconcat(tree):
2503 """Fold elements to be concatenated by `##`
2502 """Fold elements to be concatenated by `##`
2504 """
2503 """
2505 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2504 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2506 return tree
2505 return tree
2507 if tree[0] == '_concat':
2506 if tree[0] == '_concat':
2508 pending = [tree]
2507 pending = [tree]
2509 l = []
2508 l = []
2510 while pending:
2509 while pending:
2511 e = pending.pop()
2510 e = pending.pop()
2512 if e[0] == '_concat':
2511 if e[0] == '_concat':
2513 pending.extend(reversed(e[1:]))
2512 pending.extend(reversed(e[1:]))
2514 elif e[0] in ('string', 'symbol'):
2513 elif e[0] in ('string', 'symbol'):
2515 l.append(e[1])
2514 l.append(e[1])
2516 else:
2515 else:
2517 msg = _("\"##\" can't concatenate \"%s\" element") % (e[0])
2516 msg = _("\"##\" can't concatenate \"%s\" element") % (e[0])
2518 raise error.ParseError(msg)
2517 raise error.ParseError(msg)
2519 return ('string', ''.join(l))
2518 return ('string', ''.join(l))
2520 else:
2519 else:
2521 return tuple(foldconcat(t) for t in tree)
2520 return tuple(foldconcat(t) for t in tree)
2522
2521
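# --- illustrative sketch (not part of revset.py) ---
# A stripped-down version of the '##' folding above, run on a hand-written
# tuple tree (no Mercurial imports). Unlike the real foldconcat(), this
# sketch assumes every element under '_concat' folds to a string/symbol
# leaf instead of raising ParseError for anything else.
def fold(tree):
    if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
        return tree
    if tree[0] == '_concat':
        return ('string', ''.join(fold(e)[1] for e in tree[1:]))
    return tuple(fold(t) for t in tree)

# e.g. folding the tree for a spec like branch(2.6 ## "-maint")
tree = ('func', ('symbol', 'branch'),
        ('_concat', ('symbol', '2.6'), ('string', '-maint')))
print(fold(tree))
# ('func', ('symbol', 'branch'), ('string', '2.6-maint'))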
2523 def parse(spec, lookup=None):
2522 def parse(spec, lookup=None):
2524 return _parsewith(spec, lookup=lookup)
2523 return _parsewith(spec, lookup=lookup)
2525
2524
2526 def posttreebuilthook(tree, repo):
2525 def posttreebuilthook(tree, repo):
2527 # hook for extensions to execute code on the optimized tree
2526 # hook for extensions to execute code on the optimized tree
2528 pass
2527 pass
2529
2528
2530 def match(ui, spec, repo=None):
2529 def match(ui, spec, repo=None):
2531 if not spec:
2530 if not spec:
2532 raise error.ParseError(_("empty query"))
2531 raise error.ParseError(_("empty query"))
2533 lookup = None
2532 lookup = None
2534 if repo:
2533 if repo:
2535 lookup = repo.__contains__
2534 lookup = repo.__contains__
2536 tree = parse(spec, lookup)
2535 tree = parse(spec, lookup)
2537 return _makematcher(ui, tree, repo)
2536 return _makematcher(ui, tree, repo)
2538
2537
2539 def matchany(ui, specs, repo=None):
2538 def matchany(ui, specs, repo=None):
2540 """Create a matcher that will include any revisions matching one of the
2539 """Create a matcher that will include any revisions matching one of the
2541 given specs"""
2540 given specs"""
2542 if not specs:
2541 if not specs:
2543 def mfunc(repo, subset=None):
2542 def mfunc(repo, subset=None):
2544 return baseset()
2543 return baseset()
2545 return mfunc
2544 return mfunc
2546 if not all(specs):
2545 if not all(specs):
2547 raise error.ParseError(_("empty query"))
2546 raise error.ParseError(_("empty query"))
2548 lookup = None
2547 lookup = None
2549 if repo:
2548 if repo:
2550 lookup = repo.__contains__
2549 lookup = repo.__contains__
2551 if len(specs) == 1:
2550 if len(specs) == 1:
2552 tree = parse(specs[0], lookup)
2551 tree = parse(specs[0], lookup)
2553 else:
2552 else:
2554 tree = ('or',) + tuple(parse(s, lookup) for s in specs)
2553 tree = ('or',) + tuple(parse(s, lookup) for s in specs)
2555 return _makematcher(ui, tree, repo)
2554 return _makematcher(ui, tree, repo)
2556
2555
2557 def _makematcher(ui, tree, repo):
2556 def _makematcher(ui, tree, repo):
2558 if ui:
2557 if ui:
2559 tree = expandaliases(ui, tree, showwarning=ui.warn)
2558 tree = expandaliases(ui, tree, showwarning=ui.warn)
2560 tree = foldconcat(tree)
2559 tree = foldconcat(tree)
2561 tree = optimize(tree)
2560 tree = optimize(tree)
2562 posttreebuilthook(tree, repo)
2561 posttreebuilthook(tree, repo)
2563 def mfunc(repo, subset=None):
2562 def mfunc(repo, subset=None):
2564 if subset is None:
2563 if subset is None:
2565 subset = fullreposet(repo)
2564 subset = fullreposet(repo)
2566 if util.safehasattr(subset, 'isascending'):
2565 if util.safehasattr(subset, 'isascending'):
2567 result = getset(repo, subset, tree)
2566 result = getset(repo, subset, tree)
2568 else:
2567 else:
2569 result = getset(repo, baseset(subset), tree)
2568 result = getset(repo, baseset(subset), tree)
2570 return result
2569 return result
2571 return mfunc
2570 return mfunc
2572
2571
2573 def formatspec(expr, *args):
2572 def formatspec(expr, *args):
2574 '''
2573 '''
2575 This is a convenience function for using revsets internally, and
2574 This is a convenience function for using revsets internally, and
2576 escapes arguments appropriately. Aliases are intentionally ignored
2575 escapes arguments appropriately. Aliases are intentionally ignored
2577 so that intended expression behavior isn't accidentally subverted.
2576 so that intended expression behavior isn't accidentally subverted.
2578
2577
2579 Supported arguments:
2578 Supported arguments:
2580
2579
2581 %r = revset expression, parenthesized
2580 %r = revset expression, parenthesized
2582 %d = int(arg), no quoting
2581 %d = int(arg), no quoting
2583 %s = string(arg), escaped and single-quoted
2582 %s = string(arg), escaped and single-quoted
2584 %b = arg.branch(), escaped and single-quoted
2583 %b = arg.branch(), escaped and single-quoted
2585 %n = hex(arg), single-quoted
2584 %n = hex(arg), single-quoted
2586 %% = a literal '%'
2585 %% = a literal '%'
2587
2586
2588 Prefixing the type with 'l' specifies a parenthesized list of that type.
2587 Prefixing the type with 'l' specifies a parenthesized list of that type.
2589
2588
2590 >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
2589 >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
2591 '(10 or 11):: and ((this()) or (that()))'
2590 '(10 or 11):: and ((this()) or (that()))'
2592 >>> formatspec('%d:: and not %d::', 10, 20)
2591 >>> formatspec('%d:: and not %d::', 10, 20)
2593 '10:: and not 20::'
2592 '10:: and not 20::'
2594 >>> formatspec('%ld or %ld', [], [1])
2593 >>> formatspec('%ld or %ld', [], [1])
2595 "_list('') or 1"
2594 "_list('') or 1"
2596 >>> formatspec('keyword(%s)', 'foo\\xe9')
2595 >>> formatspec('keyword(%s)', 'foo\\xe9')
2597 "keyword('foo\\\\xe9')"
2596 "keyword('foo\\\\xe9')"
2598 >>> b = lambda: 'default'
2597 >>> b = lambda: 'default'
2599 >>> b.branch = b
2598 >>> b.branch = b
2600 >>> formatspec('branch(%b)', b)
2599 >>> formatspec('branch(%b)', b)
2601 "branch('default')"
2600 "branch('default')"
2602 >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
2601 >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
2603 "root(_list('a\\x00b\\x00c\\x00d'))"
2602 "root(_list('a\\x00b\\x00c\\x00d'))"
2604 '''
2603 '''
2605
2604
2606 def quote(s):
2605 def quote(s):
2607 return repr(str(s))
2606 return repr(str(s))
2608
2607
2609 def argtype(c, arg):
2608 def argtype(c, arg):
2610 if c == 'd':
2609 if c == 'd':
2611 return str(int(arg))
2610 return str(int(arg))
2612 elif c == 's':
2611 elif c == 's':
2613 return quote(arg)
2612 return quote(arg)
2614 elif c == 'r':
2613 elif c == 'r':
2615 parse(arg) # make sure syntax errors are confined
2614 parse(arg) # make sure syntax errors are confined
2616 return '(%s)' % arg
2615 return '(%s)' % arg
2617 elif c == 'n':
2616 elif c == 'n':
2618 return quote(node.hex(arg))
2617 return quote(node.hex(arg))
2619 elif c == 'b':
2618 elif c == 'b':
2620 return quote(arg.branch())
2619 return quote(arg.branch())
2621
2620
2622 def listexp(s, t):
2621 def listexp(s, t):
2623 l = len(s)
2622 l = len(s)
2624 if l == 0:
2623 if l == 0:
2625 return "_list('')"
2624 return "_list('')"
2626 elif l == 1:
2625 elif l == 1:
2627 return argtype(t, s[0])
2626 return argtype(t, s[0])
2628 elif t == 'd':
2627 elif t == 'd':
2629 return "_intlist('%s')" % "\0".join(str(int(a)) for a in s)
2628 return "_intlist('%s')" % "\0".join(str(int(a)) for a in s)
2630 elif t == 's':
2629 elif t == 's':
2631 return "_list('%s')" % "\0".join(s)
2630 return "_list('%s')" % "\0".join(s)
2632 elif t == 'n':
2631 elif t == 'n':
2633 return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
2632 return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
2634 elif t == 'b':
2633 elif t == 'b':
2635 return "_list('%s')" % "\0".join(a.branch() for a in s)
2634 return "_list('%s')" % "\0".join(a.branch() for a in s)
2636
2635
2637 m = l // 2
2636 m = l // 2
2638 return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))
2637 return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))
2639
2638
2640 ret = ''
2639 ret = ''
2641 pos = 0
2640 pos = 0
2642 arg = 0
2641 arg = 0
2643 while pos < len(expr):
2642 while pos < len(expr):
2644 c = expr[pos]
2643 c = expr[pos]
2645 if c == '%':
2644 if c == '%':
2646 pos += 1
2645 pos += 1
2647 d = expr[pos]
2646 d = expr[pos]
2648 if d == '%':
2647 if d == '%':
2649 ret += d
2648 ret += d
2650 elif d in 'dsnbr':
2649 elif d in 'dsnbr':
2651 ret += argtype(d, args[arg])
2650 ret += argtype(d, args[arg])
2652 arg += 1
2651 arg += 1
2653 elif d == 'l':
2652 elif d == 'l':
2654 # a list of some type
2653 # a list of some type
2655 pos += 1
2654 pos += 1
2656 d = expr[pos]
2655 d = expr[pos]
2657 ret += listexp(list(args[arg]), d)
2656 ret += listexp(list(args[arg]), d)
2658 arg += 1
2657 arg += 1
2659 else:
2658 else:
2660 raise error.Abort('unexpected revspec format character %s' % d)
2659 raise error.Abort('unexpected revspec format character %s' % d)
2661 else:
2660 else:
2662 ret += c
2661 ret += c
2663 pos += 1
2662 pos += 1
2664
2663
2665 return ret
2664 return ret
2666
2665
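# Editor's note: illustrative sketch only, not part of the original module.
# formatspec() builds a safely quoted expression string that can then be fed
# back through parse(); the argument values here are made up.
def _example_formatspec_usage():
    expr = formatspec('%ld and keyword(%s)', [10, 12, 14], "bug '42'")
    # expr is a plain revset string, roughly of the form
    # "_intlist('10\x0012\x0014') and keyword(...)"
    return parse(expr)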
2667 def prettyformat(tree):
2666 def prettyformat(tree):
2668 return parser.prettyformat(tree, ('string', 'symbol'))
2667 return parser.prettyformat(tree, ('string', 'symbol'))
2669
2668
2670 def depth(tree):
2669 def depth(tree):
2671 if isinstance(tree, tuple):
2670 if isinstance(tree, tuple):
2672 return max(map(depth, tree)) + 1
2671 return max(map(depth, tree)) + 1
2673 else:
2672 else:
2674 return 0
2673 return 0
2675
2674
2676 def funcsused(tree):
2675 def funcsused(tree):
2677 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2676 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2678 return set()
2677 return set()
2679 else:
2678 else:
2680 funcs = set()
2679 funcs = set()
2681 for s in tree[1:]:
2680 for s in tree[1:]:
2682 funcs |= funcsused(s)
2681 funcs |= funcsused(s)
2683 if tree[0] == 'func':
2682 if tree[0] == 'func':
2684 funcs.add(tree[1][1])
2683 funcs.add(tree[1][1])
2685 return funcs
2684 return funcs
2686
2685
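# Editor's note: illustrative sketch only, not part of the original module.
# depth() and funcsused() are small introspection helpers over parsed trees.
def _example_tree_introspection():
    tree = parse('heads(tip) and not closed()')
    # expected: a small integer depth and a set like {'heads', 'closed'}
    return depth(tree), funcsused(tree)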
2687 def _formatsetrepr(r):
2686 def _formatsetrepr(r):
2688 """Format an optional printable representation of a set
2687 """Format an optional printable representation of a set
2689
2688
2690 ======== =================================
2689 ======== =================================
2691 type(r) example
2690 type(r) example
2692 ======== =================================
2691 ======== =================================
2693 tuple ('<not %r>', other)
2692 tuple ('<not %r>', other)
2694 str '<branch closed>'
2693 str '<branch closed>'
2695 callable lambda: '<branch %r>' % sorted(b)
2694 callable lambda: '<branch %r>' % sorted(b)
2696 object other
2695 object other
2697 ======== =================================
2696 ======== =================================
2698 """
2697 """
2699 if r is None:
2698 if r is None:
2700 return ''
2699 return ''
2701 elif isinstance(r, tuple):
2700 elif isinstance(r, tuple):
2702 return r[0] % r[1:]
2701 return r[0] % r[1:]
2703 elif isinstance(r, str):
2702 elif isinstance(r, str):
2704 return r
2703 return r
2705 elif callable(r):
2704 elif callable(r):
2706 return r()
2705 return r()
2707 else:
2706 else:
2708 return repr(r)
2707 return repr(r)
2709
2708
2710 class abstractsmartset(object):
2709 class abstractsmartset(object):
2711
2710
2712 def __nonzero__(self):
2711 def __nonzero__(self):
2713 """True if the smartset is not empty"""
2712 """True if the smartset is not empty"""
2714 raise NotImplementedError()
2713 raise NotImplementedError()
2715
2714
2716 def __contains__(self, rev):
2715 def __contains__(self, rev):
2717 """provide fast membership testing"""
2716 """provide fast membership testing"""
2718 raise NotImplementedError()
2717 raise NotImplementedError()
2719
2718
2720 def __iter__(self):
2719 def __iter__(self):
2721 """iterate the set in the order it is supposed to be iterated"""
2720 """iterate the set in the order it is supposed to be iterated"""
2722 raise NotImplementedError()
2721 raise NotImplementedError()
2723
2722
2724 # Attributes containing a function to perform a fast iteration in a given
2723 # Attributes containing a function to perform a fast iteration in a given
2725 # direction. A smartset can have none, one, or both defined.
2724 # direction. A smartset can have none, one, or both defined.
2726 #
2725 #
2727 # Default value is None instead of a function returning None to avoid
2726 # Default value is None instead of a function returning None to avoid
2728 # initializing an iterator just for testing if a fast method exists.
2727 # initializing an iterator just for testing if a fast method exists.
2729 fastasc = None
2728 fastasc = None
2730 fastdesc = None
2729 fastdesc = None
2731
2730
2732 def isascending(self):
2731 def isascending(self):
2733 """True if the set will iterate in ascending order"""
2732 """True if the set will iterate in ascending order"""
2734 raise NotImplementedError()
2733 raise NotImplementedError()
2735
2734
2736 def isdescending(self):
2735 def isdescending(self):
2737 """True if the set will iterate in descending order"""
2736 """True if the set will iterate in descending order"""
2738 raise NotImplementedError()
2737 raise NotImplementedError()
2739
2738
2740 def istopo(self):
2739 def istopo(self):
2741 """True if the set will iterate in topographical order"""
2740 """True if the set will iterate in topographical order"""
2742 raise NotImplementedError()
2741 raise NotImplementedError()
2743
2742
2744 @util.cachefunc
2743 @util.cachefunc
2745 def min(self):
2744 def min(self):
2746 """return the minimum element in the set"""
2745 """return the minimum element in the set"""
2747 if self.fastasc is not None:
2746 if self.fastasc is not None:
2748 for r in self.fastasc():
2747 for r in self.fastasc():
2749 return r
2748 return r
2750 raise ValueError('arg is an empty sequence')
2749 raise ValueError('arg is an empty sequence')
2751 return min(self)
2750 return min(self)
2752
2751
2753 @util.cachefunc
2752 @util.cachefunc
2754 def max(self):
2753 def max(self):
2755 """return the maximum element in the set"""
2754 """return the maximum element in the set"""
2756 if self.fastdesc is not None:
2755 if self.fastdesc is not None:
2757 for r in self.fastdesc():
2756 for r in self.fastdesc():
2758 return r
2757 return r
2759 raise ValueError('arg is an empty sequence')
2758 raise ValueError('arg is an empty sequence')
2760 return max(self)
2759 return max(self)
2761
2760
2762 def first(self):
2761 def first(self):
2763 """return the first element in the set (user iteration perspective)
2762 """return the first element in the set (user iteration perspective)
2764
2763
2765 Return None if the set is empty"""
2764 Return None if the set is empty"""
2766 raise NotImplementedError()
2765 raise NotImplementedError()
2767
2766
2768 def last(self):
2767 def last(self):
2769 """return the last element in the set (user iteration perspective)
2768 """return the last element in the set (user iteration perspective)
2770
2769
2771 Return None if the set is empty"""
2770 Return None if the set is empty"""
2772 raise NotImplementedError()
2771 raise NotImplementedError()
2773
2772
2774 def __len__(self):
2773 def __len__(self):
2775 """return the length of the smartsets
2774 """return the length of the smartsets
2776
2775
2777 This can be expensive on smartsets that could otherwise be lazy."""
2776 This can be expensive on smartsets that could otherwise be lazy."""
2778 raise NotImplementedError()
2777 raise NotImplementedError()
2779
2778
2780 def reverse(self):
2779 def reverse(self):
2781 """reverse the expected iteration order"""
2780 """reverse the expected iteration order"""
2782 raise NotImplementedError()
2781 raise NotImplementedError()
2783
2782
2784 def sort(self, reverse=True):
2783 def sort(self, reverse=True):
2785 """get the set to iterate in an ascending or descending order"""
2784 """get the set to iterate in an ascending or descending order"""
2786 raise NotImplementedError()
2785 raise NotImplementedError()
2787
2786
2788 def __and__(self, other):
2787 def __and__(self, other):
2789 """Returns a new object with the intersection of the two collections.
2788 """Returns a new object with the intersection of the two collections.
2790
2789
2791 This is part of the mandatory API for smartset."""
2790 This is part of the mandatory API for smartset."""
2792 if isinstance(other, fullreposet):
2791 if isinstance(other, fullreposet):
2793 return self
2792 return self
2794 return self.filter(other.__contains__, condrepr=other, cache=False)
2793 return self.filter(other.__contains__, condrepr=other, cache=False)
2795
2794
2796 def __add__(self, other):
2795 def __add__(self, other):
2797 """Returns a new object with the union of the two collections.
2796 """Returns a new object with the union of the two collections.
2798
2797
2799 This is part of the mandatory API for smartset."""
2798 This is part of the mandatory API for smartset."""
2800 return addset(self, other)
2799 return addset(self, other)
2801
2800
2802 def __sub__(self, other):
2801 def __sub__(self, other):
2803 """Returns a new object with the substraction of the two collections.
2802 """Returns a new object with the substraction of the two collections.
2804
2803
2805 This is part of the mandatory API for smartset."""
2804 This is part of the mandatory API for smartset."""
2806 c = other.__contains__
2805 c = other.__contains__
2807 return self.filter(lambda r: not c(r), condrepr=('<not %r>', other),
2806 return self.filter(lambda r: not c(r), condrepr=('<not %r>', other),
2808 cache=False)
2807 cache=False)
2809
2808
2810 def filter(self, condition, condrepr=None, cache=True):
2809 def filter(self, condition, condrepr=None, cache=True):
2811 """Returns this smartset filtered by condition as a new smartset.
2810 """Returns this smartset filtered by condition as a new smartset.
2812
2811
2813 `condition` is a callable which takes a revision number and returns a
2812 `condition` is a callable which takes a revision number and returns a
2814 boolean. Optional `condrepr` provides a printable representation of
2813 boolean. Optional `condrepr` provides a printable representation of
2815 the given `condition`.
2814 the given `condition`.
2816
2815
2817 This is part of the mandatory API for smartset."""
2816 This is part of the mandatory API for smartset."""
2818 # builtin functions cannot be cached, but they do not need to be
2817 # builtin functions cannot be cached, but they do not need to be
2819 if cache and util.safehasattr(condition, 'func_code'):
2818 if cache and util.safehasattr(condition, 'func_code'):
2820 condition = util.cachefunc(condition)
2819 condition = util.cachefunc(condition)
2821 return filteredset(self, condition, condrepr)
2820 return filteredset(self, condition, condrepr)
2822
2821
2823 class baseset(abstractsmartset):
2822 class baseset(abstractsmartset):
2824 """Basic data structure that represents a revset and contains the basic
2823 """Basic data structure that represents a revset and contains the basic
2825 operations that it should be able to perform.
2824 operations that it should be able to perform.
2826
2825
2827 Every method in this class should be implemented by any smartset class.
2826 Every method in this class should be implemented by any smartset class.
2828 """
2827 """
2829 def __init__(self, data=(), datarepr=None, istopo=False):
2828 def __init__(self, data=(), datarepr=None, istopo=False):
2830 """
2829 """
2831 datarepr: a tuple of (format, obj, ...), a function or an object that
2830 datarepr: a tuple of (format, obj, ...), a function or an object that
2832 provides a printable representation of the given data.
2831 provides a printable representation of the given data.
2833 """
2832 """
2834 self._ascending = None
2833 self._ascending = None
2835 self._istopo = istopo
2834 self._istopo = istopo
2836 if not isinstance(data, list):
2835 if not isinstance(data, list):
2837 if isinstance(data, set):
2836 if isinstance(data, set):
2838 self._set = data
2837 self._set = data
2839 # a set has no order, so we pick one for stability
2838 # a set has no order, so we pick one for stability
2840 self._ascending = True
2839 self._ascending = True
2841 data = list(data)
2840 data = list(data)
2842 self._list = data
2841 self._list = data
2843 self._datarepr = datarepr
2842 self._datarepr = datarepr
2844
2843
2845 @util.propertycache
2844 @util.propertycache
2846 def _set(self):
2845 def _set(self):
2847 return set(self._list)
2846 return set(self._list)
2848
2847
2849 @util.propertycache
2848 @util.propertycache
2850 def _asclist(self):
2849 def _asclist(self):
2851 asclist = self._list[:]
2850 asclist = self._list[:]
2852 asclist.sort()
2851 asclist.sort()
2853 return asclist
2852 return asclist
2854
2853
2855 def __iter__(self):
2854 def __iter__(self):
2856 if self._ascending is None:
2855 if self._ascending is None:
2857 return iter(self._list)
2856 return iter(self._list)
2858 elif self._ascending:
2857 elif self._ascending:
2859 return iter(self._asclist)
2858 return iter(self._asclist)
2860 else:
2859 else:
2861 return reversed(self._asclist)
2860 return reversed(self._asclist)
2862
2861
2863 def fastasc(self):
2862 def fastasc(self):
2864 return iter(self._asclist)
2863 return iter(self._asclist)
2865
2864
2866 def fastdesc(self):
2865 def fastdesc(self):
2867 return reversed(self._asclist)
2866 return reversed(self._asclist)
2868
2867
2869 @util.propertycache
2868 @util.propertycache
2870 def __contains__(self):
2869 def __contains__(self):
2871 return self._set.__contains__
2870 return self._set.__contains__
2872
2871
2873 def __nonzero__(self):
2872 def __nonzero__(self):
2874 return bool(self._list)
2873 return bool(self._list)
2875
2874
2876 def sort(self, reverse=False):
2875 def sort(self, reverse=False):
2877 self._ascending = not bool(reverse)
2876 self._ascending = not bool(reverse)
2878 self._istopo = False
2877 self._istopo = False
2879
2878
2880 def reverse(self):
2879 def reverse(self):
2881 if self._ascending is None:
2880 if self._ascending is None:
2882 self._list.reverse()
2881 self._list.reverse()
2883 else:
2882 else:
2884 self._ascending = not self._ascending
2883 self._ascending = not self._ascending
2885 self._istopo = False
2884 self._istopo = False
2886
2885
2887 def __len__(self):
2886 def __len__(self):
2888 return len(self._list)
2887 return len(self._list)
2889
2888
2890 def isascending(self):
2889 def isascending(self):
2891 """Returns True if the collection is ascending order, False if not.
2890 """Returns True if the collection is ascending order, False if not.
2892
2891
2893 This is part of the mandatory API for smartset."""
2892 This is part of the mandatory API for smartset."""
2894 if len(self) <= 1:
2893 if len(self) <= 1:
2895 return True
2894 return True
2896 return self._ascending is not None and self._ascending
2895 return self._ascending is not None and self._ascending
2897
2896
2898 def isdescending(self):
2897 def isdescending(self):
2899 """Returns True if the collection is descending order, False if not.
2898 """Returns True if the collection is descending order, False if not.
2900
2899
2901 This is part of the mandatory API for smartset."""
2900 This is part of the mandatory API for smartset."""
2902 if len(self) <= 1:
2901 if len(self) <= 1:
2903 return True
2902 return True
2904 return self._ascending is not None and not self._ascending
2903 return self._ascending is not None and not self._ascending
2905
2904
2906 def istopo(self):
2905 def istopo(self):
2907 """Is the collection is in topographical order or not.
2906 """Is the collection is in topographical order or not.
2908
2907
2909 This is part of the mandatory API for smartset."""
2908 This is part of the mandatory API for smartset."""
2910 if len(self) <= 1:
2909 if len(self) <= 1:
2911 return True
2910 return True
2912 return self._istopo
2911 return self._istopo
2913
2912
2914 def first(self):
2913 def first(self):
2915 if self:
2914 if self:
2916 if self._ascending is None:
2915 if self._ascending is None:
2917 return self._list[0]
2916 return self._list[0]
2918 elif self._ascending:
2917 elif self._ascending:
2919 return self._asclist[0]
2918 return self._asclist[0]
2920 else:
2919 else:
2921 return self._asclist[-1]
2920 return self._asclist[-1]
2922 return None
2921 return None
2923
2922
2924 def last(self):
2923 def last(self):
2925 if self:
2924 if self:
2926 if self._ascending is None:
2925 if self._ascending is None:
2927 return self._list[-1]
2926 return self._list[-1]
2928 elif self._ascending:
2927 elif self._ascending:
2929 return self._asclist[-1]
2928 return self._asclist[-1]
2930 else:
2929 else:
2931 return self._asclist[0]
2930 return self._asclist[0]
2932 return None
2931 return None
2933
2932
2934 def __repr__(self):
2933 def __repr__(self):
2935 d = {None: '', False: '-', True: '+'}[self._ascending]
2934 d = {None: '', False: '-', True: '+'}[self._ascending]
2936 s = _formatsetrepr(self._datarepr)
2935 s = _formatsetrepr(self._datarepr)
2937 if not s:
2936 if not s:
2938 l = self._list
2937 l = self._list
2939 # if _list has been built from a set, it might have a different
2938 # if _list has been built from a set, it might have a different
2940 # order from one python implementation to another.
2939 # order from one python implementation to another.
2941 # We fall back to the sorted version for stable output.
2940 # We fall back to the sorted version for stable output.
2942 if self._ascending is not None:
2941 if self._ascending is not None:
2943 l = self._asclist
2942 l = self._asclist
2944 s = repr(l)
2943 s = repr(l)
2945 return '<%s%s %s>' % (type(self).__name__, d, s)
2944 return '<%s%s %s>' % (type(self).__name__, d, s)
2946
2945
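# Editor's note: illustrative sketch only, not part of the original module.
# It exercises the smartset operators inherited from abstractsmartset above;
# addset and filteredset, used by '+' and '&'/'-', are defined further below.
def _example_baseset_usage():
    xs = baseset([3, 1, 2])
    ys = baseset([2, 5])
    union = xs + ys          # addset, lazy union
    common = xs & ys         # filteredset keeping revs also present in ys
    onlyxs = xs - ys         # filteredset dropping revs present in ys
    xs.sort(reverse=True)    # xs now iterates 3, 2, 1
    return list(union), list(common), list(onlyxs), list(xs)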
2947 class filteredset(abstractsmartset):
2946 class filteredset(abstractsmartset):
2948 """Duck type for baseset class which iterates lazily over the revisions in
2947 """Duck type for baseset class which iterates lazily over the revisions in
2949 the subset and contains a function which tests for membership in the
2948 the subset and contains a function which tests for membership in the
2950 revset
2949 revset
2951 """
2950 """
2952 def __init__(self, subset, condition=lambda x: True, condrepr=None):
2951 def __init__(self, subset, condition=lambda x: True, condrepr=None):
2953 """
2952 """
2954 condition: a function that decides whether a revision in the subset
2953 condition: a function that decides whether a revision in the subset
2955 belongs to the revset or not.
2954 belongs to the revset or not.
2956 condrepr: a tuple of (format, obj, ...), a function or an object that
2955 condrepr: a tuple of (format, obj, ...), a function or an object that
2957 provides a printable representation of the given condition.
2956 provides a printable representation of the given condition.
2958 """
2957 """
2959 self._subset = subset
2958 self._subset = subset
2960 self._condition = condition
2959 self._condition = condition
2961 self._condrepr = condrepr
2960 self._condrepr = condrepr
2962
2961
2963 def __contains__(self, x):
2962 def __contains__(self, x):
2964 return x in self._subset and self._condition(x)
2963 return x in self._subset and self._condition(x)
2965
2964
2966 def __iter__(self):
2965 def __iter__(self):
2967 return self._iterfilter(self._subset)
2966 return self._iterfilter(self._subset)
2968
2967
2969 def _iterfilter(self, it):
2968 def _iterfilter(self, it):
2970 cond = self._condition
2969 cond = self._condition
2971 for x in it:
2970 for x in it:
2972 if cond(x):
2971 if cond(x):
2973 yield x
2972 yield x
2974
2973
2975 @property
2974 @property
2976 def fastasc(self):
2975 def fastasc(self):
2977 it = self._subset.fastasc
2976 it = self._subset.fastasc
2978 if it is None:
2977 if it is None:
2979 return None
2978 return None
2980 return lambda: self._iterfilter(it())
2979 return lambda: self._iterfilter(it())
2981
2980
2982 @property
2981 @property
2983 def fastdesc(self):
2982 def fastdesc(self):
2984 it = self._subset.fastdesc
2983 it = self._subset.fastdesc
2985 if it is None:
2984 if it is None:
2986 return None
2985 return None
2987 return lambda: self._iterfilter(it())
2986 return lambda: self._iterfilter(it())
2988
2987
2989 def __nonzero__(self):
2988 def __nonzero__(self):
2990 fast = None
2989 fast = None
2991 candidates = [self.fastasc if self.isascending() else None,
2990 candidates = [self.fastasc if self.isascending() else None,
2992 self.fastdesc if self.isdescending() else None,
2991 self.fastdesc if self.isdescending() else None,
2993 self.fastasc,
2992 self.fastasc,
2994 self.fastdesc]
2993 self.fastdesc]
2995 for candidate in candidates:
2994 for candidate in candidates:
2996 if candidate is not None:
2995 if candidate is not None:
2997 fast = candidate
2996 fast = candidate
2998 break
2997 break
2999
2998
3000 if fast is not None:
2999 if fast is not None:
3001 it = fast()
3000 it = fast()
3002 else:
3001 else:
3003 it = self
3002 it = self
3004
3003
3005 for r in it:
3004 for r in it:
3006 return True
3005 return True
3007 return False
3006 return False
3008
3007
3009 def __len__(self):
3008 def __len__(self):
3010 # Basic implementation to be changed in future patches.
3009 # Basic implementation to be changed in future patches.
3011 # until this gets improved, we use a generator expression
3010 # until this gets improved, we use a generator expression
3012 # here, since a list comprehension is free to call __len__ again,
3011 # here, since a list comprehension is free to call __len__ again,
3013 # causing infinite recursion
3012 # causing infinite recursion
3014 l = baseset(r for r in self)
3013 l = baseset(r for r in self)
3015 return len(l)
3014 return len(l)
3016
3015
3017 def sort(self, reverse=False):
3016 def sort(self, reverse=False):
3018 self._subset.sort(reverse=reverse)
3017 self._subset.sort(reverse=reverse)
3019
3018
3020 def reverse(self):
3019 def reverse(self):
3021 self._subset.reverse()
3020 self._subset.reverse()
3022
3021
3023 def isascending(self):
3022 def isascending(self):
3024 return self._subset.isascending()
3023 return self._subset.isascending()
3025
3024
3026 def isdescending(self):
3025 def isdescending(self):
3027 return self._subset.isdescending()
3026 return self._subset.isdescending()
3028
3027
3029 def istopo(self):
3028 def istopo(self):
3030 return self._subset.istopo()
3029 return self._subset.istopo()
3031
3030
3032 def first(self):
3031 def first(self):
3033 for x in self:
3032 for x in self:
3034 return x
3033 return x
3035 return None
3034 return None
3036
3035
3037 def last(self):
3036 def last(self):
3038 it = None
3037 it = None
3039 if self.isascending():
3038 if self.isascending():
3040 it = self.fastdesc
3039 it = self.fastdesc
3041 elif self.isdescending():
3040 elif self.isdescending():
3042 it = self.fastasc
3041 it = self.fastasc
3043 if it is not None:
3042 if it is not None:
3044 for x in it():
3043 for x in it():
3045 return x
3044 return x
3046 return None # empty case
3045 return None # empty case
3047 else:
3046 else:
3048 x = None
3047 x = None
3049 for x in self:
3048 for x in self:
3050 pass
3049 pass
3051 return x
3050 return x
3052
3051
3053 def __repr__(self):
3052 def __repr__(self):
3054 xs = [repr(self._subset)]
3053 xs = [repr(self._subset)]
3055 s = _formatsetrepr(self._condrepr)
3054 s = _formatsetrepr(self._condrepr)
3056 if s:
3055 if s:
3057 xs.append(s)
3056 xs.append(s)
3058 return '<%s %s>' % (type(self).__name__, ', '.join(xs))
3057 return '<%s %s>' % (type(self).__name__, ', '.join(xs))
3059
3058
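# Editor's note: illustrative sketch only, not part of the original module.
# filter() on any smartset wraps it in the filteredset defined above.
def _example_filteredset_usage():
    evens = baseset([1, 2, 3, 4]).filter(lambda r: r % 2 == 0,
                                         condrepr='<even revs>')
    return list(evens), (3 in evens)   # expected: [2, 4] and False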
3060 def _iterordered(ascending, iter1, iter2):
3059 def _iterordered(ascending, iter1, iter2):
3061 """produce an ordered iteration from two iterators with the same order
3060 """produce an ordered iteration from two iterators with the same order
3062
3061
3063 The ascending flag indicates the iteration direction.
3062 The ascending flag indicates the iteration direction.
3064 """
3063 """
3065 choice = max
3064 choice = max
3066 if ascending:
3065 if ascending:
3067 choice = min
3066 choice = min
3068
3067
3069 val1 = None
3068 val1 = None
3070 val2 = None
3069 val2 = None
3071 try:
3070 try:
3072 # Consume both iterators in an ordered way until one is empty
3071 # Consume both iterators in an ordered way until one is empty
3073 while True:
3072 while True:
3074 if val1 is None:
3073 if val1 is None:
3075 val1 = next(iter1)
3074 val1 = next(iter1)
3076 if val2 is None:
3075 if val2 is None:
3077 val2 = next(iter2)
3076 val2 = next(iter2)
3078 n = choice(val1, val2)
3077 n = choice(val1, val2)
3079 yield n
3078 yield n
3080 if val1 == n:
3079 if val1 == n:
3081 val1 = None
3080 val1 = None
3082 if val2 == n:
3081 if val2 == n:
3083 val2 = None
3082 val2 = None
3084 except StopIteration:
3083 except StopIteration:
3085 # Flush any remaining values and consume the other one
3084 # Flush any remaining values and consume the other one
3086 it = iter2
3085 it = iter2
3087 if val1 is not None:
3086 if val1 is not None:
3088 yield val1
3087 yield val1
3089 it = iter1
3088 it = iter1
3090 elif val2 is not None:
3089 elif val2 is not None:
3091 # might have been equality and both are empty
3090 # might have been equality and both are empty
3092 yield val2
3091 yield val2
3093 for val in it:
3092 for val in it:
3094 yield val
3093 yield val
3095
3094
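# Editor's note: illustrative sketch only, not part of the original module.
# _iterordered() merges two iterators that already share the same order,
# yielding each value once even when both inputs contain it.
def _example_iterordered():
    merged = _iterordered(True, iter([1, 3, 5]), iter([2, 3, 6]))
    return list(merged)   # expected: [1, 2, 3, 5, 6]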
3096 class addset(abstractsmartset):
3095 class addset(abstractsmartset):
3097 """Represent the addition of two sets
3096 """Represent the addition of two sets
3098
3097
3099 Wrapper structure for lazily adding two structures without losing much
3098 Wrapper structure for lazily adding two structures without losing much
3100 performance on the __contains__ method
3099 performance on the __contains__ method
3101
3100
3102 If the ascending attribute is set, that means the two structures are
3101 If the ascending attribute is set, that means the two structures are
3103 ordered in either an ascending or descending way. Therefore, we can add
3102 ordered in either an ascending or descending way. Therefore, we can add
3104 them while maintaining the order by iterating over both at the same time
3103 them while maintaining the order by iterating over both at the same time
3105
3104
3106 >>> xs = baseset([0, 3, 2])
3105 >>> xs = baseset([0, 3, 2])
3107 >>> ys = baseset([5, 2, 4])
3106 >>> ys = baseset([5, 2, 4])
3108
3107
3109 >>> rs = addset(xs, ys)
3108 >>> rs = addset(xs, ys)
3110 >>> bool(rs), 0 in rs, 1 in rs, 5 in rs, rs.first(), rs.last()
3109 >>> bool(rs), 0 in rs, 1 in rs, 5 in rs, rs.first(), rs.last()
3111 (True, True, False, True, 0, 4)
3110 (True, True, False, True, 0, 4)
3112 >>> rs = addset(xs, baseset([]))
3111 >>> rs = addset(xs, baseset([]))
3113 >>> bool(rs), 0 in rs, 1 in rs, rs.first(), rs.last()
3112 >>> bool(rs), 0 in rs, 1 in rs, rs.first(), rs.last()
3114 (True, True, False, 0, 2)
3113 (True, True, False, 0, 2)
3115 >>> rs = addset(baseset([]), baseset([]))
3114 >>> rs = addset(baseset([]), baseset([]))
3116 >>> bool(rs), 0 in rs, rs.first(), rs.last()
3115 >>> bool(rs), 0 in rs, rs.first(), rs.last()
3117 (False, False, None, None)
3116 (False, False, None, None)
3118
3117
3119 iterate unsorted:
3118 iterate unsorted:
3120 >>> rs = addset(xs, ys)
3119 >>> rs = addset(xs, ys)
3121 >>> # (use generator because pypy could call len())
3120 >>> # (use generator because pypy could call len())
3122 >>> list(x for x in rs) # without _genlist
3121 >>> list(x for x in rs) # without _genlist
3123 [0, 3, 2, 5, 4]
3122 [0, 3, 2, 5, 4]
3124 >>> assert not rs._genlist
3123 >>> assert not rs._genlist
3125 >>> len(rs)
3124 >>> len(rs)
3126 5
3125 5
3127 >>> [x for x in rs] # with _genlist
3126 >>> [x for x in rs] # with _genlist
3128 [0, 3, 2, 5, 4]
3127 [0, 3, 2, 5, 4]
3129 >>> assert rs._genlist
3128 >>> assert rs._genlist
3130
3129
3131 iterate ascending:
3130 iterate ascending:
3132 >>> rs = addset(xs, ys, ascending=True)
3131 >>> rs = addset(xs, ys, ascending=True)
3133 >>> # (use generator because pypy could call len())
3132 >>> # (use generator because pypy could call len())
3134 >>> list(x for x in rs), list(x for x in rs.fastasc()) # without _asclist
3133 >>> list(x for x in rs), list(x for x in rs.fastasc()) # without _asclist
3135 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
3134 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
3136 >>> assert not rs._asclist
3135 >>> assert not rs._asclist
3137 >>> len(rs)
3136 >>> len(rs)
3138 5
3137 5
3139 >>> [x for x in rs], [x for x in rs.fastasc()]
3138 >>> [x for x in rs], [x for x in rs.fastasc()]
3140 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
3139 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
3141 >>> assert rs._asclist
3140 >>> assert rs._asclist
3142
3141
3143 iterate descending:
3142 iterate descending:
3144 >>> rs = addset(xs, ys, ascending=False)
3143 >>> rs = addset(xs, ys, ascending=False)
3145 >>> # (use generator because pypy could call len())
3144 >>> # (use generator because pypy could call len())
3146 >>> list(x for x in rs), list(x for x in rs.fastdesc()) # without _asclist
3145 >>> list(x for x in rs), list(x for x in rs.fastdesc()) # without _asclist
3147 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
3146 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
3148 >>> assert not rs._asclist
3147 >>> assert not rs._asclist
3149 >>> len(rs)
3148 >>> len(rs)
3150 5
3149 5
3151 >>> [x for x in rs], [x for x in rs.fastdesc()]
3150 >>> [x for x in rs], [x for x in rs.fastdesc()]
3152 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
3151 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
3153 >>> assert rs._asclist
3152 >>> assert rs._asclist
3154
3153
3155 iterate ascending without fastasc:
3154 iterate ascending without fastasc:
3156 >>> rs = addset(xs, generatorset(ys), ascending=True)
3155 >>> rs = addset(xs, generatorset(ys), ascending=True)
3157 >>> assert rs.fastasc is None
3156 >>> assert rs.fastasc is None
3158 >>> [x for x in rs]
3157 >>> [x for x in rs]
3159 [0, 2, 3, 4, 5]
3158 [0, 2, 3, 4, 5]
3160
3159
3161 iterate descending without fastdesc:
3160 iterate descending without fastdesc:
3162 >>> rs = addset(generatorset(xs), ys, ascending=False)
3161 >>> rs = addset(generatorset(xs), ys, ascending=False)
3163 >>> assert rs.fastdesc is None
3162 >>> assert rs.fastdesc is None
3164 >>> [x for x in rs]
3163 >>> [x for x in rs]
3165 [5, 4, 3, 2, 0]
3164 [5, 4, 3, 2, 0]
3166 """
3165 """
3167 def __init__(self, revs1, revs2, ascending=None):
3166 def __init__(self, revs1, revs2, ascending=None):
3168 self._r1 = revs1
3167 self._r1 = revs1
3169 self._r2 = revs2
3168 self._r2 = revs2
3170 self._iter = None
3169 self._iter = None
3171 self._ascending = ascending
3170 self._ascending = ascending
3172 self._genlist = None
3171 self._genlist = None
3173 self._asclist = None
3172 self._asclist = None
3174
3173
3175 def __len__(self):
3174 def __len__(self):
3176 return len(self._list)
3175 return len(self._list)
3177
3176
3178 def __nonzero__(self):
3177 def __nonzero__(self):
3179 return bool(self._r1) or bool(self._r2)
3178 return bool(self._r1) or bool(self._r2)
3180
3179
3181 @util.propertycache
3180 @util.propertycache
3182 def _list(self):
3181 def _list(self):
3183 if not self._genlist:
3182 if not self._genlist:
3184 self._genlist = baseset(iter(self))
3183 self._genlist = baseset(iter(self))
3185 return self._genlist
3184 return self._genlist
3186
3185
3187 def __iter__(self):
3186 def __iter__(self):
3188 """Iterate over both collections without repeating elements
3187 """Iterate over both collections without repeating elements
3189
3188
3190 If the ascending attribute is not set, iterate over the first one and
3189 If the ascending attribute is not set, iterate over the first one and
3191 then over the second one, checking for membership in the first one so we
3190 then over the second one, checking for membership in the first one so we
3192 don't yield any duplicates.
3191 don't yield any duplicates.
3193
3192
3194 If the ascending attribute is set, iterate over both collections at the
3193 If the ascending attribute is set, iterate over both collections at the
3195 same time, yielding only one value at a time in the given order.
3194 same time, yielding only one value at a time in the given order.
3196 """
3195 """
3197 if self._ascending is None:
3196 if self._ascending is None:
3198 if self._genlist:
3197 if self._genlist:
3199 return iter(self._genlist)
3198 return iter(self._genlist)
3200 def arbitraryordergen():
3199 def arbitraryordergen():
3201 for r in self._r1:
3200 for r in self._r1:
3202 yield r
3201 yield r
3203 inr1 = self._r1.__contains__
3202 inr1 = self._r1.__contains__
3204 for r in self._r2:
3203 for r in self._r2:
3205 if not inr1(r):
3204 if not inr1(r):
3206 yield r
3205 yield r
3207 return arbitraryordergen()
3206 return arbitraryordergen()
3208 # try to use our own fast iterator if it exists
3207 # try to use our own fast iterator if it exists
3209 self._trysetasclist()
3208 self._trysetasclist()
3210 if self._ascending:
3209 if self._ascending:
3211 attr = 'fastasc'
3210 attr = 'fastasc'
3212 else:
3211 else:
3213 attr = 'fastdesc'
3212 attr = 'fastdesc'
3214 it = getattr(self, attr)
3213 it = getattr(self, attr)
3215 if it is not None:
3214 if it is not None:
3216 return it()
3215 return it()
3217 # maybe one of the two components supports fast iteration
3216 # maybe one of the two components supports fast iteration
3218 # get iterator for _r1
3217 # get iterator for _r1
3219 iter1 = getattr(self._r1, attr)
3218 iter1 = getattr(self._r1, attr)
3220 if iter1 is None:
3219 if iter1 is None:
3221 # let's avoid side effect (not sure it matters)
3220 # let's avoid side effect (not sure it matters)
3222 iter1 = iter(sorted(self._r1, reverse=not self._ascending))
3221 iter1 = iter(sorted(self._r1, reverse=not self._ascending))
3223 else:
3222 else:
3224 iter1 = iter1()
3223 iter1 = iter1()
3225 # get iterator for _r2
3224 # get iterator for _r2
3226 iter2 = getattr(self._r2, attr)
3225 iter2 = getattr(self._r2, attr)
3227 if iter2 is None:
3226 if iter2 is None:
3228 # let's avoid side effect (not sure it matters)
3227 # let's avoid side effect (not sure it matters)
3229 iter2 = iter(sorted(self._r2, reverse=not self._ascending))
3228 iter2 = iter(sorted(self._r2, reverse=not self._ascending))
3230 else:
3229 else:
3231 iter2 = iter2()
3230 iter2 = iter2()
3232 return _iterordered(self._ascending, iter1, iter2)
3231 return _iterordered(self._ascending, iter1, iter2)
3233
3232
3234 def _trysetasclist(self):
3233 def _trysetasclist(self):
3235 """populate the _asclist attribute if possible and necessary"""
3234 """populate the _asclist attribute if possible and necessary"""
3236 if self._genlist is not None and self._asclist is None:
3235 if self._genlist is not None and self._asclist is None:
3237 self._asclist = sorted(self._genlist)
3236 self._asclist = sorted(self._genlist)
3238
3237
3239 @property
3238 @property
3240 def fastasc(self):
3239 def fastasc(self):
3241 self._trysetasclist()
3240 self._trysetasclist()
3242 if self._asclist is not None:
3241 if self._asclist is not None:
3243 return self._asclist.__iter__
3242 return self._asclist.__iter__
3244 iter1 = self._r1.fastasc
3243 iter1 = self._r1.fastasc
3245 iter2 = self._r2.fastasc
3244 iter2 = self._r2.fastasc
3246 if None in (iter1, iter2):
3245 if None in (iter1, iter2):
3247 return None
3246 return None
3248 return lambda: _iterordered(True, iter1(), iter2())
3247 return lambda: _iterordered(True, iter1(), iter2())
3249
3248
3250 @property
3249 @property
3251 def fastdesc(self):
3250 def fastdesc(self):
3252 self._trysetasclist()
3251 self._trysetasclist()
3253 if self._asclist is not None:
3252 if self._asclist is not None:
3254 return self._asclist.__reversed__
3253 return self._asclist.__reversed__
3255 iter1 = self._r1.fastdesc
3254 iter1 = self._r1.fastdesc
3256 iter2 = self._r2.fastdesc
3255 iter2 = self._r2.fastdesc
3257 if None in (iter1, iter2):
3256 if None in (iter1, iter2):
3258 return None
3257 return None
3259 return lambda: _iterordered(False, iter1(), iter2())
3258 return lambda: _iterordered(False, iter1(), iter2())
3260
3259
3261 def __contains__(self, x):
3260 def __contains__(self, x):
3262 return x in self._r1 or x in self._r2
3261 return x in self._r1 or x in self._r2
3263
3262
3264 def sort(self, reverse=False):
3263 def sort(self, reverse=False):
3265 """Sort the added set
3264 """Sort the added set
3266
3265
3267 For this we use the cached list with all the generated values and if we
3266 For this we use the cached list with all the generated values and if we
3268 know they are ascending or descending we can sort them in a smart way.
3267 know they are ascending or descending we can sort them in a smart way.
3269 """
3268 """
3270 self._ascending = not reverse
3269 self._ascending = not reverse
3271
3270
3272 def isascending(self):
3271 def isascending(self):
3273 return self._ascending is not None and self._ascending
3272 return self._ascending is not None and self._ascending
3274
3273
3275 def isdescending(self):
3274 def isdescending(self):
3276 return self._ascending is not None and not self._ascending
3275 return self._ascending is not None and not self._ascending
3277
3276
3278 def istopo(self):
3277 def istopo(self):
3279 # not worth the trouble asserting whether the two sets combined are still
3278 # not worth the trouble asserting whether the two sets combined are still
3280 # in topological order. Use the sort() predicate to explicitly sort
3279 # in topological order. Use the sort() predicate to explicitly sort
3281 # again instead.
3280 # again instead.
3282 return False
3281 return False
3283
3282
3284 def reverse(self):
3283 def reverse(self):
3285 if self._ascending is None:
3284 if self._ascending is None:
3286 self._list.reverse()
3285 self._list.reverse()
3287 else:
3286 else:
3288 self._ascending = not self._ascending
3287 self._ascending = not self._ascending
3289
3288
3290 def first(self):
3289 def first(self):
3291 for x in self:
3290 for x in self:
3292 return x
3291 return x
3293 return None
3292 return None
3294
3293
3295 def last(self):
3294 def last(self):
3296 self.reverse()
3295 self.reverse()
3297 val = self.first()
3296 val = self.first()
3298 self.reverse()
3297 self.reverse()
3299 return val
3298 return val
3300
3299
3301 def __repr__(self):
3300 def __repr__(self):
3302 d = {None: '', False: '-', True: '+'}[self._ascending]
3301 d = {None: '', False: '-', True: '+'}[self._ascending]
3303 return '<%s%s %r, %r>' % (type(self).__name__, d, self._r1, self._r2)
3302 return '<%s%s %r, %r>' % (type(self).__name__, d, self._r1, self._r2)
3304
3303
3305 class generatorset(abstractsmartset):
3304 class generatorset(abstractsmartset):
3306 """Wrap a generator for lazy iteration
3305 """Wrap a generator for lazy iteration
3307
3306
3308 Wrapper structure for generators that provides lazy membership and can
3307 Wrapper structure for generators that provides lazy membership and can
3309 be iterated more than once.
3308 be iterated more than once.
3310 When asked for membership it generates values until either it finds the
3309 When asked for membership it generates values until either it finds the
3311 requested one or has gone through all the elements in the generator
3310 requested one or has gone through all the elements in the generator
3312 """
3311 """
3313 def __init__(self, gen, iterasc=None):
3312 def __init__(self, gen, iterasc=None):
3314 """
3313 """
3315 gen: a generator producing the values for the generatorset.
3314 gen: a generator producing the values for the generatorset.
3316 """
3315 """
3317 self._gen = gen
3316 self._gen = gen
3318 self._asclist = None
3317 self._asclist = None
3319 self._cache = {}
3318 self._cache = {}
3320 self._genlist = []
3319 self._genlist = []
3321 self._finished = False
3320 self._finished = False
3322 self._ascending = True
3321 self._ascending = True
3323 if iterasc is not None:
3322 if iterasc is not None:
3324 if iterasc:
3323 if iterasc:
3325 self.fastasc = self._iterator
3324 self.fastasc = self._iterator
3326 self.__contains__ = self._asccontains
3325 self.__contains__ = self._asccontains
3327 else:
3326 else:
3328 self.fastdesc = self._iterator
3327 self.fastdesc = self._iterator
3329 self.__contains__ = self._desccontains
3328 self.__contains__ = self._desccontains
3330
3329
3331 def __nonzero__(self):
3330 def __nonzero__(self):
3332 # Do not use 'for r in self' because it will enforce the iteration
3331 # Do not use 'for r in self' because it will enforce the iteration
3333 # order (default ascending), possibly unrolling a whole descending
3332 # order (default ascending), possibly unrolling a whole descending
3334 # iterator.
3333 # iterator.
3335 if self._genlist:
3334 if self._genlist:
3336 return True
3335 return True
3337 for r in self._consumegen():
3336 for r in self._consumegen():
3338 return True
3337 return True
3339 return False
3338 return False
3340
3339
3341 def __contains__(self, x):
3340 def __contains__(self, x):
3342 if x in self._cache:
3341 if x in self._cache:
3343 return self._cache[x]
3342 return self._cache[x]
3344
3343
3345 # Use new values only, as existing values would be cached.
3344 # Use new values only, as existing values would be cached.
3346 for l in self._consumegen():
3345 for l in self._consumegen():
3347 if l == x:
3346 if l == x:
3348 return True
3347 return True
3349
3348
3350 self._cache[x] = False
3349 self._cache[x] = False
3351 return False
3350 return False
3352
3351
3353 def _asccontains(self, x):
3352 def _asccontains(self, x):
3354 """version of contains optimised for ascending generator"""
3353 """version of contains optimised for ascending generator"""
3355 if x in self._cache:
3354 if x in self._cache:
3356 return self._cache[x]
3355 return self._cache[x]
3357
3356
3358 # Use new values only, as existing values would be cached.
3357 # Use new values only, as existing values would be cached.
3359 for l in self._consumegen():
3358 for l in self._consumegen():
3360 if l == x:
3359 if l == x:
3361 return True
3360 return True
3362 if l > x:
3361 if l > x:
3363 break
3362 break
3364
3363
3365 self._cache[x] = False
3364 self._cache[x] = False
3366 return False
3365 return False
3367
3366
3368 def _desccontains(self, x):
3367 def _desccontains(self, x):
3369 """version of contains optimised for descending generator"""
3368 """version of contains optimised for descending generator"""
3370 if x in self._cache:
3369 if x in self._cache:
3371 return self._cache[x]
3370 return self._cache[x]
3372
3371
3373 # Use new values only, as existing values would be cached.
3372 # Use new values only, as existing values would be cached.
3374 for l in self._consumegen():
3373 for l in self._consumegen():
3375 if l == x:
3374 if l == x:
3376 return True
3375 return True
3377 if l < x:
3376 if l < x:
3378 break
3377 break
3379
3378
3380 self._cache[x] = False
3379 self._cache[x] = False
3381 return False
3380 return False
3382
3381
3383 def __iter__(self):
3382 def __iter__(self):
3384 if self._ascending:
3383 if self._ascending:
3385 it = self.fastasc
3384 it = self.fastasc
3386 else:
3385 else:
3387 it = self.fastdesc
3386 it = self.fastdesc
3388 if it is not None:
3387 if it is not None:
3389 return it()
3388 return it()
3390 # we need to consume the iterator
3389 # we need to consume the iterator
3391 for x in self._consumegen():
3390 for x in self._consumegen():
3392 pass
3391 pass
3393 # recall the same code
3392 # recall the same code
3394 return iter(self)
3393 return iter(self)
3395
3394
3396 def _iterator(self):
3395 def _iterator(self):
3397 if self._finished:
3396 if self._finished:
3398 return iter(self._genlist)
3397 return iter(self._genlist)
3399
3398
3400 # We have to use this complex iteration strategy to allow multiple
3399 # We have to use this complex iteration strategy to allow multiple
3401 # iterations at the same time. We need to be able to catch revisions
3400 # iterations at the same time. We need to be able to catch revisions
3402 # pulled from _consumegen and added to genlist by another instance.
3401 # pulled from _consumegen and added to genlist by another instance.
3403 #
3402 #
3404 # Getting rid of it would provide about a 15% speedup on this
3403 # Getting rid of it would provide about a 15% speedup on this
3405 # iteration.
3404 # iteration.
3406 genlist = self._genlist
3405 genlist = self._genlist
3407 nextrev = self._consumegen().next
3406 nextrev = self._consumegen().next
3408 _len = len # cache global lookup
3407 _len = len # cache global lookup
3409 def gen():
3408 def gen():
3410 i = 0
3409 i = 0
3411 while True:
3410 while True:
3412 if i < _len(genlist):
3411 if i < _len(genlist):
3413 yield genlist[i]
3412 yield genlist[i]
3414 else:
3413 else:
3415 yield nextrev()
3414 yield nextrev()
3416 i += 1
3415 i += 1
3417 return gen()
3416 return gen()
3418
3417
3419 def _consumegen(self):
3418 def _consumegen(self):
3420 cache = self._cache
3419 cache = self._cache
3421 genlist = self._genlist.append
3420 genlist = self._genlist.append
3422 for item in self._gen:
3421 for item in self._gen:
3423 cache[item] = True
3422 cache[item] = True
3424 genlist(item)
3423 genlist(item)
3425 yield item
3424 yield item
3426 if not self._finished:
3425 if not self._finished:
3427 self._finished = True
3426 self._finished = True
3428 asc = self._genlist[:]
3427 asc = self._genlist[:]
3429 asc.sort()
3428 asc.sort()
3430 self._asclist = asc
3429 self._asclist = asc
3431 self.fastasc = asc.__iter__
3430 self.fastasc = asc.__iter__
3432 self.fastdesc = asc.__reversed__
3431 self.fastdesc = asc.__reversed__
3433
3432
3434 def __len__(self):
3433 def __len__(self):
3435 for x in self._consumegen():
3434 for x in self._consumegen():
3436 pass
3435 pass
3437 return len(self._genlist)
3436 return len(self._genlist)
3438
3437
3439 def sort(self, reverse=False):
3438 def sort(self, reverse=False):
3440 self._ascending = not reverse
3439 self._ascending = not reverse
3441
3440
3442 def reverse(self):
3441 def reverse(self):
3443 self._ascending = not self._ascending
3442 self._ascending = not self._ascending
3444
3443
3445 def isascending(self):
3444 def isascending(self):
3446 return self._ascending
3445 return self._ascending
3447
3446
3448 def isdescending(self):
3447 def isdescending(self):
3449 return not self._ascending
3448 return not self._ascending
3450
3449
3451 def istopo(self):
3450 def istopo(self):
3452 # not worth the trouble asserting whether the generated values are still
3451 # not worth the trouble asserting whether the generated values are still
3453 # in topological order. Use the sort() predicate to explicitly sort
3452 # in topological order. Use the sort() predicate to explicitly sort
3454 # again instead.
3453 # again instead.
3455 return False
3454 return False
3456
3455
3457 def first(self):
3456 def first(self):
3458 if self._ascending:
3457 if self._ascending:
3459 it = self.fastasc
3458 it = self.fastasc
3460 else:
3459 else:
3461 it = self.fastdesc
3460 it = self.fastdesc
3462 if it is None:
3461 if it is None:
3463 # we need to consume all and try again
3462 # we need to consume all and try again
3464 for x in self._consumegen():
3463 for x in self._consumegen():
3465 pass
3464 pass
3466 return self.first()
3465 return self.first()
3467 return next(it(), None)
3466 return next(it(), None)
3468
3467
3469 def last(self):
3468 def last(self):
3470 if self._ascending:
3469 if self._ascending:
3471 it = self.fastdesc
3470 it = self.fastdesc
3472 else:
3471 else:
3473 it = self.fastasc
3472 it = self.fastasc
3474 if it is None:
3473 if it is None:
3475 # we need to consume all and try again
3474 # we need to consume all and try again
3476 for x in self._consumegen():
3475 for x in self._consumegen():
3477 pass
3476 pass
3478 return self.first()
3477 return self.first()
3479 return next(it(), None)
3478 return next(it(), None)
3480
3479
3481 def __repr__(self):
3480 def __repr__(self):
3482 d = {False: '-', True: '+'}[self._ascending]
3481 d = {False: '-', True: '+'}[self._ascending]
3483 return '<%s%s>' % (type(self).__name__, d)
3482 return '<%s%s>' % (type(self).__name__, d)
3484
3483
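# Editor's note: illustrative sketch only, not part of the original module.
# generatorset caches whatever the wrapped generator has produced, so the
# same instance can be probed and iterated repeatedly.
def _example_generatorset_usage():
    gs = generatorset(iter([0, 2, 4, 6]), iterasc=True)
    probe = 4 in gs      # consumes values only until the answer is known
    once = list(gs)      # [0, 2, 4, 6], remaining values pulled lazily
    again = list(gs)     # second pass replays the cached list
    return probe, once, again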
3485 class spanset(abstractsmartset):
3484 class spanset(abstractsmartset):
3486 """Duck type for baseset class which represents a range of revisions and
3485 """Duck type for baseset class which represents a range of revisions and
3487 can work lazily and without having all the range in memory
3486 can work lazily and without having all the range in memory
3488
3487
3489 Note that spanset(x, y) behaves almost like xrange(x, y) except for two
3488 Note that spanset(x, y) behaves almost like xrange(x, y) except for two
3490 notable points:
3489 notable points:
3491 - when y < x it will be automatically descending,
3490 - when y < x it will be automatically descending,
3492 - revisions filtered by the repoview will be skipped.
3491 - revisions filtered by the repoview will be skipped.
3493
3492
3494 """
3493 """
3495 def __init__(self, repo, start=0, end=None):
3494 def __init__(self, repo, start=0, end=None):
3496 """
3495 """
3497 start: first revision included in the set
3496 start: first revision included in the set
3498 (defaults to 0)
3497 (defaults to 0)
3499 end: first revision excluded (last+1)
3498 end: first revision excluded (last+1)
3500 (defaults to len(repo))
3499 (defaults to len(repo))
3501
3500
3502 Spanset will be descending if `end` < `start`.
3501 Spanset will be descending if `end` < `start`.
3503 """
3502 """
3504 if end is None:
3503 if end is None:
3505 end = len(repo)
3504 end = len(repo)
3506 self._ascending = start <= end
3505 self._ascending = start <= end
3507 if not self._ascending:
3506 if not self._ascending:
3508 start, end = end + 1, start + 1
3507 start, end = end + 1, start + 1
3509 self._start = start
3508 self._start = start
3510 self._end = end
3509 self._end = end
3511 self._hiddenrevs = repo.changelog.filteredrevs
3510 self._hiddenrevs = repo.changelog.filteredrevs
3512
3511
3513 def sort(self, reverse=False):
3512 def sort(self, reverse=False):
3514 self._ascending = not reverse
3513 self._ascending = not reverse
3515
3514
3516 def reverse(self):
3515 def reverse(self):
3517 self._ascending = not self._ascending
3516 self._ascending = not self._ascending
3518
3517
3519 def istopo(self):
3518 def istopo(self):
3520 # not worth the trouble asserting whether the range is still
3519 # not worth the trouble asserting whether the range is still
3521 # in topological order. Use the sort() predicate to explicitly sort
3520 # in topological order. Use the sort() predicate to explicitly sort
3522 # again instead.
3521 # again instead.
3523 return False
3522 return False

    def _iterfilter(self, iterrange):
        s = self._hiddenrevs
        for r in iterrange:
            if r not in s:
                yield r

    def __iter__(self):
        if self._ascending:
            return self.fastasc()
        else:
            return self.fastdesc()

    def fastasc(self):
        iterrange = xrange(self._start, self._end)
        if self._hiddenrevs:
            return self._iterfilter(iterrange)
        return iter(iterrange)

    def fastdesc(self):
        iterrange = xrange(self._end - 1, self._start - 1, -1)
        if self._hiddenrevs:
            return self._iterfilter(iterrange)
        return iter(iterrange)

    def __contains__(self, rev):
        hidden = self._hiddenrevs
        return ((self._start <= rev < self._end)
                and not (hidden and rev in hidden))

    def __nonzero__(self):
        for r in self:
            return True
        return False

    def __len__(self):
        if not self._hiddenrevs:
            return abs(self._end - self._start)
        else:
            count = 0
            start = self._start
            end = self._end
            for rev in self._hiddenrevs:
                if (end < rev <= start) or (start <= rev < end):
                    count += 1
            return abs(self._end - self._start) - count

    def isascending(self):
        return self._ascending

    def isdescending(self):
        return not self._ascending

    def first(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        for x in it():
            return x
        return None

    def last(self):
        if self._ascending:
            it = self.fastdesc
        else:
            it = self.fastasc
        for x in it():
            return x
        return None

    def __repr__(self):
        d = {False: '-', True: '+'}[self._ascending]
        return '<%s%s %d:%d>' % (type(self).__name__, d,
                                 self._start, self._end - 1)

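# Illustrative sketch (not part of the original module): how a spanset is
# typically consumed.  The helper below is hypothetical and assumes `repo` is
# an already-loaded localrepository; spanset(repo) then covers revisions
# 0..len(repo)-1, lazily skipping anything in repo.changelog.filteredrevs.
def _demo_spanset(repo):
    s = spanset(repo)
    s.sort(reverse=True)           # only flips iteration order, nothing is copied
    # first()/last() walk fastdesc()/fastasc() until the first visible rev
    return [s.first(), s.last()]   # highest and lowest non-hidden revision
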
class fullreposet(spanset):
    """a set containing all revisions in the repo

    This class exists to host special optimization and magic to handle virtual
    revisions such as "null".
    """

    def __init__(self, repo):
        super(fullreposet, self).__init__(repo)

    def __and__(self, other):
        """As self contains the whole repo, everything in the other set should
        also be in self. Therefore `self & other = other`.

        This boldly assumes the other contains valid revs only.
        """
        # other is not a smartset, make it so
        if not util.safehasattr(other, 'isascending'):
            # filter out hidden revisions
            # (this boldly assumes all smartsets are pure)
            #
            # `other` was used with "&", let's assume this is a set like
            # object.
            other = baseset(other - self._hiddenrevs)

        # XXX As fullreposet is also used as bootstrap, this is wrong.
        #
        # With a giveme312() revset returning [3,1,2], this makes
        # 'hg log -r "giveme312()"' -> 1, 2, 3 (wrong)
        # We cannot just drop it because other usages still need to sort it:
        # 'hg log -r "all() and giveme312()"' -> 1, 2, 3 (right)
        #
        # There are also some faulty revset implementations that rely on it
        # (eg: children as of its state in e8075329c5fb)
        #
        # When we fix the two points above we can move this into the if clause
        other.sort(reverse=self.isdescending())
        return other

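# Illustrative sketch (not part of the original module): the practical effect
# of fullreposet.__and__ above.  The helper name is hypothetical and `repo` is
# assumed to be an already-loaded localrepository with at least 4 revisions.
def _demo_fullreposet_and(repo):
    unordered = baseset([3, 1, 2])
    # `other` is already a smartset, so __and__ only re-sorts it to match the
    # (ascending by default) order of the full repo set.
    return list(fullreposet(repo) & unordered)    # -> [1, 2, 3]
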
def prettyformatset(revs):
    lines = []
    rs = repr(revs)
    p = 0
    while p < len(rs):
        q = rs.find('<', p + 1)
        if q < 0:
            q = len(rs)
        l = rs.count('<', 0, p) - rs.count('>', 0, p)
        assert l >= 0
        lines.append((l, rs[p:q].rstrip()))
        p = q
    return '\n'.join(' ' * l + s for l, s in lines)

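# Illustrative sketch (not part of the original module): prettyformatset()
# reflows a nested smartset repr, putting each '<...' fragment on its own line
# indented by its nesting depth (the count of unmatched '<').  The helper name
# is hypothetical and `repo` is assumed to be an already-loaded repository.
def _demo_prettyformatset(repo):
    revs = spanset(repo).filter(lambda r: r % 2 == 0)
    # repr(revs) is a one-line nested form (a filteredset wrapping a spanset);
    # the formatted version indents the inner set one level deeper.
    return prettyformatset(revs)
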
def loadpredicate(ui, extname, registrarobj):
    """Load revset predicates from specified registrarobj
    """
    for name, func in registrarobj._table.iteritems():
        symbols[name] = func
        if func._safe:
            safesymbols.add(name)

# load built-in predicates explicitly to set up safesymbols
loadpredicate(None, None, predicate)

# tell hggettext to extract docstrings from these functions:
i18nfunctions = symbols.values()
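
# Illustrative sketch (not part of the original module): how an extension's
# predicates reach loadpredicate().  An extension declares a
# registrar.revsetpredicate() table, decorates its predicate functions, and
# Mercurial then calls loadpredicate(ui, extname, <that table>) when the
# extension is loaded.  The table and predicate names below are hypothetical.
_demopredicate = registrar.revsetpredicate()

@_demopredicate('onlydraft()', safe=True)
def _onlydraft(repo, subset, x):
    """Hypothetical predicate: draft-phase revisions within the subset."""
    # safe=True above is what makes loadpredicate() add it to safesymbols
    getargs(x, 0, 0, _("onlydraft takes no arguments"))
    getphase = repo._phasecache.phase
    return subset.filter(lambda r: getphase(repo, r) == phases.draft)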