revset: add default value to getinteger() helper...
Yuya Nishihara
r30802:5eb3e456 default
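
The hunk below introduces a `_notset` sentinel and a `default` keyword argument on `getinteger()`, so revset predicates with an optional integer argument can fall back to a caller-supplied value (including None) instead of raising a ParseError when the argument is omitted. The following is a minimal, self-contained sketch of that pattern, not Mercurial code: `ParseError` is a local stand-in for `error.ParseError`, and `example_offset` with its 'offset' key is a hypothetical caller.

    class ParseError(Exception):
        pass

    _notset = object()  # sentinel: distinguishes "no default given" from default=None

    def getstring(x, err):
        # same shape as the helper in the diff: accept 'string' or 'symbol' nodes
        if x and (x[0] == 'string' or x[0] == 'symbol'):
            return x[1]
        raise ParseError(err)

    def getinteger(x, err, default=_notset):
        # mirrors the new helper: use the default only when the argument node
        # is missing and a default was actually supplied
        if not x and default is not _notset:
            return default
        try:
            return int(getstring(x, err))
        except ValueError:
            raise ParseError(err)

    # Hypothetical caller: an optional numeric argument defaulting to 0
    def example_offset(args):
        return getinteger(args.get('offset'), "offset expects a number", default=0)

    print(example_offset({}))                              # -> 0 (argument omitted)
    print(example_offset({'offset': ('symbol', '3', 0)}))  # -> 3
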
@@ -1,3888 +1,3889 @@
1 # revset.py - revision set queries for mercurial
1 # revset.py - revision set queries for mercurial
2 #
2 #
3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import heapq
10 import heapq
11 import re
11 import re
12 import string
12 import string
13
13
14 from .i18n import _
14 from .i18n import _
15 from . import (
15 from . import (
16 destutil,
16 destutil,
17 encoding,
17 encoding,
18 error,
18 error,
19 hbisect,
19 hbisect,
20 match as matchmod,
20 match as matchmod,
21 node,
21 node,
22 obsolete as obsmod,
22 obsolete as obsmod,
23 parser,
23 parser,
24 pathutil,
24 pathutil,
25 phases,
25 phases,
26 pycompat,
26 pycompat,
27 registrar,
27 registrar,
28 repoview,
28 repoview,
29 util,
29 util,
30 )
30 )
31
31
32 def _revancestors(repo, revs, followfirst):
32 def _revancestors(repo, revs, followfirst):
33 """Like revlog.ancestors(), but supports followfirst."""
33 """Like revlog.ancestors(), but supports followfirst."""
34 if followfirst:
34 if followfirst:
35 cut = 1
35 cut = 1
36 else:
36 else:
37 cut = None
37 cut = None
38 cl = repo.changelog
38 cl = repo.changelog
39
39
40 def iterate():
40 def iterate():
41 revs.sort(reverse=True)
41 revs.sort(reverse=True)
42 irevs = iter(revs)
42 irevs = iter(revs)
43 h = []
43 h = []
44
44
45 inputrev = next(irevs, None)
45 inputrev = next(irevs, None)
46 if inputrev is not None:
46 if inputrev is not None:
47 heapq.heappush(h, -inputrev)
47 heapq.heappush(h, -inputrev)
48
48
49 seen = set()
49 seen = set()
50 while h:
50 while h:
51 current = -heapq.heappop(h)
51 current = -heapq.heappop(h)
52 if current == inputrev:
52 if current == inputrev:
53 inputrev = next(irevs, None)
53 inputrev = next(irevs, None)
54 if inputrev is not None:
54 if inputrev is not None:
55 heapq.heappush(h, -inputrev)
55 heapq.heappush(h, -inputrev)
56 if current not in seen:
56 if current not in seen:
57 seen.add(current)
57 seen.add(current)
58 yield current
58 yield current
59 for parent in cl.parentrevs(current)[:cut]:
59 for parent in cl.parentrevs(current)[:cut]:
60 if parent != node.nullrev:
60 if parent != node.nullrev:
61 heapq.heappush(h, -parent)
61 heapq.heappush(h, -parent)
62
62
63 return generatorset(iterate(), iterasc=False)
63 return generatorset(iterate(), iterasc=False)
64
64
65 def _revdescendants(repo, revs, followfirst):
65 def _revdescendants(repo, revs, followfirst):
66 """Like revlog.descendants() but supports followfirst."""
66 """Like revlog.descendants() but supports followfirst."""
67 if followfirst:
67 if followfirst:
68 cut = 1
68 cut = 1
69 else:
69 else:
70 cut = None
70 cut = None
71
71
72 def iterate():
72 def iterate():
73 cl = repo.changelog
73 cl = repo.changelog
74 # XXX this should be 'parentset.min()' assuming 'parentset' is a
74 # XXX this should be 'parentset.min()' assuming 'parentset' is a
75 # smartset (and if it is not, it should.)
75 # smartset (and if it is not, it should.)
76 first = min(revs)
76 first = min(revs)
77 nullrev = node.nullrev
77 nullrev = node.nullrev
78 if first == nullrev:
78 if first == nullrev:
79 # Are there nodes with a null first parent and a non-null
79 # Are there nodes with a null first parent and a non-null
80 # second one? Maybe. Do we care? Probably not.
80 # second one? Maybe. Do we care? Probably not.
81 for i in cl:
81 for i in cl:
82 yield i
82 yield i
83 else:
83 else:
84 seen = set(revs)
84 seen = set(revs)
85 for i in cl.revs(first + 1):
85 for i in cl.revs(first + 1):
86 for x in cl.parentrevs(i)[:cut]:
86 for x in cl.parentrevs(i)[:cut]:
87 if x != nullrev and x in seen:
87 if x != nullrev and x in seen:
88 seen.add(i)
88 seen.add(i)
89 yield i
89 yield i
90 break
90 break
91
91
92 return generatorset(iterate(), iterasc=True)
92 return generatorset(iterate(), iterasc=True)
93
93
94 def _reachablerootspure(repo, minroot, roots, heads, includepath):
94 def _reachablerootspure(repo, minroot, roots, heads, includepath):
95 """return (heads(::<roots> and ::<heads>))
95 """return (heads(::<roots> and ::<heads>))
96
96
97 If includepath is True, return (<roots>::<heads>)."""
97 If includepath is True, return (<roots>::<heads>)."""
98 if not roots:
98 if not roots:
99 return []
99 return []
100 parentrevs = repo.changelog.parentrevs
100 parentrevs = repo.changelog.parentrevs
101 roots = set(roots)
101 roots = set(roots)
102 visit = list(heads)
102 visit = list(heads)
103 reachable = set()
103 reachable = set()
104 seen = {}
104 seen = {}
105 # prefetch all the things! (because python is slow)
105 # prefetch all the things! (because python is slow)
106 reached = reachable.add
106 reached = reachable.add
107 dovisit = visit.append
107 dovisit = visit.append
108 nextvisit = visit.pop
108 nextvisit = visit.pop
109 # open-code the post-order traversal due to the tiny size of
109 # open-code the post-order traversal due to the tiny size of
110 # sys.getrecursionlimit()
110 # sys.getrecursionlimit()
111 while visit:
111 while visit:
112 rev = nextvisit()
112 rev = nextvisit()
113 if rev in roots:
113 if rev in roots:
114 reached(rev)
114 reached(rev)
115 if not includepath:
115 if not includepath:
116 continue
116 continue
117 parents = parentrevs(rev)
117 parents = parentrevs(rev)
118 seen[rev] = parents
118 seen[rev] = parents
119 for parent in parents:
119 for parent in parents:
120 if parent >= minroot and parent not in seen:
120 if parent >= minroot and parent not in seen:
121 dovisit(parent)
121 dovisit(parent)
122 if not reachable:
122 if not reachable:
123 return baseset()
123 return baseset()
124 if not includepath:
124 if not includepath:
125 return reachable
125 return reachable
126 for rev in sorted(seen):
126 for rev in sorted(seen):
127 for parent in seen[rev]:
127 for parent in seen[rev]:
128 if parent in reachable:
128 if parent in reachable:
129 reached(rev)
129 reached(rev)
130 return reachable
130 return reachable
131
131
132 def reachableroots(repo, roots, heads, includepath=False):
132 def reachableroots(repo, roots, heads, includepath=False):
133 """return (heads(::<roots> and ::<heads>))
133 """return (heads(::<roots> and ::<heads>))
134
134
135 If includepath is True, return (<roots>::<heads>)."""
135 If includepath is True, return (<roots>::<heads>)."""
136 if not roots:
136 if not roots:
137 return baseset()
137 return baseset()
138 minroot = roots.min()
138 minroot = roots.min()
139 roots = list(roots)
139 roots = list(roots)
140 heads = list(heads)
140 heads = list(heads)
141 try:
141 try:
142 revs = repo.changelog.reachableroots(minroot, heads, roots, includepath)
142 revs = repo.changelog.reachableroots(minroot, heads, roots, includepath)
143 except AttributeError:
143 except AttributeError:
144 revs = _reachablerootspure(repo, minroot, roots, heads, includepath)
144 revs = _reachablerootspure(repo, minroot, roots, heads, includepath)
145 revs = baseset(revs)
145 revs = baseset(revs)
146 revs.sort()
146 revs.sort()
147 return revs
147 return revs
148
148
149 elements = {
149 elements = {
150 # token-type: binding-strength, primary, prefix, infix, suffix
150 # token-type: binding-strength, primary, prefix, infix, suffix
151 "(": (21, None, ("group", 1, ")"), ("func", 1, ")"), None),
151 "(": (21, None, ("group", 1, ")"), ("func", 1, ")"), None),
152 "##": (20, None, None, ("_concat", 20), None),
152 "##": (20, None, None, ("_concat", 20), None),
153 "~": (18, None, None, ("ancestor", 18), None),
153 "~": (18, None, None, ("ancestor", 18), None),
154 "^": (18, None, None, ("parent", 18), "parentpost"),
154 "^": (18, None, None, ("parent", 18), "parentpost"),
155 "-": (5, None, ("negate", 19), ("minus", 5), None),
155 "-": (5, None, ("negate", 19), ("minus", 5), None),
156 "::": (17, None, ("dagrangepre", 17), ("dagrange", 17), "dagrangepost"),
156 "::": (17, None, ("dagrangepre", 17), ("dagrange", 17), "dagrangepost"),
157 "..": (17, None, ("dagrangepre", 17), ("dagrange", 17), "dagrangepost"),
157 "..": (17, None, ("dagrangepre", 17), ("dagrange", 17), "dagrangepost"),
158 ":": (15, "rangeall", ("rangepre", 15), ("range", 15), "rangepost"),
158 ":": (15, "rangeall", ("rangepre", 15), ("range", 15), "rangepost"),
159 "not": (10, None, ("not", 10), None, None),
159 "not": (10, None, ("not", 10), None, None),
160 "!": (10, None, ("not", 10), None, None),
160 "!": (10, None, ("not", 10), None, None),
161 "and": (5, None, None, ("and", 5), None),
161 "and": (5, None, None, ("and", 5), None),
162 "&": (5, None, None, ("and", 5), None),
162 "&": (5, None, None, ("and", 5), None),
163 "%": (5, None, None, ("only", 5), "onlypost"),
163 "%": (5, None, None, ("only", 5), "onlypost"),
164 "or": (4, None, None, ("or", 4), None),
164 "or": (4, None, None, ("or", 4), None),
165 "|": (4, None, None, ("or", 4), None),
165 "|": (4, None, None, ("or", 4), None),
166 "+": (4, None, None, ("or", 4), None),
166 "+": (4, None, None, ("or", 4), None),
167 "=": (3, None, None, ("keyvalue", 3), None),
167 "=": (3, None, None, ("keyvalue", 3), None),
168 ",": (2, None, None, ("list", 2), None),
168 ",": (2, None, None, ("list", 2), None),
169 ")": (0, None, None, None, None),
169 ")": (0, None, None, None, None),
170 "symbol": (0, "symbol", None, None, None),
170 "symbol": (0, "symbol", None, None, None),
171 "string": (0, "string", None, None, None),
171 "string": (0, "string", None, None, None),
172 "end": (0, None, None, None, None),
172 "end": (0, None, None, None, None),
173 }
173 }
174
174
175 keywords = set(['and', 'or', 'not'])
175 keywords = set(['and', 'or', 'not'])
176
176
177 # default set of valid characters for the initial letter of symbols
177 # default set of valid characters for the initial letter of symbols
178 _syminitletters = set(
178 _syminitletters = set(
179 string.ascii_letters +
179 string.ascii_letters +
180 string.digits + pycompat.sysstr('._@')) | set(map(chr, xrange(128, 256)))
180 string.digits + pycompat.sysstr('._@')) | set(map(chr, xrange(128, 256)))
181
181
182 # default set of valid characters for non-initial letters of symbols
182 # default set of valid characters for non-initial letters of symbols
183 _symletters = _syminitletters | set(pycompat.sysstr('-/'))
183 _symletters = _syminitletters | set(pycompat.sysstr('-/'))
184
184
185 def tokenize(program, lookup=None, syminitletters=None, symletters=None):
185 def tokenize(program, lookup=None, syminitletters=None, symletters=None):
186 '''
186 '''
187 Parse a revset statement into a stream of tokens
187 Parse a revset statement into a stream of tokens
188
188
189 ``syminitletters`` is the set of valid characters for the initial
189 ``syminitletters`` is the set of valid characters for the initial
190 letter of symbols.
190 letter of symbols.
191
191
192 By default, character ``c`` is recognized as valid for initial
192 By default, character ``c`` is recognized as valid for initial
193 letter of symbols, if ``c.isalnum() or c in '._@' or ord(c) > 127``.
193 letter of symbols, if ``c.isalnum() or c in '._@' or ord(c) > 127``.
194
194
195 ``symletters`` is the set of valid characters for non-initial
195 ``symletters`` is the set of valid characters for non-initial
196 letters of symbols.
196 letters of symbols.
197
197
198 By default, character ``c`` is recognized as valid for non-initial
198 By default, character ``c`` is recognized as valid for non-initial
199 letters of symbols, if ``c.isalnum() or c in '-._/@' or ord(c) > 127``.
199 letters of symbols, if ``c.isalnum() or c in '-._/@' or ord(c) > 127``.
200
200
201 Check that @ is a valid unquoted token character (issue3686):
201 Check that @ is a valid unquoted token character (issue3686):
202 >>> list(tokenize("@::"))
202 >>> list(tokenize("@::"))
203 [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]
203 [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]
204
204
205 '''
205 '''
206 if syminitletters is None:
206 if syminitletters is None:
207 syminitletters = _syminitletters
207 syminitletters = _syminitletters
208 if symletters is None:
208 if symletters is None:
209 symletters = _symletters
209 symletters = _symletters
210
210
211 if program and lookup:
211 if program and lookup:
212 # attempt to parse old-style ranges first to deal with
212 # attempt to parse old-style ranges first to deal with
213 # things like old-tag which contain query metacharacters
213 # things like old-tag which contain query metacharacters
214 parts = program.split(':', 1)
214 parts = program.split(':', 1)
215 if all(lookup(sym) for sym in parts if sym):
215 if all(lookup(sym) for sym in parts if sym):
216 if parts[0]:
216 if parts[0]:
217 yield ('symbol', parts[0], 0)
217 yield ('symbol', parts[0], 0)
218 if len(parts) > 1:
218 if len(parts) > 1:
219 s = len(parts[0])
219 s = len(parts[0])
220 yield (':', None, s)
220 yield (':', None, s)
221 if parts[1]:
221 if parts[1]:
222 yield ('symbol', parts[1], s + 1)
222 yield ('symbol', parts[1], s + 1)
223 yield ('end', None, len(program))
223 yield ('end', None, len(program))
224 return
224 return
225
225
226 pos, l = 0, len(program)
226 pos, l = 0, len(program)
227 while pos < l:
227 while pos < l:
228 c = program[pos]
228 c = program[pos]
229 if c.isspace(): # skip inter-token whitespace
229 if c.isspace(): # skip inter-token whitespace
230 pass
230 pass
231 elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
231 elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
232 yield ('::', None, pos)
232 yield ('::', None, pos)
233 pos += 1 # skip ahead
233 pos += 1 # skip ahead
234 elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
234 elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
235 yield ('..', None, pos)
235 yield ('..', None, pos)
236 pos += 1 # skip ahead
236 pos += 1 # skip ahead
237 elif c == '#' and program[pos:pos + 2] == '##': # look ahead carefully
237 elif c == '#' and program[pos:pos + 2] == '##': # look ahead carefully
238 yield ('##', None, pos)
238 yield ('##', None, pos)
239 pos += 1 # skip ahead
239 pos += 1 # skip ahead
240 elif c in "():=,-|&+!~^%": # handle simple operators
240 elif c in "():=,-|&+!~^%": # handle simple operators
241 yield (c, None, pos)
241 yield (c, None, pos)
242 elif (c in '"\'' or c == 'r' and
242 elif (c in '"\'' or c == 'r' and
243 program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
243 program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
244 if c == 'r':
244 if c == 'r':
245 pos += 1
245 pos += 1
246 c = program[pos]
246 c = program[pos]
247 decode = lambda x: x
247 decode = lambda x: x
248 else:
248 else:
249 decode = parser.unescapestr
249 decode = parser.unescapestr
250 pos += 1
250 pos += 1
251 s = pos
251 s = pos
252 while pos < l: # find closing quote
252 while pos < l: # find closing quote
253 d = program[pos]
253 d = program[pos]
254 if d == '\\': # skip over escaped characters
254 if d == '\\': # skip over escaped characters
255 pos += 2
255 pos += 2
256 continue
256 continue
257 if d == c:
257 if d == c:
258 yield ('string', decode(program[s:pos]), s)
258 yield ('string', decode(program[s:pos]), s)
259 break
259 break
260 pos += 1
260 pos += 1
261 else:
261 else:
262 raise error.ParseError(_("unterminated string"), s)
262 raise error.ParseError(_("unterminated string"), s)
263 # gather up a symbol/keyword
263 # gather up a symbol/keyword
264 elif c in syminitletters:
264 elif c in syminitletters:
265 s = pos
265 s = pos
266 pos += 1
266 pos += 1
267 while pos < l: # find end of symbol
267 while pos < l: # find end of symbol
268 d = program[pos]
268 d = program[pos]
269 if d not in symletters:
269 if d not in symletters:
270 break
270 break
271 if d == '.' and program[pos - 1] == '.': # special case for ..
271 if d == '.' and program[pos - 1] == '.': # special case for ..
272 pos -= 1
272 pos -= 1
273 break
273 break
274 pos += 1
274 pos += 1
275 sym = program[s:pos]
275 sym = program[s:pos]
276 if sym in keywords: # operator keywords
276 if sym in keywords: # operator keywords
277 yield (sym, None, s)
277 yield (sym, None, s)
278 elif '-' in sym:
278 elif '-' in sym:
279 # some jerk gave us foo-bar-baz, try to check if it's a symbol
279 # some jerk gave us foo-bar-baz, try to check if it's a symbol
280 if lookup and lookup(sym):
280 if lookup and lookup(sym):
281 # looks like a real symbol
281 # looks like a real symbol
282 yield ('symbol', sym, s)
282 yield ('symbol', sym, s)
283 else:
283 else:
284 # looks like an expression
284 # looks like an expression
285 parts = sym.split('-')
285 parts = sym.split('-')
286 for p in parts[:-1]:
286 for p in parts[:-1]:
287 if p: # possible consecutive -
287 if p: # possible consecutive -
288 yield ('symbol', p, s)
288 yield ('symbol', p, s)
289 s += len(p)
289 s += len(p)
290 yield ('-', None, pos)
290 yield ('-', None, pos)
291 s += 1
291 s += 1
292 if parts[-1]: # possible trailing -
292 if parts[-1]: # possible trailing -
293 yield ('symbol', parts[-1], s)
293 yield ('symbol', parts[-1], s)
294 else:
294 else:
295 yield ('symbol', sym, s)
295 yield ('symbol', sym, s)
296 pos -= 1
296 pos -= 1
297 else:
297 else:
298 raise error.ParseError(_("syntax error in revset '%s'") %
298 raise error.ParseError(_("syntax error in revset '%s'") %
299 program, pos)
299 program, pos)
300 pos += 1
300 pos += 1
301 yield ('end', None, pos)
301 yield ('end', None, pos)
302
302
303 # helpers
303 # helpers
304
304
305 _notset = object()
306
305 def getsymbol(x):
307 def getsymbol(x):
306 if x and x[0] == 'symbol':
308 if x and x[0] == 'symbol':
307 return x[1]
309 return x[1]
308 raise error.ParseError(_('not a symbol'))
310 raise error.ParseError(_('not a symbol'))
309
311
310 def getstring(x, err):
312 def getstring(x, err):
311 if x and (x[0] == 'string' or x[0] == 'symbol'):
313 if x and (x[0] == 'string' or x[0] == 'symbol'):
312 return x[1]
314 return x[1]
313 raise error.ParseError(err)
315 raise error.ParseError(err)
314
316
315 def getinteger(x, err):
317 def getinteger(x, err, default=_notset):
318 if not x and default is not _notset:
319 return default
316 try:
320 try:
317 return int(getstring(x, err))
321 return int(getstring(x, err))
318 except ValueError:
322 except ValueError:
319 raise error.ParseError(err)
323 raise error.ParseError(err)
320
324
321 def getlist(x):
325 def getlist(x):
322 if not x:
326 if not x:
323 return []
327 return []
324 if x[0] == 'list':
328 if x[0] == 'list':
325 return list(x[1:])
329 return list(x[1:])
326 return [x]
330 return [x]
327
331
328 def getargs(x, min, max, err):
332 def getargs(x, min, max, err):
329 l = getlist(x)
333 l = getlist(x)
330 if len(l) < min or (max >= 0 and len(l) > max):
334 if len(l) < min or (max >= 0 and len(l) > max):
331 raise error.ParseError(err)
335 raise error.ParseError(err)
332 return l
336 return l
333
337
334 def getargsdict(x, funcname, keys):
338 def getargsdict(x, funcname, keys):
335 return parser.buildargsdict(getlist(x), funcname, parser.splitargspec(keys),
339 return parser.buildargsdict(getlist(x), funcname, parser.splitargspec(keys),
336 keyvaluenode='keyvalue', keynode='symbol')
340 keyvaluenode='keyvalue', keynode='symbol')
337
341
338 def getset(repo, subset, x):
342 def getset(repo, subset, x):
339 if not x:
343 if not x:
340 raise error.ParseError(_("missing argument"))
344 raise error.ParseError(_("missing argument"))
341 s = methods[x[0]](repo, subset, *x[1:])
345 s = methods[x[0]](repo, subset, *x[1:])
342 if util.safehasattr(s, 'isascending'):
346 if util.safehasattr(s, 'isascending'):
343 return s
347 return s
344 # else case should not happen, because all non-func are internal,
348 # else case should not happen, because all non-func are internal,
345 # ignoring for now.
349 # ignoring for now.
346 if x[0] == 'func' and x[1][0] == 'symbol' and x[1][1] in symbols:
350 if x[0] == 'func' and x[1][0] == 'symbol' and x[1][1] in symbols:
347 repo.ui.deprecwarn('revset "%s" uses list instead of smartset'
351 repo.ui.deprecwarn('revset "%s" uses list instead of smartset'
348 % x[1][1],
352 % x[1][1],
349 '3.9')
353 '3.9')
350 return baseset(s)
354 return baseset(s)
351
355
352 def _getrevsource(repo, r):
356 def _getrevsource(repo, r):
353 extra = repo[r].extra()
357 extra = repo[r].extra()
354 for label in ('source', 'transplant_source', 'rebase_source'):
358 for label in ('source', 'transplant_source', 'rebase_source'):
355 if label in extra:
359 if label in extra:
356 try:
360 try:
357 return repo[extra[label]].rev()
361 return repo[extra[label]].rev()
358 except error.RepoLookupError:
362 except error.RepoLookupError:
359 pass
363 pass
360 return None
364 return None
361
365
362 # operator methods
366 # operator methods
363
367
364 def stringset(repo, subset, x):
368 def stringset(repo, subset, x):
365 x = repo[x].rev()
369 x = repo[x].rev()
366 if (x in subset
370 if (x in subset
367 or x == node.nullrev and isinstance(subset, fullreposet)):
371 or x == node.nullrev and isinstance(subset, fullreposet)):
368 return baseset([x])
372 return baseset([x])
369 return baseset()
373 return baseset()
370
374
371 def rangeset(repo, subset, x, y, order):
375 def rangeset(repo, subset, x, y, order):
372 m = getset(repo, fullreposet(repo), x)
376 m = getset(repo, fullreposet(repo), x)
373 n = getset(repo, fullreposet(repo), y)
377 n = getset(repo, fullreposet(repo), y)
374
378
375 if not m or not n:
379 if not m or not n:
376 return baseset()
380 return baseset()
377 return _makerangeset(repo, subset, m.first(), n.last(), order)
381 return _makerangeset(repo, subset, m.first(), n.last(), order)
378
382
379 def rangepre(repo, subset, y, order):
383 def rangepre(repo, subset, y, order):
380 # ':y' can't be rewritten to '0:y' since '0' may be hidden
384 # ':y' can't be rewritten to '0:y' since '0' may be hidden
381 n = getset(repo, fullreposet(repo), y)
385 n = getset(repo, fullreposet(repo), y)
382 if not n:
386 if not n:
383 return baseset()
387 return baseset()
384 return _makerangeset(repo, subset, 0, n.last(), order)
388 return _makerangeset(repo, subset, 0, n.last(), order)
385
389
386 def _makerangeset(repo, subset, m, n, order):
390 def _makerangeset(repo, subset, m, n, order):
387 if m == n:
391 if m == n:
388 r = baseset([m])
392 r = baseset([m])
389 elif n == node.wdirrev:
393 elif n == node.wdirrev:
390 r = spanset(repo, m, len(repo)) + baseset([n])
394 r = spanset(repo, m, len(repo)) + baseset([n])
391 elif m == node.wdirrev:
395 elif m == node.wdirrev:
392 r = baseset([m]) + spanset(repo, len(repo) - 1, n - 1)
396 r = baseset([m]) + spanset(repo, len(repo) - 1, n - 1)
393 elif m < n:
397 elif m < n:
394 r = spanset(repo, m, n + 1)
398 r = spanset(repo, m, n + 1)
395 else:
399 else:
396 r = spanset(repo, m, n - 1)
400 r = spanset(repo, m, n - 1)
397
401
398 if order == defineorder:
402 if order == defineorder:
399 return r & subset
403 return r & subset
400 else:
404 else:
401 # carrying the sorting over when possible would be more efficient
405 # carrying the sorting over when possible would be more efficient
402 return subset & r
406 return subset & r
403
407
404 def dagrange(repo, subset, x, y, order):
408 def dagrange(repo, subset, x, y, order):
405 r = fullreposet(repo)
409 r = fullreposet(repo)
406 xs = reachableroots(repo, getset(repo, r, x), getset(repo, r, y),
410 xs = reachableroots(repo, getset(repo, r, x), getset(repo, r, y),
407 includepath=True)
411 includepath=True)
408 return subset & xs
412 return subset & xs
409
413
410 def andset(repo, subset, x, y, order):
414 def andset(repo, subset, x, y, order):
411 return getset(repo, getset(repo, subset, x), y)
415 return getset(repo, getset(repo, subset, x), y)
412
416
413 def differenceset(repo, subset, x, y, order):
417 def differenceset(repo, subset, x, y, order):
414 return getset(repo, subset, x) - getset(repo, subset, y)
418 return getset(repo, subset, x) - getset(repo, subset, y)
415
419
416 def _orsetlist(repo, subset, xs):
420 def _orsetlist(repo, subset, xs):
417 assert xs
421 assert xs
418 if len(xs) == 1:
422 if len(xs) == 1:
419 return getset(repo, subset, xs[0])
423 return getset(repo, subset, xs[0])
420 p = len(xs) // 2
424 p = len(xs) // 2
421 a = _orsetlist(repo, subset, xs[:p])
425 a = _orsetlist(repo, subset, xs[:p])
422 b = _orsetlist(repo, subset, xs[p:])
426 b = _orsetlist(repo, subset, xs[p:])
423 return a + b
427 return a + b
424
428
425 def orset(repo, subset, x, order):
429 def orset(repo, subset, x, order):
426 xs = getlist(x)
430 xs = getlist(x)
427 if order == followorder:
431 if order == followorder:
428 # slow path to take the subset order
432 # slow path to take the subset order
429 return subset & _orsetlist(repo, fullreposet(repo), xs)
433 return subset & _orsetlist(repo, fullreposet(repo), xs)
430 else:
434 else:
431 return _orsetlist(repo, subset, xs)
435 return _orsetlist(repo, subset, xs)
432
436
433 def notset(repo, subset, x, order):
437 def notset(repo, subset, x, order):
434 return subset - getset(repo, subset, x)
438 return subset - getset(repo, subset, x)
435
439
436 def listset(repo, subset, *xs):
440 def listset(repo, subset, *xs):
437 raise error.ParseError(_("can't use a list in this context"),
441 raise error.ParseError(_("can't use a list in this context"),
438 hint=_('see hg help "revsets.x or y"'))
442 hint=_('see hg help "revsets.x or y"'))
439
443
440 def keyvaluepair(repo, subset, k, v):
444 def keyvaluepair(repo, subset, k, v):
441 raise error.ParseError(_("can't use a key-value pair in this context"))
445 raise error.ParseError(_("can't use a key-value pair in this context"))
442
446
443 def func(repo, subset, a, b, order):
447 def func(repo, subset, a, b, order):
444 f = getsymbol(a)
448 f = getsymbol(a)
445 if f in symbols:
449 if f in symbols:
446 func = symbols[f]
450 func = symbols[f]
447 if getattr(func, '_takeorder', False):
451 if getattr(func, '_takeorder', False):
448 return func(repo, subset, b, order)
452 return func(repo, subset, b, order)
449 return func(repo, subset, b)
453 return func(repo, subset, b)
450
454
451 keep = lambda fn: getattr(fn, '__doc__', None) is not None
455 keep = lambda fn: getattr(fn, '__doc__', None) is not None
452
456
453 syms = [s for (s, fn) in symbols.items() if keep(fn)]
457 syms = [s for (s, fn) in symbols.items() if keep(fn)]
454 raise error.UnknownIdentifier(f, syms)
458 raise error.UnknownIdentifier(f, syms)
455
459
456 # functions
460 # functions
457
461
458 # symbols are callables like:
462 # symbols are callables like:
459 # fn(repo, subset, x)
463 # fn(repo, subset, x)
460 # with:
464 # with:
461 # repo - current repository instance
465 # repo - current repository instance
462 # subset - of revisions to be examined
466 # subset - of revisions to be examined
463 # x - argument in tree form
467 # x - argument in tree form
464 symbols = {}
468 symbols = {}
465
469
466 # symbols which can't be used for a DoS attack for any given input
470 # symbols which can't be used for a DoS attack for any given input
467 # (e.g. those which accept regexes as plain strings shouldn't be included)
471 # (e.g. those which accept regexes as plain strings shouldn't be included)
468 # functions that just return a lot of changesets (like all) don't count here
472 # functions that just return a lot of changesets (like all) don't count here
469 safesymbols = set()
473 safesymbols = set()
470
474
471 predicate = registrar.revsetpredicate()
475 predicate = registrar.revsetpredicate()
472
476
473 @predicate('_destupdate')
477 @predicate('_destupdate')
474 def _destupdate(repo, subset, x):
478 def _destupdate(repo, subset, x):
475 # experimental revset for update destination
479 # experimental revset for update destination
476 args = getargsdict(x, 'limit', 'clean check')
480 args = getargsdict(x, 'limit', 'clean check')
477 return subset & baseset([destutil.destupdate(repo, **args)[0]])
481 return subset & baseset([destutil.destupdate(repo, **args)[0]])
478
482
479 @predicate('_destmerge')
483 @predicate('_destmerge')
480 def _destmerge(repo, subset, x):
484 def _destmerge(repo, subset, x):
481 # experimental revset for merge destination
485 # experimental revset for merge destination
482 sourceset = None
486 sourceset = None
483 if x is not None:
487 if x is not None:
484 sourceset = getset(repo, fullreposet(repo), x)
488 sourceset = getset(repo, fullreposet(repo), x)
485 return subset & baseset([destutil.destmerge(repo, sourceset=sourceset)])
489 return subset & baseset([destutil.destmerge(repo, sourceset=sourceset)])
486
490
487 @predicate('adds(pattern)', safe=True)
491 @predicate('adds(pattern)', safe=True)
488 def adds(repo, subset, x):
492 def adds(repo, subset, x):
489 """Changesets that add a file matching pattern.
493 """Changesets that add a file matching pattern.
490
494
491 The pattern without explicit kind like ``glob:`` is expected to be
495 The pattern without explicit kind like ``glob:`` is expected to be
492 relative to the current directory and match against a file or a
496 relative to the current directory and match against a file or a
493 directory.
497 directory.
494 """
498 """
495 # i18n: "adds" is a keyword
499 # i18n: "adds" is a keyword
496 pat = getstring(x, _("adds requires a pattern"))
500 pat = getstring(x, _("adds requires a pattern"))
497 return checkstatus(repo, subset, pat, 1)
501 return checkstatus(repo, subset, pat, 1)
498
502
499 @predicate('ancestor(*changeset)', safe=True)
503 @predicate('ancestor(*changeset)', safe=True)
500 def ancestor(repo, subset, x):
504 def ancestor(repo, subset, x):
501 """A greatest common ancestor of the changesets.
505 """A greatest common ancestor of the changesets.
502
506
503 Accepts 0 or more changesets.
507 Accepts 0 or more changesets.
504 Will return empty list when passed no args.
508 Will return empty list when passed no args.
505 Greatest common ancestor of a single changeset is that changeset.
509 Greatest common ancestor of a single changeset is that changeset.
506 """
510 """
507 # i18n: "ancestor" is a keyword
511 # i18n: "ancestor" is a keyword
508 l = getlist(x)
512 l = getlist(x)
509 rl = fullreposet(repo)
513 rl = fullreposet(repo)
510 anc = None
514 anc = None
511
515
512 # (getset(repo, rl, i) for i in l) generates a list of lists
516 # (getset(repo, rl, i) for i in l) generates a list of lists
513 for revs in (getset(repo, rl, i) for i in l):
517 for revs in (getset(repo, rl, i) for i in l):
514 for r in revs:
518 for r in revs:
515 if anc is None:
519 if anc is None:
516 anc = repo[r]
520 anc = repo[r]
517 else:
521 else:
518 anc = anc.ancestor(repo[r])
522 anc = anc.ancestor(repo[r])
519
523
520 if anc is not None and anc.rev() in subset:
524 if anc is not None and anc.rev() in subset:
521 return baseset([anc.rev()])
525 return baseset([anc.rev()])
522 return baseset()
526 return baseset()
523
527
524 def _ancestors(repo, subset, x, followfirst=False):
528 def _ancestors(repo, subset, x, followfirst=False):
525 heads = getset(repo, fullreposet(repo), x)
529 heads = getset(repo, fullreposet(repo), x)
526 if not heads:
530 if not heads:
527 return baseset()
531 return baseset()
528 s = _revancestors(repo, heads, followfirst)
532 s = _revancestors(repo, heads, followfirst)
529 return subset & s
533 return subset & s
530
534
531 @predicate('ancestors(set)', safe=True)
535 @predicate('ancestors(set)', safe=True)
532 def ancestors(repo, subset, x):
536 def ancestors(repo, subset, x):
533 """Changesets that are ancestors of a changeset in set.
537 """Changesets that are ancestors of a changeset in set.
534 """
538 """
535 return _ancestors(repo, subset, x)
539 return _ancestors(repo, subset, x)
536
540
537 @predicate('_firstancestors', safe=True)
541 @predicate('_firstancestors', safe=True)
538 def _firstancestors(repo, subset, x):
542 def _firstancestors(repo, subset, x):
539 # ``_firstancestors(set)``
543 # ``_firstancestors(set)``
540 # Like ``ancestors(set)`` but follows only the first parents.
544 # Like ``ancestors(set)`` but follows only the first parents.
541 return _ancestors(repo, subset, x, followfirst=True)
545 return _ancestors(repo, subset, x, followfirst=True)
542
546
543 def ancestorspec(repo, subset, x, n, order):
547 def ancestorspec(repo, subset, x, n, order):
544 """``set~n``
548 """``set~n``
545 Changesets that are the Nth ancestor (first parents only) of a changeset
549 Changesets that are the Nth ancestor (first parents only) of a changeset
546 in set.
550 in set.
547 """
551 """
548 n = getinteger(n, _("~ expects a number"))
552 n = getinteger(n, _("~ expects a number"))
549 ps = set()
553 ps = set()
550 cl = repo.changelog
554 cl = repo.changelog
551 for r in getset(repo, fullreposet(repo), x):
555 for r in getset(repo, fullreposet(repo), x):
552 for i in range(n):
556 for i in range(n):
553 r = cl.parentrevs(r)[0]
557 r = cl.parentrevs(r)[0]
554 ps.add(r)
558 ps.add(r)
555 return subset & ps
559 return subset & ps
556
560
557 @predicate('author(string)', safe=True)
561 @predicate('author(string)', safe=True)
558 def author(repo, subset, x):
562 def author(repo, subset, x):
559 """Alias for ``user(string)``.
563 """Alias for ``user(string)``.
560 """
564 """
561 # i18n: "author" is a keyword
565 # i18n: "author" is a keyword
562 n = getstring(x, _("author requires a string"))
566 n = getstring(x, _("author requires a string"))
563 kind, pattern, matcher = _substringmatcher(n, casesensitive=False)
567 kind, pattern, matcher = _substringmatcher(n, casesensitive=False)
564 return subset.filter(lambda x: matcher(repo[x].user()),
568 return subset.filter(lambda x: matcher(repo[x].user()),
565 condrepr=('<user %r>', n))
569 condrepr=('<user %r>', n))
566
570
567 @predicate('bisect(string)', safe=True)
571 @predicate('bisect(string)', safe=True)
568 def bisect(repo, subset, x):
572 def bisect(repo, subset, x):
569 """Changesets marked in the specified bisect status:
573 """Changesets marked in the specified bisect status:
570
574
571 - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
575 - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
572 - ``goods``, ``bads`` : csets topologically good/bad
576 - ``goods``, ``bads`` : csets topologically good/bad
573 - ``range`` : csets taking part in the bisection
577 - ``range`` : csets taking part in the bisection
574 - ``pruned`` : csets that are goods, bads or skipped
578 - ``pruned`` : csets that are goods, bads or skipped
575 - ``untested`` : csets whose fate is yet unknown
579 - ``untested`` : csets whose fate is yet unknown
576 - ``ignored`` : csets ignored due to DAG topology
580 - ``ignored`` : csets ignored due to DAG topology
577 - ``current`` : the cset currently being bisected
581 - ``current`` : the cset currently being bisected
578 """
582 """
579 # i18n: "bisect" is a keyword
583 # i18n: "bisect" is a keyword
580 status = getstring(x, _("bisect requires a string")).lower()
584 status = getstring(x, _("bisect requires a string")).lower()
581 state = set(hbisect.get(repo, status))
585 state = set(hbisect.get(repo, status))
582 return subset & state
586 return subset & state
583
587
584 # Backward-compatibility
588 # Backward-compatibility
585 # - no help entry so that we do not advertise it any more
589 # - no help entry so that we do not advertise it any more
586 @predicate('bisected', safe=True)
590 @predicate('bisected', safe=True)
587 def bisected(repo, subset, x):
591 def bisected(repo, subset, x):
588 return bisect(repo, subset, x)
592 return bisect(repo, subset, x)
589
593
590 @predicate('bookmark([name])', safe=True)
594 @predicate('bookmark([name])', safe=True)
591 def bookmark(repo, subset, x):
595 def bookmark(repo, subset, x):
592 """The named bookmark or all bookmarks.
596 """The named bookmark or all bookmarks.
593
597
594 Pattern matching is supported for `name`. See :hg:`help revisions.patterns`.
598 Pattern matching is supported for `name`. See :hg:`help revisions.patterns`.
595 """
599 """
596 # i18n: "bookmark" is a keyword
600 # i18n: "bookmark" is a keyword
597 args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
601 args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
598 if args:
602 if args:
599 bm = getstring(args[0],
603 bm = getstring(args[0],
600 # i18n: "bookmark" is a keyword
604 # i18n: "bookmark" is a keyword
601 _('the argument to bookmark must be a string'))
605 _('the argument to bookmark must be a string'))
602 kind, pattern, matcher = util.stringmatcher(bm)
606 kind, pattern, matcher = util.stringmatcher(bm)
603 bms = set()
607 bms = set()
604 if kind == 'literal':
608 if kind == 'literal':
605 bmrev = repo._bookmarks.get(pattern, None)
609 bmrev = repo._bookmarks.get(pattern, None)
606 if not bmrev:
610 if not bmrev:
607 raise error.RepoLookupError(_("bookmark '%s' does not exist")
611 raise error.RepoLookupError(_("bookmark '%s' does not exist")
608 % pattern)
612 % pattern)
609 bms.add(repo[bmrev].rev())
613 bms.add(repo[bmrev].rev())
610 else:
614 else:
611 matchrevs = set()
615 matchrevs = set()
612 for name, bmrev in repo._bookmarks.iteritems():
616 for name, bmrev in repo._bookmarks.iteritems():
613 if matcher(name):
617 if matcher(name):
614 matchrevs.add(bmrev)
618 matchrevs.add(bmrev)
615 if not matchrevs:
619 if not matchrevs:
616 raise error.RepoLookupError(_("no bookmarks exist"
620 raise error.RepoLookupError(_("no bookmarks exist"
617 " that match '%s'") % pattern)
621 " that match '%s'") % pattern)
618 for bmrev in matchrevs:
622 for bmrev in matchrevs:
619 bms.add(repo[bmrev].rev())
623 bms.add(repo[bmrev].rev())
620 else:
624 else:
621 bms = set([repo[r].rev()
625 bms = set([repo[r].rev()
622 for r in repo._bookmarks.values()])
626 for r in repo._bookmarks.values()])
623 bms -= set([node.nullrev])
627 bms -= set([node.nullrev])
624 return subset & bms
628 return subset & bms
625
629
626 @predicate('branch(string or set)', safe=True)
630 @predicate('branch(string or set)', safe=True)
627 def branch(repo, subset, x):
631 def branch(repo, subset, x):
628 """
632 """
629 All changesets belonging to the given branch or the branches of the given
633 All changesets belonging to the given branch or the branches of the given
630 changesets.
634 changesets.
631
635
632 Pattern matching is supported for `string`. See
636 Pattern matching is supported for `string`. See
633 :hg:`help revisions.patterns`.
637 :hg:`help revisions.patterns`.
634 """
638 """
635 getbi = repo.revbranchcache().branchinfo
639 getbi = repo.revbranchcache().branchinfo
636
640
637 try:
641 try:
638 b = getstring(x, '')
642 b = getstring(x, '')
639 except error.ParseError:
643 except error.ParseError:
640 # not a string, but another revspec, e.g. tip()
644 # not a string, but another revspec, e.g. tip()
641 pass
645 pass
642 else:
646 else:
643 kind, pattern, matcher = util.stringmatcher(b)
647 kind, pattern, matcher = util.stringmatcher(b)
644 if kind == 'literal':
648 if kind == 'literal':
645 # note: falls through to the revspec case if no branch with
649 # note: falls through to the revspec case if no branch with
646 # this name exists and pattern kind is not specified explicitly
650 # this name exists and pattern kind is not specified explicitly
647 if pattern in repo.branchmap():
651 if pattern in repo.branchmap():
648 return subset.filter(lambda r: matcher(getbi(r)[0]),
652 return subset.filter(lambda r: matcher(getbi(r)[0]),
649 condrepr=('<branch %r>', b))
653 condrepr=('<branch %r>', b))
650 if b.startswith('literal:'):
654 if b.startswith('literal:'):
651 raise error.RepoLookupError(_("branch '%s' does not exist")
655 raise error.RepoLookupError(_("branch '%s' does not exist")
652 % pattern)
656 % pattern)
653 else:
657 else:
654 return subset.filter(lambda r: matcher(getbi(r)[0]),
658 return subset.filter(lambda r: matcher(getbi(r)[0]),
655 condrepr=('<branch %r>', b))
659 condrepr=('<branch %r>', b))
656
660
657 s = getset(repo, fullreposet(repo), x)
661 s = getset(repo, fullreposet(repo), x)
658 b = set()
662 b = set()
659 for r in s:
663 for r in s:
660 b.add(getbi(r)[0])
664 b.add(getbi(r)[0])
661 c = s.__contains__
665 c = s.__contains__
662 return subset.filter(lambda r: c(r) or getbi(r)[0] in b,
666 return subset.filter(lambda r: c(r) or getbi(r)[0] in b,
663 condrepr=lambda: '<branch %r>' % sorted(b))
667 condrepr=lambda: '<branch %r>' % sorted(b))
664
668
665 @predicate('bumped()', safe=True)
669 @predicate('bumped()', safe=True)
666 def bumped(repo, subset, x):
670 def bumped(repo, subset, x):
667 """Mutable changesets marked as successors of public changesets.
671 """Mutable changesets marked as successors of public changesets.
668
672
669 Only non-public and non-obsolete changesets can be `bumped`.
673 Only non-public and non-obsolete changesets can be `bumped`.
670 """
674 """
671 # i18n: "bumped" is a keyword
675 # i18n: "bumped" is a keyword
672 getargs(x, 0, 0, _("bumped takes no arguments"))
676 getargs(x, 0, 0, _("bumped takes no arguments"))
673 bumped = obsmod.getrevs(repo, 'bumped')
677 bumped = obsmod.getrevs(repo, 'bumped')
674 return subset & bumped
678 return subset & bumped
675
679
676 @predicate('bundle()', safe=True)
680 @predicate('bundle()', safe=True)
677 def bundle(repo, subset, x):
681 def bundle(repo, subset, x):
678 """Changesets in the bundle.
682 """Changesets in the bundle.
679
683
680 Bundle must be specified by the -R option."""
684 Bundle must be specified by the -R option."""
681
685
682 try:
686 try:
683 bundlerevs = repo.changelog.bundlerevs
687 bundlerevs = repo.changelog.bundlerevs
684 except AttributeError:
688 except AttributeError:
685 raise error.Abort(_("no bundle provided - specify with -R"))
689 raise error.Abort(_("no bundle provided - specify with -R"))
686 return subset & bundlerevs
690 return subset & bundlerevs
687
691
688 def checkstatus(repo, subset, pat, field):
692 def checkstatus(repo, subset, pat, field):
689 hasset = matchmod.patkind(pat) == 'set'
693 hasset = matchmod.patkind(pat) == 'set'
690
694
691 mcache = [None]
695 mcache = [None]
692 def matches(x):
696 def matches(x):
693 c = repo[x]
697 c = repo[x]
694 if not mcache[0] or hasset:
698 if not mcache[0] or hasset:
695 mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
699 mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
696 m = mcache[0]
700 m = mcache[0]
697 fname = None
701 fname = None
698 if not m.anypats() and len(m.files()) == 1:
702 if not m.anypats() and len(m.files()) == 1:
699 fname = m.files()[0]
703 fname = m.files()[0]
700 if fname is not None:
704 if fname is not None:
701 if fname not in c.files():
705 if fname not in c.files():
702 return False
706 return False
703 else:
707 else:
704 for f in c.files():
708 for f in c.files():
705 if m(f):
709 if m(f):
706 break
710 break
707 else:
711 else:
708 return False
712 return False
709 files = repo.status(c.p1().node(), c.node())[field]
713 files = repo.status(c.p1().node(), c.node())[field]
710 if fname is not None:
714 if fname is not None:
711 if fname in files:
715 if fname in files:
712 return True
716 return True
713 else:
717 else:
714 for f in files:
718 for f in files:
715 if m(f):
719 if m(f):
716 return True
720 return True
717
721
718 return subset.filter(matches, condrepr=('<status[%r] %r>', field, pat))
722 return subset.filter(matches, condrepr=('<status[%r] %r>', field, pat))
719
723
720 def _children(repo, subset, parentset):
724 def _children(repo, subset, parentset):
721 if not parentset:
725 if not parentset:
722 return baseset()
726 return baseset()
723 cs = set()
727 cs = set()
724 pr = repo.changelog.parentrevs
728 pr = repo.changelog.parentrevs
725 minrev = parentset.min()
729 minrev = parentset.min()
726 nullrev = node.nullrev
730 nullrev = node.nullrev
727 for r in subset:
731 for r in subset:
728 if r <= minrev:
732 if r <= minrev:
729 continue
733 continue
730 p1, p2 = pr(r)
734 p1, p2 = pr(r)
731 if p1 in parentset:
735 if p1 in parentset:
732 cs.add(r)
736 cs.add(r)
733 if p2 != nullrev and p2 in parentset:
737 if p2 != nullrev and p2 in parentset:
734 cs.add(r)
738 cs.add(r)
735 return baseset(cs)
739 return baseset(cs)
736
740
737 @predicate('children(set)', safe=True)
741 @predicate('children(set)', safe=True)
738 def children(repo, subset, x):
742 def children(repo, subset, x):
739 """Child changesets of changesets in set.
743 """Child changesets of changesets in set.
740 """
744 """
741 s = getset(repo, fullreposet(repo), x)
745 s = getset(repo, fullreposet(repo), x)
742 cs = _children(repo, subset, s)
746 cs = _children(repo, subset, s)
743 return subset & cs
747 return subset & cs
744
748
745 @predicate('closed()', safe=True)
749 @predicate('closed()', safe=True)
746 def closed(repo, subset, x):
750 def closed(repo, subset, x):
747 """Changeset is closed.
751 """Changeset is closed.
748 """
752 """
749 # i18n: "closed" is a keyword
753 # i18n: "closed" is a keyword
750 getargs(x, 0, 0, _("closed takes no arguments"))
754 getargs(x, 0, 0, _("closed takes no arguments"))
751 return subset.filter(lambda r: repo[r].closesbranch(),
755 return subset.filter(lambda r: repo[r].closesbranch(),
752 condrepr='<branch closed>')
756 condrepr='<branch closed>')
753
757
754 @predicate('contains(pattern)')
758 @predicate('contains(pattern)')
755 def contains(repo, subset, x):
759 def contains(repo, subset, x):
756 """The revision's manifest contains a file matching pattern (but might not
760 """The revision's manifest contains a file matching pattern (but might not
757 modify it). See :hg:`help patterns` for information about file patterns.
761 modify it). See :hg:`help patterns` for information about file patterns.
758
762
759 The pattern without explicit kind like ``glob:`` is expected to be
763 The pattern without explicit kind like ``glob:`` is expected to be
760 relative to the current directory and match against a file exactly
764 relative to the current directory and match against a file exactly
761 for efficiency.
765 for efficiency.
762 """
766 """
763 # i18n: "contains" is a keyword
767 # i18n: "contains" is a keyword
764 pat = getstring(x, _("contains requires a pattern"))
768 pat = getstring(x, _("contains requires a pattern"))
765
769
766 def matches(x):
770 def matches(x):
767 if not matchmod.patkind(pat):
771 if not matchmod.patkind(pat):
768 pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
772 pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
769 if pats in repo[x]:
773 if pats in repo[x]:
770 return True
774 return True
771 else:
775 else:
772 c = repo[x]
776 c = repo[x]
773 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
777 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
774 for f in c.manifest():
778 for f in c.manifest():
775 if m(f):
779 if m(f):
776 return True
780 return True
777 return False
781 return False
778
782
779 return subset.filter(matches, condrepr=('<contains %r>', pat))
783 return subset.filter(matches, condrepr=('<contains %r>', pat))
780
784
781 @predicate('converted([id])', safe=True)
785 @predicate('converted([id])', safe=True)
782 def converted(repo, subset, x):
786 def converted(repo, subset, x):
783 """Changesets converted from the given identifier in the old repository if
787 """Changesets converted from the given identifier in the old repository if
784 present, or all converted changesets if no identifier is specified.
788 present, or all converted changesets if no identifier is specified.
785 """
789 """
786
790
787 # There is exactly no chance of resolving the revision, so do a simple
791 # There is exactly no chance of resolving the revision, so do a simple
788 # string compare and hope for the best
792 # string compare and hope for the best
789
793
790 rev = None
794 rev = None
791 # i18n: "converted" is a keyword
795 # i18n: "converted" is a keyword
792 l = getargs(x, 0, 1, _('converted takes one or no arguments'))
796 l = getargs(x, 0, 1, _('converted takes one or no arguments'))
793 if l:
797 if l:
794 # i18n: "converted" is a keyword
798 # i18n: "converted" is a keyword
795 rev = getstring(l[0], _('converted requires a revision'))
799 rev = getstring(l[0], _('converted requires a revision'))
796
800
797 def _matchvalue(r):
801 def _matchvalue(r):
798 source = repo[r].extra().get('convert_revision', None)
802 source = repo[r].extra().get('convert_revision', None)
799 return source is not None and (rev is None or source.startswith(rev))
803 return source is not None and (rev is None or source.startswith(rev))
800
804
801 return subset.filter(lambda r: _matchvalue(r),
805 return subset.filter(lambda r: _matchvalue(r),
802 condrepr=('<converted %r>', rev))
806 condrepr=('<converted %r>', rev))
803
807
804 @predicate('date(interval)', safe=True)
808 @predicate('date(interval)', safe=True)
805 def date(repo, subset, x):
809 def date(repo, subset, x):
806 """Changesets within the interval, see :hg:`help dates`.
810 """Changesets within the interval, see :hg:`help dates`.
807 """
811 """
808 # i18n: "date" is a keyword
812 # i18n: "date" is a keyword
809 ds = getstring(x, _("date requires a string"))
813 ds = getstring(x, _("date requires a string"))
810 dm = util.matchdate(ds)
814 dm = util.matchdate(ds)
811 return subset.filter(lambda x: dm(repo[x].date()[0]),
815 return subset.filter(lambda x: dm(repo[x].date()[0]),
812 condrepr=('<date %r>', ds))
816 condrepr=('<date %r>', ds))
813
817
814 @predicate('desc(string)', safe=True)
818 @predicate('desc(string)', safe=True)
815 def desc(repo, subset, x):
819 def desc(repo, subset, x):
816 """Search commit message for string. The match is case-insensitive.
820 """Search commit message for string. The match is case-insensitive.
817
821
818 Pattern matching is supported for `string`. See
822 Pattern matching is supported for `string`. See
819 :hg:`help revisions.patterns`.
823 :hg:`help revisions.patterns`.
820 """
824 """
821 # i18n: "desc" is a keyword
825 # i18n: "desc" is a keyword
822 ds = getstring(x, _("desc requires a string"))
826 ds = getstring(x, _("desc requires a string"))
823
827
824 kind, pattern, matcher = _substringmatcher(ds, casesensitive=False)
828 kind, pattern, matcher = _substringmatcher(ds, casesensitive=False)
825
829
826 return subset.filter(lambda r: matcher(repo[r].description()),
830 return subset.filter(lambda r: matcher(repo[r].description()),
827 condrepr=('<desc %r>', ds))
831 condrepr=('<desc %r>', ds))
828
832
829 def _descendants(repo, subset, x, followfirst=False):
833 def _descendants(repo, subset, x, followfirst=False):
830 roots = getset(repo, fullreposet(repo), x)
834 roots = getset(repo, fullreposet(repo), x)
831 if not roots:
835 if not roots:
832 return baseset()
836 return baseset()
833 s = _revdescendants(repo, roots, followfirst)
837 s = _revdescendants(repo, roots, followfirst)
834
838
835 # Both sets need to be ascending in order to lazily return the union
839 # Both sets need to be ascending in order to lazily return the union
836 # in the correct order.
840 # in the correct order.
837 base = subset & roots
841 base = subset & roots
838 desc = subset & s
842 desc = subset & s
839 result = base + desc
843 result = base + desc
840 if subset.isascending():
844 if subset.isascending():
841 result.sort()
845 result.sort()
842 elif subset.isdescending():
846 elif subset.isdescending():
843 result.sort(reverse=True)
847 result.sort(reverse=True)
844 else:
848 else:
845 result = subset & result
849 result = subset & result
846 return result
850 return result
847
851
848 @predicate('descendants(set)', safe=True)
852 @predicate('descendants(set)', safe=True)
849 def descendants(repo, subset, x):
853 def descendants(repo, subset, x):
850 """Changesets which are descendants of changesets in set.
854 """Changesets which are descendants of changesets in set.
851 """
855 """
852 return _descendants(repo, subset, x)
856 return _descendants(repo, subset, x)
853
857
854 @predicate('_firstdescendants', safe=True)
858 @predicate('_firstdescendants', safe=True)
855 def _firstdescendants(repo, subset, x):
859 def _firstdescendants(repo, subset, x):
856 # ``_firstdescendants(set)``
860 # ``_firstdescendants(set)``
857 # Like ``descendants(set)`` but follows only the first parents.
861 # Like ``descendants(set)`` but follows only the first parents.
858 return _descendants(repo, subset, x, followfirst=True)
862 return _descendants(repo, subset, x, followfirst=True)
859
863
860 @predicate('destination([set])', safe=True)
864 @predicate('destination([set])', safe=True)
861 def destination(repo, subset, x):
865 def destination(repo, subset, x):
862 """Changesets that were created by a graft, transplant or rebase operation,
866 """Changesets that were created by a graft, transplant or rebase operation,
863 with the given revisions specified as the source. Omitting the optional set
867 with the given revisions specified as the source. Omitting the optional set
864 is the same as passing all().
868 is the same as passing all().
865 """
869 """
866 if x is not None:
870 if x is not None:
867 sources = getset(repo, fullreposet(repo), x)
871 sources = getset(repo, fullreposet(repo), x)
868 else:
872 else:
869 sources = fullreposet(repo)
873 sources = fullreposet(repo)
870
874
871 dests = set()
875 dests = set()
872
876
873 # subset contains all of the possible destinations that can be returned, so
877 # subset contains all of the possible destinations that can be returned, so
874 # iterate over them and see if their source(s) were provided in the arg set.
878 # iterate over them and see if their source(s) were provided in the arg set.
875 # Even if the immediate src of r is not in the arg set, src's source (or
879 # Even if the immediate src of r is not in the arg set, src's source (or
876 # further back) may be. Scanning back further than the immediate src allows
880 # further back) may be. Scanning back further than the immediate src allows
877 # transitive transplants and rebases to yield the same results as transitive
881 # transitive transplants and rebases to yield the same results as transitive
878 # grafts.
882 # grafts.
879 for r in subset:
883 for r in subset:
880 src = _getrevsource(repo, r)
884 src = _getrevsource(repo, r)
881 lineage = None
885 lineage = None
882
886
883 while src is not None:
887 while src is not None:
884 if lineage is None:
888 if lineage is None:
885 lineage = list()
889 lineage = list()
886
890
887 lineage.append(r)
891 lineage.append(r)
888
892
889 # The visited lineage is a match if the current source is in the arg
893 # The visited lineage is a match if the current source is in the arg
890 # set. Since every candidate dest is visited by way of iterating
894 # set. Since every candidate dest is visited by way of iterating
891 # subset, any dests further back in the lineage will be tested by a
895 # subset, any dests further back in the lineage will be tested by a
892 # different iteration over subset. Likewise, if the src was already
896 # different iteration over subset. Likewise, if the src was already
893 # selected, the current lineage can be selected without going back
897 # selected, the current lineage can be selected without going back
894 # further.
898 # further.
895 if src in sources or src in dests:
899 if src in sources or src in dests:
896 dests.update(lineage)
900 dests.update(lineage)
897 break
901 break
898
902
899 r = src
903 r = src
900 src = _getrevsource(repo, r)
904 src = _getrevsource(repo, r)
901
905
902 return subset.filter(dests.__contains__,
906 return subset.filter(dests.__contains__,
903 condrepr=lambda: '<destination %r>' % sorted(dests))
907 condrepr=lambda: '<destination %r>' % sorted(dests))
904
908
905 @predicate('divergent()', safe=True)
909 @predicate('divergent()', safe=True)
906 def divergent(repo, subset, x):
910 def divergent(repo, subset, x):
907 """
911 """
908 Final successors of changesets with an alternative set of final successors.
912 Final successors of changesets with an alternative set of final successors.
909 """
913 """
910 # i18n: "divergent" is a keyword
914 # i18n: "divergent" is a keyword
911 getargs(x, 0, 0, _("divergent takes no arguments"))
915 getargs(x, 0, 0, _("divergent takes no arguments"))
912 divergent = obsmod.getrevs(repo, 'divergent')
916 divergent = obsmod.getrevs(repo, 'divergent')
913 return subset & divergent
917 return subset & divergent
914
918
915 @predicate('extinct()', safe=True)
919 @predicate('extinct()', safe=True)
916 def extinct(repo, subset, x):
920 def extinct(repo, subset, x):
917 """Obsolete changesets with obsolete descendants only.
921 """Obsolete changesets with obsolete descendants only.
918 """
922 """
919 # i18n: "extinct" is a keyword
923 # i18n: "extinct" is a keyword
920 getargs(x, 0, 0, _("extinct takes no arguments"))
924 getargs(x, 0, 0, _("extinct takes no arguments"))
921 extincts = obsmod.getrevs(repo, 'extinct')
925 extincts = obsmod.getrevs(repo, 'extinct')
922 return subset & extincts
926 return subset & extincts
923
927
924 @predicate('extra(label, [value])', safe=True)
928 @predicate('extra(label, [value])', safe=True)
925 def extra(repo, subset, x):
929 def extra(repo, subset, x):
926 """Changesets with the given label in the extra metadata, with the given
930 """Changesets with the given label in the extra metadata, with the given
927 optional value.
931 optional value.
928
932
929 Pattern matching is supported for `value`. See
933 Pattern matching is supported for `value`. See
930 :hg:`help revisions.patterns`.
934 :hg:`help revisions.patterns`.
931 """
935 """
932 args = getargsdict(x, 'extra', 'label value')
936 args = getargsdict(x, 'extra', 'label value')
933 if 'label' not in args:
937 if 'label' not in args:
934 # i18n: "extra" is a keyword
938 # i18n: "extra" is a keyword
935 raise error.ParseError(_('extra takes at least 1 argument'))
939 raise error.ParseError(_('extra takes at least 1 argument'))
936 # i18n: "extra" is a keyword
940 # i18n: "extra" is a keyword
937 label = getstring(args['label'], _('first argument to extra must be '
941 label = getstring(args['label'], _('first argument to extra must be '
938 'a string'))
942 'a string'))
939 value = None
943 value = None
940
944
941 if 'value' in args:
945 if 'value' in args:
942 # i18n: "extra" is a keyword
946 # i18n: "extra" is a keyword
943 value = getstring(args['value'], _('second argument to extra must be '
947 value = getstring(args['value'], _('second argument to extra must be '
944 'a string'))
948 'a string'))
945 kind, value, matcher = util.stringmatcher(value)
949 kind, value, matcher = util.stringmatcher(value)
946
950
947 def _matchvalue(r):
951 def _matchvalue(r):
948 extra = repo[r].extra()
952 extra = repo[r].extra()
949 return label in extra and (value is None or matcher(extra[label]))
953 return label in extra and (value is None or matcher(extra[label]))
950
954
951 return subset.filter(lambda r: _matchvalue(r),
955 return subset.filter(lambda r: _matchvalue(r),
952 condrepr=('<extra[%r] %r>', label, value))
956 condrepr=('<extra[%r] %r>', label, value))
953
957
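As a rough illustration of the filter above, the following standalone sketch reimplements the label/value test using only the 're:' prefix of util.stringmatcher; the 'extras' dict is a hypothetical stand-in for {rev: repo[rev].extra()}.

import re

def match_extra(extras, label, value=None):
    if value is None:
        matcher = lambda s: True
    elif value.startswith('re:'):
        matcher = re.compile(value[3:]).search
    else:
        matcher = lambda s: s == value          # literal match
    return [r for r, ext in sorted(extras.items())
            if label in ext and matcher(ext[label])]

extras = {0: {'branch': 'default'}, 1: {'branch': 'stable', 'close': '1'}}
print(match_extra(extras, 'close'))              # [1]
print(match_extra(extras, 'branch', 're:^sta'))  # [1]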
954 @predicate('filelog(pattern)', safe=True)
958 @predicate('filelog(pattern)', safe=True)
955 def filelog(repo, subset, x):
959 def filelog(repo, subset, x):
956 """Changesets connected to the specified filelog.
960 """Changesets connected to the specified filelog.
957
961
958 For performance reasons, visits only revisions mentioned in the file-level
962 For performance reasons, visits only revisions mentioned in the file-level
959 filelog, rather than filtering through all changesets (much faster, but
963 filelog, rather than filtering through all changesets (much faster, but
960 doesn't include deletes or duplicate changes). For a slower, more accurate
964 doesn't include deletes or duplicate changes). For a slower, more accurate
961 result, use ``file()``.
965 result, use ``file()``.
962
966
963 The pattern without explicit kind like ``glob:`` is expected to be
967 The pattern without explicit kind like ``glob:`` is expected to be
964 relative to the current directory and match against a file exactly
968 relative to the current directory and match against a file exactly
965 for efficiency.
969 for efficiency.
966
970
967 If some linkrev points to revisions filtered by the current repoview, we'll
971 If some linkrev points to revisions filtered by the current repoview, we'll
968 work around it to return a non-filtered value.
972 work around it to return a non-filtered value.
969 """
973 """
970
974
971 # i18n: "filelog" is a keyword
975 # i18n: "filelog" is a keyword
972 pat = getstring(x, _("filelog requires a pattern"))
976 pat = getstring(x, _("filelog requires a pattern"))
973 s = set()
977 s = set()
974 cl = repo.changelog
978 cl = repo.changelog
975
979
976 if not matchmod.patkind(pat):
980 if not matchmod.patkind(pat):
977 f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
981 f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
978 files = [f]
982 files = [f]
979 else:
983 else:
980 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
984 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
981 files = (f for f in repo[None] if m(f))
985 files = (f for f in repo[None] if m(f))
982
986
983 for f in files:
987 for f in files:
984 fl = repo.file(f)
988 fl = repo.file(f)
985 known = {}
989 known = {}
986 scanpos = 0
990 scanpos = 0
987 for fr in list(fl):
991 for fr in list(fl):
988 fn = fl.node(fr)
992 fn = fl.node(fr)
989 if fn in known:
993 if fn in known:
990 s.add(known[fn])
994 s.add(known[fn])
991 continue
995 continue
992
996
993 lr = fl.linkrev(fr)
997 lr = fl.linkrev(fr)
994 if lr in cl:
998 if lr in cl:
995 s.add(lr)
999 s.add(lr)
996 elif scanpos is not None:
1000 elif scanpos is not None:
997 # lowest matching changeset is filtered, scan further
1001 # lowest matching changeset is filtered, scan further
998 # ahead in changelog
1002 # ahead in changelog
999 start = max(lr, scanpos) + 1
1003 start = max(lr, scanpos) + 1
1000 scanpos = None
1004 scanpos = None
1001 for r in cl.revs(start):
1005 for r in cl.revs(start):
1002 # minimize parsing of non-matching entries
1006 # minimize parsing of non-matching entries
1003 if f in cl.revision(r) and f in cl.readfiles(r):
1007 if f in cl.revision(r) and f in cl.readfiles(r):
1004 try:
1008 try:
1005 # try to use manifest delta fastpath
1009 # try to use manifest delta fastpath
1006 n = repo[r].filenode(f)
1010 n = repo[r].filenode(f)
1007 if n not in known:
1011 if n not in known:
1008 if n == fn:
1012 if n == fn:
1009 s.add(r)
1013 s.add(r)
1010 scanpos = r
1014 scanpos = r
1011 break
1015 break
1012 else:
1016 else:
1013 known[n] = r
1017 known[n] = r
1014 except error.ManifestLookupError:
1018 except error.ManifestLookupError:
1015 # deletion in changelog
1019 # deletion in changelog
1016 continue
1020 continue
1017
1021
1018 return subset & s
1022 return subset & s
1019
1023
1020 @predicate('first(set, [n])', safe=True)
1024 @predicate('first(set, [n])', safe=True)
1021 def first(repo, subset, x):
1025 def first(repo, subset, x):
1022 """An alias for limit().
1026 """An alias for limit().
1023 """
1027 """
1024 return limit(repo, subset, x)
1028 return limit(repo, subset, x)
1025
1029
1026 def _follow(repo, subset, x, name, followfirst=False):
1030 def _follow(repo, subset, x, name, followfirst=False):
1027 l = getargs(x, 0, 2, _("%s takes no arguments or a pattern "
1031 l = getargs(x, 0, 2, _("%s takes no arguments or a pattern "
1028 "and an optional revset") % name)
1032 "and an optional revset") % name)
1029 c = repo['.']
1033 c = repo['.']
1030 if l:
1034 if l:
1031 x = getstring(l[0], _("%s expected a pattern") % name)
1035 x = getstring(l[0], _("%s expected a pattern") % name)
1032 rev = None
1036 rev = None
1033 if len(l) >= 2:
1037 if len(l) >= 2:
1034 revs = getset(repo, fullreposet(repo), l[1])
1038 revs = getset(repo, fullreposet(repo), l[1])
1035 if len(revs) != 1:
1039 if len(revs) != 1:
1036 raise error.RepoLookupError(
1040 raise error.RepoLookupError(
1037 _("%s expected one starting revision") % name)
1041 _("%s expected one starting revision") % name)
1038 rev = revs.last()
1042 rev = revs.last()
1039 c = repo[rev]
1043 c = repo[rev]
1040 matcher = matchmod.match(repo.root, repo.getcwd(), [x],
1044 matcher = matchmod.match(repo.root, repo.getcwd(), [x],
1041 ctx=repo[rev], default='path')
1045 ctx=repo[rev], default='path')
1042
1046
1043 files = c.manifest().walk(matcher)
1047 files = c.manifest().walk(matcher)
1044
1048
1045 s = set()
1049 s = set()
1046 for fname in files:
1050 for fname in files:
1047 fctx = c[fname]
1051 fctx = c[fname]
1048 s = s.union(set(c.rev() for c in fctx.ancestors(followfirst)))
1052 s = s.union(set(c.rev() for c in fctx.ancestors(followfirst)))
1049 # include the revision responsible for the most recent version
1053 # include the revision responsible for the most recent version
1050 s.add(fctx.introrev())
1054 s.add(fctx.introrev())
1051 else:
1055 else:
1052 s = _revancestors(repo, baseset([c.rev()]), followfirst)
1056 s = _revancestors(repo, baseset([c.rev()]), followfirst)
1053
1057
1054 return subset & s
1058 return subset & s
1055
1059
1056 @predicate('follow([pattern[, startrev]])', safe=True)
1060 @predicate('follow([pattern[, startrev]])', safe=True)
1057 def follow(repo, subset, x):
1061 def follow(repo, subset, x):
1058 """
1062 """
1059 An alias for ``::.`` (ancestors of the working directory's first parent).
1063 An alias for ``::.`` (ancestors of the working directory's first parent).
1060 If pattern is specified, the histories of files matching the given
1064 If pattern is specified, the histories of files matching the given
1061 pattern in the revision given by startrev are followed, including copies.
1065 pattern in the revision given by startrev are followed, including copies.
1062 """
1066 """
1063 return _follow(repo, subset, x, 'follow')
1067 return _follow(repo, subset, x, 'follow')
1064
1068
1065 @predicate('_followfirst', safe=True)
1069 @predicate('_followfirst', safe=True)
1066 def _followfirst(repo, subset, x):
1070 def _followfirst(repo, subset, x):
1067 # ``followfirst([pattern[, startrev]])``
1071 # ``followfirst([pattern[, startrev]])``
1068 # Like ``follow([pattern[, startrev]])`` but follows only the first parent
1072 # Like ``follow([pattern[, startrev]])`` but follows only the first parent
1068 # of every revision or file revision.
1072 # of every revision or file revision.
1070 return _follow(repo, subset, x, '_followfirst', followfirst=True)
1074 return _follow(repo, subset, x, '_followfirst', followfirst=True)
1071
1075
1072 @predicate('followlines(file, fromline, toline[, startrev=.])', safe=True)
1076 @predicate('followlines(file, fromline, toline[, startrev=.])', safe=True)
1073 def followlines(repo, subset, x):
1077 def followlines(repo, subset, x):
1074 """Changesets modifying `file` in line range ('fromline', 'toline').
1078 """Changesets modifying `file` in line range ('fromline', 'toline').
1075
1079
1076 Line range corresponds to 'file' content at 'startrev' and should hence be
1080 Line range corresponds to 'file' content at 'startrev' and should hence be
1077 consistent with file size. If startrev is not specified, the working directory's
1081 consistent with file size. If startrev is not specified, the working directory's
1078 parent is used.
1082 parent is used.
1079 """
1083 """
1080 from . import context # avoid circular import issues
1084 from . import context # avoid circular import issues
1081
1085
1082 args = getargsdict(x, 'followlines', 'file *lines startrev')
1086 args = getargsdict(x, 'followlines', 'file *lines startrev')
1083 if len(args['lines']) != 2:
1087 if len(args['lines']) != 2:
1084 raise error.ParseError(_("followlines takes at least three arguments"))
1088 raise error.ParseError(_("followlines takes at least three arguments"))
1085
1089
1086 rev = '.'
1090 rev = '.'
1087 if 'startrev' in args:
1091 if 'startrev' in args:
1088 revs = getset(repo, fullreposet(repo), args['startrev'])
1092 revs = getset(repo, fullreposet(repo), args['startrev'])
1089 if len(revs) != 1:
1093 if len(revs) != 1:
1090 raise error.ParseError(
1094 raise error.ParseError(
1091 _("followlines expects exactly one revision"))
1095 _("followlines expects exactly one revision"))
1092 rev = revs.last()
1096 rev = revs.last()
1093
1097
1094 pat = getstring(args['file'], _("followlines requires a pattern"))
1098 pat = getstring(args['file'], _("followlines requires a pattern"))
1095 if not matchmod.patkind(pat):
1099 if not matchmod.patkind(pat):
1096 fname = pathutil.canonpath(repo.root, repo.getcwd(), pat)
1100 fname = pathutil.canonpath(repo.root, repo.getcwd(), pat)
1097 else:
1101 else:
1098 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[rev])
1102 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[rev])
1099 files = [f for f in repo[rev] if m(f)]
1103 files = [f for f in repo[rev] if m(f)]
1100 if len(files) != 1:
1104 if len(files) != 1:
1101 raise error.ParseError(_("followlines expects exactly one file"))
1105 raise error.ParseError(_("followlines expects exactly one file"))
1102 fname = files[0]
1106 fname = files[0]
1103
1107
1104 fromline, toline = [getinteger(a, _("line range bounds must be integers"))
1108 fromline, toline = [getinteger(a, _("line range bounds must be integers"))
1105 for a in args['lines']]
1109 for a in args['lines']]
1106 if toline - fromline < 0:
1110 if toline - fromline < 0:
1107 raise error.ParseError(_("line range must be positive"))
1111 raise error.ParseError(_("line range must be positive"))
1108 if fromline < 1:
1112 if fromline < 1:
1109 raise error.ParseError(_("fromline must be strictly positive"))
1113 raise error.ParseError(_("fromline must be strictly positive"))
1110 fromline -= 1
1114 fromline -= 1
1111
1115
1112 fctx = repo[rev].filectx(fname)
1116 fctx = repo[rev].filectx(fname)
1113 revs = (c.rev() for c in context.blockancestors(fctx, fromline, toline))
1117 revs = (c.rev() for c in context.blockancestors(fctx, fromline, toline))
1114 return subset & generatorset(revs, iterasc=False)
1118 return subset & generatorset(revs, iterasc=False)
1115
1119
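The bounds handling above converts the user-facing 1-based, inclusive line range into a 0-based start before walking block ancestors. A small sketch of that conversion (names are illustrative):

def parse_linerange(fromline, toline):
    if toline - fromline < 0:
        raise ValueError("line range must be positive")
    if fromline < 1:
        raise ValueError("fromline must be strictly positive")
    return fromline - 1, toline

lines = ["a", "b", "c", "d", "e"]
lo, hi = parse_linerange(2, 4)
print(lines[lo:hi])  # ['b', 'c', 'd'], i.e. lines 2..4 of the file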
1116 @predicate('all()', safe=True)
1120 @predicate('all()', safe=True)
1117 def getall(repo, subset, x):
1121 def getall(repo, subset, x):
1118 """All changesets, the same as ``0:tip``.
1122 """All changesets, the same as ``0:tip``.
1119 """
1123 """
1120 # i18n: "all" is a keyword
1124 # i18n: "all" is a keyword
1121 getargs(x, 0, 0, _("all takes no arguments"))
1125 getargs(x, 0, 0, _("all takes no arguments"))
1122 return subset & spanset(repo) # drop "null" if any
1126 return subset & spanset(repo) # drop "null" if any
1123
1127
1124 @predicate('grep(regex)')
1128 @predicate('grep(regex)')
1125 def grep(repo, subset, x):
1129 def grep(repo, subset, x):
1126 """Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
1130 """Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
1127 to ensure special escape characters are handled correctly. Unlike
1131 to ensure special escape characters are handled correctly. Unlike
1128 ``keyword(string)``, the match is case-sensitive.
1132 ``keyword(string)``, the match is case-sensitive.
1129 """
1133 """
1130 try:
1134 try:
1131 # i18n: "grep" is a keyword
1135 # i18n: "grep" is a keyword
1132 gr = re.compile(getstring(x, _("grep requires a string")))
1136 gr = re.compile(getstring(x, _("grep requires a string")))
1133 except re.error as e:
1137 except re.error as e:
1134 raise error.ParseError(_('invalid match pattern: %s') % e)
1138 raise error.ParseError(_('invalid match pattern: %s') % e)
1135
1139
1136 def matches(x):
1140 def matches(x):
1137 c = repo[x]
1141 c = repo[x]
1138 for e in c.files() + [c.user(), c.description()]:
1142 for e in c.files() + [c.user(), c.description()]:
1139 if gr.search(e):
1143 if gr.search(e):
1140 return True
1144 return True
1141 return False
1145 return False
1142
1146
1143 return subset.filter(matches, condrepr=('<grep %r>', gr.pattern))
1147 return subset.filter(matches, condrepr=('<grep %r>', gr.pattern))
1144
1148
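The contrast with keyword() is easiest to see side by side. In this standalone sketch, a made-up 'commits' dict stands in for repo[r]: grep() applies a case-sensitive regex, keyword() a case-insensitive substring test, both over files, user and description.

import re

commits = {
    0: {'files': ['README'], 'user': 'alice', 'desc': 'Fix typo'},
    1: {'files': ['revset.py'], 'user': 'bob', 'desc': 'revset: add helper'},
}

def grep_revs(pattern):
    gr = re.compile(pattern)
    return [r for r, c in commits.items()
            if any(gr.search(t) for t in c['files'] + [c['user'], c['desc']])]

def keyword_revs(kw):
    kw = kw.lower()
    return [r for r, c in commits.items()
            if any(kw in t.lower() for t in c['files'] + [c['user'], c['desc']])]

print(grep_revs(r'revset:'))   # [1] -- regex, case-sensitive
print(keyword_revs('readme'))  # [0] -- substring, case-insensitive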
1145 @predicate('_matchfiles', safe=True)
1149 @predicate('_matchfiles', safe=True)
1146 def _matchfiles(repo, subset, x):
1150 def _matchfiles(repo, subset, x):
1147 # _matchfiles takes a revset list of prefixed arguments:
1151 # _matchfiles takes a revset list of prefixed arguments:
1148 #
1152 #
1149 # [p:foo, i:bar, x:baz]
1153 # [p:foo, i:bar, x:baz]
1150 #
1154 #
1151 # builds a match object from them and filters subset. Allowed
1155 # builds a match object from them and filters subset. Allowed
1152 # prefixes are 'p:' for regular patterns, 'i:' for include
1156 # prefixes are 'p:' for regular patterns, 'i:' for include
1153 # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
1157 # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
1154 # a revision identifier, or the empty string to reference the
1158 # a revision identifier, or the empty string to reference the
1155 # working directory, from which the match object is
1159 # working directory, from which the match object is
1156 # initialized. Use 'd:' to set the default matching mode, defaulting
1160 # initialized. Use 'd:' to set the default matching mode, defaulting
1157 # to 'glob'. At most one 'r:' and 'd:' argument can be passed.
1161 # to 'glob'. At most one 'r:' and 'd:' argument can be passed.
1158
1162
1159 l = getargs(x, 1, -1, "_matchfiles requires at least one argument")
1163 l = getargs(x, 1, -1, "_matchfiles requires at least one argument")
1160 pats, inc, exc = [], [], []
1164 pats, inc, exc = [], [], []
1161 rev, default = None, None
1165 rev, default = None, None
1162 for arg in l:
1166 for arg in l:
1163 s = getstring(arg, "_matchfiles requires string arguments")
1167 s = getstring(arg, "_matchfiles requires string arguments")
1164 prefix, value = s[:2], s[2:]
1168 prefix, value = s[:2], s[2:]
1165 if prefix == 'p:':
1169 if prefix == 'p:':
1166 pats.append(value)
1170 pats.append(value)
1167 elif prefix == 'i:':
1171 elif prefix == 'i:':
1168 inc.append(value)
1172 inc.append(value)
1169 elif prefix == 'x:':
1173 elif prefix == 'x:':
1170 exc.append(value)
1174 exc.append(value)
1171 elif prefix == 'r:':
1175 elif prefix == 'r:':
1172 if rev is not None:
1176 if rev is not None:
1173 raise error.ParseError('_matchfiles expected at most one '
1177 raise error.ParseError('_matchfiles expected at most one '
1174 'revision')
1178 'revision')
1175 if value != '': # empty means working directory; leave rev as None
1179 if value != '': # empty means working directory; leave rev as None
1176 rev = value
1180 rev = value
1177 elif prefix == 'd:':
1181 elif prefix == 'd:':
1178 if default is not None:
1182 if default is not None:
1179 raise error.ParseError('_matchfiles expected at most one '
1183 raise error.ParseError('_matchfiles expected at most one '
1180 'default mode')
1184 'default mode')
1181 default = value
1185 default = value
1182 else:
1186 else:
1183 raise error.ParseError('invalid _matchfiles prefix: %s' % prefix)
1187 raise error.ParseError('invalid _matchfiles prefix: %s' % prefix)
1184 if not default:
1188 if not default:
1185 default = 'glob'
1189 default = 'glob'
1186
1190
1187 m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
1191 m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
1188 exclude=exc, ctx=repo[rev], default=default)
1192 exclude=exc, ctx=repo[rev], default=default)
1189
1193
1190 # This directly reads the changelog data, as creating a changectx for all
1194 # This directly reads the changelog data, as creating a changectx for all
1191 # revisions is quite expensive.
1195 # revisions is quite expensive.
1192 getfiles = repo.changelog.readfiles
1196 getfiles = repo.changelog.readfiles
1193 wdirrev = node.wdirrev
1197 wdirrev = node.wdirrev
1194 def matches(x):
1198 def matches(x):
1195 if x == wdirrev:
1199 if x == wdirrev:
1196 files = repo[x].files()
1200 files = repo[x].files()
1197 else:
1201 else:
1198 files = getfiles(x)
1202 files = getfiles(x)
1199 for f in files:
1203 for f in files:
1200 if m(f):
1204 if m(f):
1201 return True
1205 return True
1202 return False
1206 return False
1203
1207
1204 return subset.filter(matches,
1208 return subset.filter(matches,
1205 condrepr=('<matchfiles patterns=%r, include=%r '
1209 condrepr=('<matchfiles patterns=%r, include=%r '
1206 'exclude=%r, default=%r, rev=%r>',
1210 'exclude=%r, default=%r, rev=%r>',
1207 pats, inc, exc, default, rev))
1211 pats, inc, exc, default, rev))
1208
1212
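Below is a toy parser for the prefixed-argument convention documented above ('p:', 'i:', 'x:', 'r:', 'd:'); the duplicate-argument errors are left out to keep the sketch short.

def parse_matchfiles_args(args):
    pats, inc, exc = [], [], []
    rev, default = None, None
    for s in args:
        prefix, value = s[:2], s[2:]
        if prefix == 'p:':
            pats.append(value)
        elif prefix == 'i:':
            inc.append(value)
        elif prefix == 'x:':
            exc.append(value)
        elif prefix == 'r:':
            rev = value or None          # '' means the working directory
        elif prefix == 'd:':
            default = value
        else:
            raise ValueError('invalid _matchfiles prefix: %s' % prefix)
    return pats, inc, exc, rev, default or 'glob'

print(parse_matchfiles_args(['p:*.py', 'x:tests/**', 'd:glob']))
# (['*.py'], [], ['tests/**'], None, 'glob')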
1209 @predicate('file(pattern)', safe=True)
1213 @predicate('file(pattern)', safe=True)
1210 def hasfile(repo, subset, x):
1214 def hasfile(repo, subset, x):
1211 """Changesets affecting files matched by pattern.
1215 """Changesets affecting files matched by pattern.
1212
1216
1213 For a faster but less accurate result, consider using ``filelog()``
1217 For a faster but less accurate result, consider using ``filelog()``
1214 instead.
1218 instead.
1215
1219
1216 This predicate uses ``glob:`` as the default kind of pattern.
1220 This predicate uses ``glob:`` as the default kind of pattern.
1217 """
1221 """
1218 # i18n: "file" is a keyword
1222 # i18n: "file" is a keyword
1219 pat = getstring(x, _("file requires a pattern"))
1223 pat = getstring(x, _("file requires a pattern"))
1220 return _matchfiles(repo, subset, ('string', 'p:' + pat))
1224 return _matchfiles(repo, subset, ('string', 'p:' + pat))
1221
1225
1222 @predicate('head()', safe=True)
1226 @predicate('head()', safe=True)
1223 def head(repo, subset, x):
1227 def head(repo, subset, x):
1224 """Changeset is a named branch head.
1228 """Changeset is a named branch head.
1225 """
1229 """
1226 # i18n: "head" is a keyword
1230 # i18n: "head" is a keyword
1227 getargs(x, 0, 0, _("head takes no arguments"))
1231 getargs(x, 0, 0, _("head takes no arguments"))
1228 hs = set()
1232 hs = set()
1229 cl = repo.changelog
1233 cl = repo.changelog
1230 for ls in repo.branchmap().itervalues():
1234 for ls in repo.branchmap().itervalues():
1231 hs.update(cl.rev(h) for h in ls)
1235 hs.update(cl.rev(h) for h in ls)
1232 return subset & baseset(hs)
1236 return subset & baseset(hs)
1233
1237
1234 @predicate('heads(set)', safe=True)
1238 @predicate('heads(set)', safe=True)
1235 def heads(repo, subset, x):
1239 def heads(repo, subset, x):
1236 """Members of set with no children in set.
1240 """Members of set with no children in set.
1237 """
1241 """
1238 s = getset(repo, subset, x)
1242 s = getset(repo, subset, x)
1239 ps = parents(repo, subset, x)
1243 ps = parents(repo, subset, x)
1240 return s - ps
1244 return s - ps
1241
1245
1242 @predicate('hidden()', safe=True)
1246 @predicate('hidden()', safe=True)
1243 def hidden(repo, subset, x):
1247 def hidden(repo, subset, x):
1244 """Hidden changesets.
1248 """Hidden changesets.
1245 """
1249 """
1246 # i18n: "hidden" is a keyword
1250 # i18n: "hidden" is a keyword
1247 getargs(x, 0, 0, _("hidden takes no arguments"))
1251 getargs(x, 0, 0, _("hidden takes no arguments"))
1248 hiddenrevs = repoview.filterrevs(repo, 'visible')
1252 hiddenrevs = repoview.filterrevs(repo, 'visible')
1249 return subset & hiddenrevs
1253 return subset & hiddenrevs
1250
1254
1251 @predicate('keyword(string)', safe=True)
1255 @predicate('keyword(string)', safe=True)
1252 def keyword(repo, subset, x):
1256 def keyword(repo, subset, x):
1253 """Search commit message, user name, and names of changed files for
1257 """Search commit message, user name, and names of changed files for
1254 string. The match is case-insensitive.
1258 string. The match is case-insensitive.
1255
1259
1256 For a regular expression or case sensitive search of these fields, use
1260 For a regular expression or case sensitive search of these fields, use
1257 ``grep(regex)``.
1261 ``grep(regex)``.
1258 """
1262 """
1259 # i18n: "keyword" is a keyword
1263 # i18n: "keyword" is a keyword
1260 kw = encoding.lower(getstring(x, _("keyword requires a string")))
1264 kw = encoding.lower(getstring(x, _("keyword requires a string")))
1261
1265
1262 def matches(r):
1266 def matches(r):
1263 c = repo[r]
1267 c = repo[r]
1264 return any(kw in encoding.lower(t)
1268 return any(kw in encoding.lower(t)
1265 for t in c.files() + [c.user(), c.description()])
1269 for t in c.files() + [c.user(), c.description()])
1266
1270
1267 return subset.filter(matches, condrepr=('<keyword %r>', kw))
1271 return subset.filter(matches, condrepr=('<keyword %r>', kw))
1268
1272
1269 @predicate('limit(set[, n[, offset]])', safe=True)
1273 @predicate('limit(set[, n[, offset]])', safe=True)
1270 def limit(repo, subset, x):
1274 def limit(repo, subset, x):
1271 """First n members of set, defaulting to 1, starting from offset.
1275 """First n members of set, defaulting to 1, starting from offset.
1272 """
1276 """
1273 args = getargsdict(x, 'limit', 'set n offset')
1277 args = getargsdict(x, 'limit', 'set n offset')
1274 if 'set' not in args:
1278 if 'set' not in args:
1275 # i18n: "limit" is a keyword
1279 # i18n: "limit" is a keyword
1276 raise error.ParseError(_("limit requires one to three arguments"))
1280 raise error.ParseError(_("limit requires one to three arguments"))
1277 lim, ofs = 1, 0
1278 if 'n' in args:
1279 # i18n: "limit" is a keyword
1280 lim = getinteger(args['n'], _("limit expects a number"))
1281 if 'offset' in args:
1282 # i18n: "limit" is a keyword
1283 ofs = getinteger(args['offset'], _("limit expects a number"))
1281 # i18n: "limit" is a keyword
1282 lim = getinteger(args.get('n'), _("limit expects a number"), default=1)
1283 # i18n: "limit" is a keyword
1284 ofs = getinteger(args.get('offset'), _("limit expects a number"), default=0)
1284 if ofs < 0:
1285 if ofs < 0:
1285 raise error.ParseError(_("negative offset"))
1286 raise error.ParseError(_("negative offset"))
1286 os = getset(repo, fullreposet(repo), args['set'])
1287 os = getset(repo, fullreposet(repo), args['set'])
1287 result = []
1288 result = []
1288 it = iter(os)
1289 it = iter(os)
1289 for x in xrange(ofs):
1290 for x in xrange(ofs):
1290 y = next(it, None)
1291 y = next(it, None)
1291 if y is None:
1292 if y is None:
1292 break
1293 break
1293 for x in xrange(lim):
1294 for x in xrange(lim):
1294 y = next(it, None)
1295 y = next(it, None)
1295 if y is None:
1296 if y is None:
1296 break
1297 break
1297 elif y in subset:
1298 elif y in subset:
1298 result.append(y)
1299 result.append(y)
1299 return baseset(result, datarepr=('<limit n=%d, offset=%d, %r, %r>',
1300 return baseset(result, datarepr=('<limit n=%d, offset=%d, %r, %r>',
1300 lim, ofs, subset, os))
1301 lim, ofs, subset, os))
1301
1302
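In plain Python the loop above amounts to: skip 'ofs' revisions, then look at no more than 'lim' further revisions and keep those also present in 'subset'. With the getinteger(..., default=...) form used above, a missing 'n' or 'offset' argument simply falls back to 1 or 0. A minimal sketch:

def limit_like(revs, subset, lim=1, ofs=0):
    if ofs < 0:
        raise ValueError("negative offset")
    it = iter(revs)
    for _ in range(ofs):           # drop the first 'ofs' revisions
        if next(it, None) is None:
            break
    result = []
    for _ in range(lim):           # inspect at most 'lim' more revisions
        y = next(it, None)
        if y is None:
            break
        if y in subset:
            result.append(y)
    return result

print(limit_like(range(10), set(range(10)), lim=3, ofs=2))  # [2, 3, 4]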
1302 @predicate('last(set, [n])', safe=True)
1303 @predicate('last(set, [n])', safe=True)
1303 def last(repo, subset, x):
1304 def last(repo, subset, x):
1304 """Last n members of set, defaulting to 1.
1305 """Last n members of set, defaulting to 1.
1305 """
1306 """
1306 # i18n: "last" is a keyword
1307 # i18n: "last" is a keyword
1307 l = getargs(x, 1, 2, _("last requires one or two arguments"))
1308 l = getargs(x, 1, 2, _("last requires one or two arguments"))
1308 lim = 1
1309 lim = 1
1309 if len(l) == 2:
1310 if len(l) == 2:
1310 # i18n: "last" is a keyword
1311 # i18n: "last" is a keyword
1311 lim = getinteger(l[1], _("last expects a number"))
1312 lim = getinteger(l[1], _("last expects a number"))
1312 os = getset(repo, fullreposet(repo), l[0])
1313 os = getset(repo, fullreposet(repo), l[0])
1313 os.reverse()
1314 os.reverse()
1314 result = []
1315 result = []
1315 it = iter(os)
1316 it = iter(os)
1316 for x in xrange(lim):
1317 for x in xrange(lim):
1317 y = next(it, None)
1318 y = next(it, None)
1318 if y is None:
1319 if y is None:
1319 break
1320 break
1320 elif y in subset:
1321 elif y in subset:
1321 result.append(y)
1322 result.append(y)
1322 return baseset(result, datarepr=('<last n=%d, %r, %r>', lim, subset, os))
1323 return baseset(result, datarepr=('<last n=%d, %r, %r>', lim, subset, os))
1323
1324
1324 @predicate('max(set)', safe=True)
1325 @predicate('max(set)', safe=True)
1325 def maxrev(repo, subset, x):
1326 def maxrev(repo, subset, x):
1326 """Changeset with highest revision number in set.
1327 """Changeset with highest revision number in set.
1327 """
1328 """
1328 os = getset(repo, fullreposet(repo), x)
1329 os = getset(repo, fullreposet(repo), x)
1329 try:
1330 try:
1330 m = os.max()
1331 m = os.max()
1331 if m in subset:
1332 if m in subset:
1332 return baseset([m], datarepr=('<max %r, %r>', subset, os))
1333 return baseset([m], datarepr=('<max %r, %r>', subset, os))
1333 except ValueError:
1334 except ValueError:
1334 # os.max() throws a ValueError when the collection is empty.
1335 # os.max() throws a ValueError when the collection is empty.
1335 # Same as python's max().
1336 # Same as python's max().
1336 pass
1337 pass
1337 return baseset(datarepr=('<max %r, %r>', subset, os))
1338 return baseset(datarepr=('<max %r, %r>', subset, os))
1338
1339
1339 @predicate('merge()', safe=True)
1340 @predicate('merge()', safe=True)
1340 def merge(repo, subset, x):
1341 def merge(repo, subset, x):
1341 """Changeset is a merge changeset.
1342 """Changeset is a merge changeset.
1342 """
1343 """
1343 # i18n: "merge" is a keyword
1344 # i18n: "merge" is a keyword
1344 getargs(x, 0, 0, _("merge takes no arguments"))
1345 getargs(x, 0, 0, _("merge takes no arguments"))
1345 cl = repo.changelog
1346 cl = repo.changelog
1346 return subset.filter(lambda r: cl.parentrevs(r)[1] != -1,
1347 return subset.filter(lambda r: cl.parentrevs(r)[1] != -1,
1347 condrepr='<merge>')
1348 condrepr='<merge>')
1348
1349
1349 @predicate('branchpoint()', safe=True)
1350 @predicate('branchpoint()', safe=True)
1350 def branchpoint(repo, subset, x):
1351 def branchpoint(repo, subset, x):
1351 """Changesets with more than one child.
1352 """Changesets with more than one child.
1352 """
1353 """
1353 # i18n: "branchpoint" is a keyword
1354 # i18n: "branchpoint" is a keyword
1354 getargs(x, 0, 0, _("branchpoint takes no arguments"))
1355 getargs(x, 0, 0, _("branchpoint takes no arguments"))
1355 cl = repo.changelog
1356 cl = repo.changelog
1356 if not subset:
1357 if not subset:
1357 return baseset()
1358 return baseset()
1358 # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
1359 # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
1359 # (and if it is not, it should.)
1360 # (and if it is not, it should.)
1360 baserev = min(subset)
1361 baserev = min(subset)
1361 parentscount = [0]*(len(repo) - baserev)
1362 parentscount = [0]*(len(repo) - baserev)
1362 for r in cl.revs(start=baserev + 1):
1363 for r in cl.revs(start=baserev + 1):
1363 for p in cl.parentrevs(r):
1364 for p in cl.parentrevs(r):
1364 if p >= baserev:
1365 if p >= baserev:
1365 parentscount[p - baserev] += 1
1366 parentscount[p - baserev] += 1
1366 return subset.filter(lambda r: parentscount[r - baserev] > 1,
1367 return subset.filter(lambda r: parentscount[r - baserev] > 1,
1367 condrepr='<branchpoint>')
1368 condrepr='<branchpoint>')
1368
1369
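The child counting above can be reproduced on a toy DAG: one pass over the parent edges of every revision at or above the smallest candidate is enough to find revisions with more than one child. A sketch with made-up revisions:

def branchpoints(subset, parentrevs, tiprev):
    if not subset:
        return []
    baserev = min(subset)
    counts = [0] * (tiprev + 1 - baserev)
    for r in range(baserev + 1, tiprev + 1):
        for p in parentrevs(r):
            if p >= baserev:
                counts[p - baserev] += 1
    return [r for r in subset if counts[r - baserev] > 1]

# 0 has two children (1 and 2), so it is the only branch point here.
parents = {1: (0,), 2: (0,), 3: (2,)}
print(branchpoints([0, 1, 2, 3], lambda r: parents.get(r, ()), tiprev=3))  # [0]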
1369 @predicate('min(set)', safe=True)
1370 @predicate('min(set)', safe=True)
1370 def minrev(repo, subset, x):
1371 def minrev(repo, subset, x):
1371 """Changeset with lowest revision number in set.
1372 """Changeset with lowest revision number in set.
1372 """
1373 """
1373 os = getset(repo, fullreposet(repo), x)
1374 os = getset(repo, fullreposet(repo), x)
1374 try:
1375 try:
1375 m = os.min()
1376 m = os.min()
1376 if m in subset:
1377 if m in subset:
1377 return baseset([m], datarepr=('<min %r, %r>', subset, os))
1378 return baseset([m], datarepr=('<min %r, %r>', subset, os))
1378 except ValueError:
1379 except ValueError:
1379 # os.min() throws a ValueError when the collection is empty.
1380 # os.min() throws a ValueError when the collection is empty.
1380 # Same as python's min().
1381 # Same as python's min().
1381 pass
1382 pass
1382 return baseset(datarepr=('<min %r, %r>', subset, os))
1383 return baseset(datarepr=('<min %r, %r>', subset, os))
1383
1384
1384 @predicate('modifies(pattern)', safe=True)
1385 @predicate('modifies(pattern)', safe=True)
1385 def modifies(repo, subset, x):
1386 def modifies(repo, subset, x):
1386 """Changesets modifying files matched by pattern.
1387 """Changesets modifying files matched by pattern.
1387
1388
1388 The pattern without explicit kind like ``glob:`` is expected to be
1389 The pattern without explicit kind like ``glob:`` is expected to be
1389 relative to the current directory and match against a file or a
1390 relative to the current directory and match against a file or a
1390 directory.
1391 directory.
1391 """
1392 """
1392 # i18n: "modifies" is a keyword
1393 # i18n: "modifies" is a keyword
1393 pat = getstring(x, _("modifies requires a pattern"))
1394 pat = getstring(x, _("modifies requires a pattern"))
1394 return checkstatus(repo, subset, pat, 0)
1395 return checkstatus(repo, subset, pat, 0)
1395
1396
1396 @predicate('named(namespace)')
1397 @predicate('named(namespace)')
1397 def named(repo, subset, x):
1398 def named(repo, subset, x):
1398 """The changesets in a given namespace.
1399 """The changesets in a given namespace.
1399
1400
1400 Pattern matching is supported for `namespace`. See
1401 Pattern matching is supported for `namespace`. See
1401 :hg:`help revisions.patterns`.
1402 :hg:`help revisions.patterns`.
1402 """
1403 """
1403 # i18n: "named" is a keyword
1404 # i18n: "named" is a keyword
1404 args = getargs(x, 1, 1, _('named requires a namespace argument'))
1405 args = getargs(x, 1, 1, _('named requires a namespace argument'))
1405
1406
1406 ns = getstring(args[0],
1407 ns = getstring(args[0],
1407 # i18n: "named" is a keyword
1408 # i18n: "named" is a keyword
1408 _('the argument to named must be a string'))
1409 _('the argument to named must be a string'))
1409 kind, pattern, matcher = util.stringmatcher(ns)
1410 kind, pattern, matcher = util.stringmatcher(ns)
1410 namespaces = set()
1411 namespaces = set()
1411 if kind == 'literal':
1412 if kind == 'literal':
1412 if pattern not in repo.names:
1413 if pattern not in repo.names:
1413 raise error.RepoLookupError(_("namespace '%s' does not exist")
1414 raise error.RepoLookupError(_("namespace '%s' does not exist")
1414 % ns)
1415 % ns)
1415 namespaces.add(repo.names[pattern])
1416 namespaces.add(repo.names[pattern])
1416 else:
1417 else:
1417 for name, ns in repo.names.iteritems():
1418 for name, ns in repo.names.iteritems():
1418 if matcher(name):
1419 if matcher(name):
1419 namespaces.add(ns)
1420 namespaces.add(ns)
1420 if not namespaces:
1421 if not namespaces:
1421 raise error.RepoLookupError(_("no namespace exists"
1422 raise error.RepoLookupError(_("no namespace exists"
1422 " that matches '%s'") % pattern)
1423 " that matches '%s'") % pattern)
1423
1424
1424 names = set()
1425 names = set()
1425 for ns in namespaces:
1426 for ns in namespaces:
1426 for name in ns.listnames(repo):
1427 for name in ns.listnames(repo):
1427 if name not in ns.deprecated:
1428 if name not in ns.deprecated:
1428 names.update(repo[n].rev() for n in ns.nodes(repo, name))
1429 names.update(repo[n].rev() for n in ns.nodes(repo, name))
1429
1430
1430 names -= set([node.nullrev])
1431 names -= set([node.nullrev])
1431 return subset & names
1432 return subset & names
1432
1433
1433 @predicate('id(string)', safe=True)
1434 @predicate('id(string)', safe=True)
1434 def node_(repo, subset, x):
1435 def node_(repo, subset, x):
1435 """Revision non-ambiguously specified by the given hex string prefix.
1436 """Revision non-ambiguously specified by the given hex string prefix.
1436 """
1437 """
1437 # i18n: "id" is a keyword
1438 # i18n: "id" is a keyword
1438 l = getargs(x, 1, 1, _("id requires one argument"))
1439 l = getargs(x, 1, 1, _("id requires one argument"))
1439 # i18n: "id" is a keyword
1440 # i18n: "id" is a keyword
1440 n = getstring(l[0], _("id requires a string"))
1441 n = getstring(l[0], _("id requires a string"))
1441 if len(n) == 40:
1442 if len(n) == 40:
1442 try:
1443 try:
1443 rn = repo.changelog.rev(node.bin(n))
1444 rn = repo.changelog.rev(node.bin(n))
1444 except (LookupError, TypeError):
1445 except (LookupError, TypeError):
1445 rn = None
1446 rn = None
1446 else:
1447 else:
1447 rn = None
1448 rn = None
1448 pm = repo.changelog._partialmatch(n)
1449 pm = repo.changelog._partialmatch(n)
1449 if pm is not None:
1450 if pm is not None:
1450 rn = repo.changelog.rev(pm)
1451 rn = repo.changelog.rev(pm)
1451
1452
1452 if rn is None:
1453 if rn is None:
1453 return baseset()
1454 return baseset()
1454 result = baseset([rn])
1455 result = baseset([rn])
1455 return result & subset
1456 return result & subset
1456
1457
1457 @predicate('obsolete()', safe=True)
1458 @predicate('obsolete()', safe=True)
1458 def obsolete(repo, subset, x):
1459 def obsolete(repo, subset, x):
1459 """Mutable changeset with a newer version."""
1460 """Mutable changeset with a newer version."""
1460 # i18n: "obsolete" is a keyword
1461 # i18n: "obsolete" is a keyword
1461 getargs(x, 0, 0, _("obsolete takes no arguments"))
1462 getargs(x, 0, 0, _("obsolete takes no arguments"))
1462 obsoletes = obsmod.getrevs(repo, 'obsolete')
1463 obsoletes = obsmod.getrevs(repo, 'obsolete')
1463 return subset & obsoletes
1464 return subset & obsoletes
1464
1465
1465 @predicate('only(set, [set])', safe=True)
1466 @predicate('only(set, [set])', safe=True)
1466 def only(repo, subset, x):
1467 def only(repo, subset, x):
1467 """Changesets that are ancestors of the first set that are not ancestors
1468 """Changesets that are ancestors of the first set that are not ancestors
1468 of any other head in the repo. If a second set is specified, the result
1469 of any other head in the repo. If a second set is specified, the result
1469 is ancestors of the first set that are not ancestors of the second set
1470 is ancestors of the first set that are not ancestors of the second set
1470 (i.e. ::<set1> - ::<set2>).
1471 (i.e. ::<set1> - ::<set2>).
1471 """
1472 """
1472 cl = repo.changelog
1473 cl = repo.changelog
1473 # i18n: "only" is a keyword
1474 # i18n: "only" is a keyword
1474 args = getargs(x, 1, 2, _('only takes one or two arguments'))
1475 args = getargs(x, 1, 2, _('only takes one or two arguments'))
1475 include = getset(repo, fullreposet(repo), args[0])
1476 include = getset(repo, fullreposet(repo), args[0])
1476 if len(args) == 1:
1477 if len(args) == 1:
1477 if not include:
1478 if not include:
1478 return baseset()
1479 return baseset()
1479
1480
1480 descendants = set(_revdescendants(repo, include, False))
1481 descendants = set(_revdescendants(repo, include, False))
1481 exclude = [rev for rev in cl.headrevs()
1482 exclude = [rev for rev in cl.headrevs()
1482 if not rev in descendants and not rev in include]
1483 if not rev in descendants and not rev in include]
1483 else:
1484 else:
1484 exclude = getset(repo, fullreposet(repo), args[1])
1485 exclude = getset(repo, fullreposet(repo), args[1])
1485
1486
1486 results = set(cl.findmissingrevs(common=exclude, heads=include))
1487 results = set(cl.findmissingrevs(common=exclude, heads=include))
1487 # XXX we should turn this into a baseset instead of a set, smartset may do
1488 # XXX we should turn this into a baseset instead of a set, smartset may do
1488 # some optimizations from the fact this is a baseset.
1489 # some optimizations from the fact this is a baseset.
1489 return subset & results
1490 return subset & results
1490
1491
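For the two-argument form this boils down to an ancestor-set difference, ::<set1> - ::<set2>. A sketch on a toy parent map rather than the changelog (revisions are made up):

def ancestors(revs, parentrevs):
    seen, stack = set(), list(revs)
    while stack:
        r = stack.pop()
        if r not in seen:
            seen.add(r)
            stack.extend(parentrevs(r))
    return seen

def only_like(include, exclude, parentrevs):
    return sorted(ancestors(include, parentrevs) - ancestors(exclude, parentrevs))

parents = {0: (), 1: (0,), 2: (1,), 3: (1,)}   # 2 and 3 both branch off 1
print(only_like([2], [3], parents.__getitem__))  # [2]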
1491 @predicate('origin([set])', safe=True)
1492 @predicate('origin([set])', safe=True)
1492 def origin(repo, subset, x):
1493 def origin(repo, subset, x):
1493 """
1494 """
1494 Changesets that were specified as a source for the grafts, transplants or
1495 Changesets that were specified as a source for the grafts, transplants or
1495 rebases that created the given revisions. Omitting the optional set is the
1496 rebases that created the given revisions. Omitting the optional set is the
1496 same as passing all(). If a changeset created by these operations is itself
1497 same as passing all(). If a changeset created by these operations is itself
1497 specified as a source for one of these operations, only the source changeset
1498 specified as a source for one of these operations, only the source changeset
1498 for the first operation is selected.
1499 for the first operation is selected.
1499 """
1500 """
1500 if x is not None:
1501 if x is not None:
1501 dests = getset(repo, fullreposet(repo), x)
1502 dests = getset(repo, fullreposet(repo), x)
1502 else:
1503 else:
1503 dests = fullreposet(repo)
1504 dests = fullreposet(repo)
1504
1505
1505 def _firstsrc(rev):
1506 def _firstsrc(rev):
1506 src = _getrevsource(repo, rev)
1507 src = _getrevsource(repo, rev)
1507 if src is None:
1508 if src is None:
1508 return None
1509 return None
1509
1510
1510 while True:
1511 while True:
1511 prev = _getrevsource(repo, src)
1512 prev = _getrevsource(repo, src)
1512
1513
1513 if prev is None:
1514 if prev is None:
1514 return src
1515 return src
1515 src = prev
1516 src = prev
1516
1517
1517 o = set([_firstsrc(r) for r in dests])
1518 o = set([_firstsrc(r) for r in dests])
1518 o -= set([None])
1519 o -= set([None])
1519 # XXX we should turn this into a baseset instead of a set, smartset may do
1520 # XXX we should turn this into a baseset instead of a set, smartset may do
1520 # some optimizations from the fact this is a baseset.
1521 # some optimizations from the fact this is a baseset.
1521 return subset & o
1522 return subset & o
1522
1523
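Compared with the sketch after destination() above, _firstsrc() walks in the opposite direction: from a destination back to the first source of the whole graft/transplant chain. A standalone equivalent on the same toy mapping:

def first_source(rev, getsource):
    src = getsource(rev)
    if src is None:
        return None
    while True:
        prev = getsource(src)
        if prev is None:
            return src
        src = prev

srcmap = {3: 1, 5: 3}                 # 3 grafted from 1, 5 grafted from 3
print(first_source(5, srcmap.get))    # 1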
1523 @predicate('outgoing([path])', safe=True)
1524 @predicate('outgoing([path])', safe=True)
1524 def outgoing(repo, subset, x):
1525 def outgoing(repo, subset, x):
1525 """Changesets not found in the specified destination repository, or the
1526 """Changesets not found in the specified destination repository, or the
1526 default push location.
1527 default push location.
1527 """
1528 """
1528 # Avoid cycles.
1529 # Avoid cycles.
1529 from . import (
1530 from . import (
1530 discovery,
1531 discovery,
1531 hg,
1532 hg,
1532 )
1533 )
1533 # i18n: "outgoing" is a keyword
1534 # i18n: "outgoing" is a keyword
1534 l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
1535 l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
1535 # i18n: "outgoing" is a keyword
1536 # i18n: "outgoing" is a keyword
1536 dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
1537 dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
1537 dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
1538 dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
1538 dest, branches = hg.parseurl(dest)
1539 dest, branches = hg.parseurl(dest)
1539 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1540 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1540 if revs:
1541 if revs:
1541 revs = [repo.lookup(rev) for rev in revs]
1542 revs = [repo.lookup(rev) for rev in revs]
1542 other = hg.peer(repo, {}, dest)
1543 other = hg.peer(repo, {}, dest)
1543 repo.ui.pushbuffer()
1544 repo.ui.pushbuffer()
1544 outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
1545 outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
1545 repo.ui.popbuffer()
1546 repo.ui.popbuffer()
1546 cl = repo.changelog
1547 cl = repo.changelog
1547 o = set([cl.rev(r) for r in outgoing.missing])
1548 o = set([cl.rev(r) for r in outgoing.missing])
1548 return subset & o
1549 return subset & o
1549
1550
1550 @predicate('p1([set])', safe=True)
1551 @predicate('p1([set])', safe=True)
1551 def p1(repo, subset, x):
1552 def p1(repo, subset, x):
1552 """First parent of changesets in set, or the working directory.
1553 """First parent of changesets in set, or the working directory.
1553 """
1554 """
1554 if x is None:
1555 if x is None:
1555 p = repo[x].p1().rev()
1556 p = repo[x].p1().rev()
1556 if p >= 0:
1557 if p >= 0:
1557 return subset & baseset([p])
1558 return subset & baseset([p])
1558 return baseset()
1559 return baseset()
1559
1560
1560 ps = set()
1561 ps = set()
1561 cl = repo.changelog
1562 cl = repo.changelog
1562 for r in getset(repo, fullreposet(repo), x):
1563 for r in getset(repo, fullreposet(repo), x):
1563 ps.add(cl.parentrevs(r)[0])
1564 ps.add(cl.parentrevs(r)[0])
1564 ps -= set([node.nullrev])
1565 ps -= set([node.nullrev])
1565 # XXX we should turn this into a baseset instead of a set, smartset may do
1566 # XXX we should turn this into a baseset instead of a set, smartset may do
1566 # some optimizations from the fact this is a baseset.
1567 # some optimizations from the fact this is a baseset.
1567 return subset & ps
1568 return subset & ps
1568
1569
1569 @predicate('p2([set])', safe=True)
1570 @predicate('p2([set])', safe=True)
1570 def p2(repo, subset, x):
1571 def p2(repo, subset, x):
1571 """Second parent of changesets in set, or the working directory.
1572 """Second parent of changesets in set, or the working directory.
1572 """
1573 """
1573 if x is None:
1574 if x is None:
1574 ps = repo[x].parents()
1575 ps = repo[x].parents()
1575 try:
1576 try:
1576 p = ps[1].rev()
1577 p = ps[1].rev()
1577 if p >= 0:
1578 if p >= 0:
1578 return subset & baseset([p])
1579 return subset & baseset([p])
1579 return baseset()
1580 return baseset()
1580 except IndexError:
1581 except IndexError:
1581 return baseset()
1582 return baseset()
1582
1583
1583 ps = set()
1584 ps = set()
1584 cl = repo.changelog
1585 cl = repo.changelog
1585 for r in getset(repo, fullreposet(repo), x):
1586 for r in getset(repo, fullreposet(repo), x):
1586 ps.add(cl.parentrevs(r)[1])
1587 ps.add(cl.parentrevs(r)[1])
1587 ps -= set([node.nullrev])
1588 ps -= set([node.nullrev])
1588 # XXX we should turn this into a baseset instead of a set, smartset may do
1589 # XXX we should turn this into a baseset instead of a set, smartset may do
1589 # some optimizations from the fact this is a baseset.
1590 # some optimizations from the fact this is a baseset.
1590 return subset & ps
1591 return subset & ps
1591
1592
1592 def parentpost(repo, subset, x, order):
1593 def parentpost(repo, subset, x, order):
1593 return p1(repo, subset, x)
1594 return p1(repo, subset, x)
1594
1595
1595 @predicate('parents([set])', safe=True)
1596 @predicate('parents([set])', safe=True)
1596 def parents(repo, subset, x):
1597 def parents(repo, subset, x):
1597 """
1598 """
1598 The set of all parents for all changesets in set, or the working directory.
1599 The set of all parents for all changesets in set, or the working directory.
1599 """
1600 """
1600 if x is None:
1601 if x is None:
1601 ps = set(p.rev() for p in repo[x].parents())
1602 ps = set(p.rev() for p in repo[x].parents())
1602 else:
1603 else:
1603 ps = set()
1604 ps = set()
1604 cl = repo.changelog
1605 cl = repo.changelog
1605 up = ps.update
1606 up = ps.update
1606 parentrevs = cl.parentrevs
1607 parentrevs = cl.parentrevs
1607 for r in getset(repo, fullreposet(repo), x):
1608 for r in getset(repo, fullreposet(repo), x):
1608 if r == node.wdirrev:
1609 if r == node.wdirrev:
1609 up(p.rev() for p in repo[r].parents())
1610 up(p.rev() for p in repo[r].parents())
1610 else:
1611 else:
1611 up(parentrevs(r))
1612 up(parentrevs(r))
1612 ps -= set([node.nullrev])
1613 ps -= set([node.nullrev])
1613 return subset & ps
1614 return subset & ps
1614
1615
1615 def _phase(repo, subset, target):
1616 def _phase(repo, subset, target):
1616 """helper to select all revs in phase <target>"""
1617 """helper to select all revs in phase <target>"""
1617 repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
1618 repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
1618 if repo._phasecache._phasesets:
1619 if repo._phasecache._phasesets:
1619 s = repo._phasecache._phasesets[target] - repo.changelog.filteredrevs
1620 s = repo._phasecache._phasesets[target] - repo.changelog.filteredrevs
1620 s = baseset(s)
1621 s = baseset(s)
1621 s.sort() # sets are unordered, so we enforce ascending order
1622 s.sort() # sets are unordered, so we enforce ascending order
1622 return subset & s
1623 return subset & s
1623 else:
1624 else:
1624 phase = repo._phasecache.phase
1625 phase = repo._phasecache.phase
1625 condition = lambda r: phase(repo, r) == target
1626 condition = lambda r: phase(repo, r) == target
1626 return subset.filter(condition, condrepr=('<phase %r>', target),
1627 return subset.filter(condition, condrepr=('<phase %r>', target),
1627 cache=False)
1628 cache=False)
1628
1629
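The helper above picks between two strategies: intersect with a precomputed per-phase set when the phase cache provides one, otherwise fall back to a per-revision phase check. A minimal illustration with hypothetical phase data:

PUBLIC, DRAFT, SECRET = 0, 1, 2

def phase_revs(subset, target, phasesets=None, phase_of=None):
    if phasesets is not None:
        return sorted(set(subset) & phasesets[target])   # fast path
    return [r for r in subset if phase_of(r) == target]  # per-rev check

phases = {0: PUBLIC, 1: DRAFT, 2: DRAFT, 3: SECRET}
sets = {PUBLIC: {0}, DRAFT: {1, 2}, SECRET: {3}}
print(phase_revs(range(4), DRAFT, phasesets=sets))       # [1, 2]
print(phase_revs(range(4), DRAFT, phase_of=phases.get))  # [1, 2]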
1629 @predicate('draft()', safe=True)
1630 @predicate('draft()', safe=True)
1630 def draft(repo, subset, x):
1631 def draft(repo, subset, x):
1631 """Changeset in draft phase."""
1632 """Changeset in draft phase."""
1632 # i18n: "draft" is a keyword
1633 # i18n: "draft" is a keyword
1633 getargs(x, 0, 0, _("draft takes no arguments"))
1634 getargs(x, 0, 0, _("draft takes no arguments"))
1634 target = phases.draft
1635 target = phases.draft
1635 return _phase(repo, subset, target)
1636 return _phase(repo, subset, target)
1636
1637
1637 @predicate('secret()', safe=True)
1638 @predicate('secret()', safe=True)
1638 def secret(repo, subset, x):
1639 def secret(repo, subset, x):
1639 """Changeset in secret phase."""
1640 """Changeset in secret phase."""
1640 # i18n: "secret" is a keyword
1641 # i18n: "secret" is a keyword
1641 getargs(x, 0, 0, _("secret takes no arguments"))
1642 getargs(x, 0, 0, _("secret takes no arguments"))
1642 target = phases.secret
1643 target = phases.secret
1643 return _phase(repo, subset, target)
1644 return _phase(repo, subset, target)
1644
1645
1645 def parentspec(repo, subset, x, n, order):
1646 def parentspec(repo, subset, x, n, order):
1646 """``set^0``
1647 """``set^0``
1647 The set.
1648 The set.
1648 ``set^1`` (or ``set^``), ``set^2``
1649 ``set^1`` (or ``set^``), ``set^2``
1649 First or second parent, respectively, of all changesets in set.
1650 First or second parent, respectively, of all changesets in set.
1650 """
1651 """
1651 try:
1652 try:
1652 n = int(n[1])
1653 n = int(n[1])
1653 if n not in (0, 1, 2):
1654 if n not in (0, 1, 2):
1654 raise ValueError
1655 raise ValueError
1655 except (TypeError, ValueError):
1656 except (TypeError, ValueError):
1656 raise error.ParseError(_("^ expects a number 0, 1, or 2"))
1657 raise error.ParseError(_("^ expects a number 0, 1, or 2"))
1657 ps = set()
1658 ps = set()
1658 cl = repo.changelog
1659 cl = repo.changelog
1659 for r in getset(repo, fullreposet(repo), x):
1660 for r in getset(repo, fullreposet(repo), x):
1660 if n == 0:
1661 if n == 0:
1661 ps.add(r)
1662 ps.add(r)
1662 elif n == 1:
1663 elif n == 1:
1663 ps.add(cl.parentrevs(r)[0])
1664 ps.add(cl.parentrevs(r)[0])
1664 elif n == 2:
1665 elif n == 2:
1665 parents = cl.parentrevs(r)
1666 parents = cl.parentrevs(r)
1666 if parents[1] != node.nullrev:
1667 if parents[1] != node.nullrev:
1667 ps.add(parents[1])
1668 ps.add(parents[1])
1668 return subset & ps
1669 return subset & ps
1669
1670
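A toy version of the ^n operator handled above: ^0 yields the revision itself, ^1 its first parent, and ^2 its second parent when there is one (revisions and parent tuples are made up).

def parentspec_like(revs, n, parentrevs, nullrev=-1):
    if n not in (0, 1, 2):
        raise ValueError("^ expects a number 0, 1, or 2")
    ps = set()
    for r in revs:
        if n == 0:
            ps.add(r)
        elif n == 1:
            ps.add(parentrevs(r)[0])
        else:
            p2 = parentrevs(r)[1]
            if p2 != nullrev:
                ps.add(p2)
    return sorted(ps)

parents = {2: (0, 1), 3: (2, -1)}     # rev 2 is a merge, rev 3 is not
print(parentspec_like([2, 3], 2, parents.__getitem__))  # [1]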
1670 @predicate('present(set)', safe=True)
1671 @predicate('present(set)', safe=True)
1671 def present(repo, subset, x):
1672 def present(repo, subset, x):
1672 """An empty set, if any revision in set isn't found; otherwise,
1673 """An empty set, if any revision in set isn't found; otherwise,
1673 all revisions in set.
1674 all revisions in set.
1674
1675
1675 If any of the specified revisions is not present in the local repository,
1676 If any of the specified revisions is not present in the local repository,
1676 the query is normally aborted. But this predicate allows the query
1677 the query is normally aborted. But this predicate allows the query
1677 to continue even in such cases.
1678 to continue even in such cases.
1678 """
1679 """
1679 try:
1680 try:
1680 return getset(repo, subset, x)
1681 return getset(repo, subset, x)
1681 except error.RepoLookupError:
1682 except error.RepoLookupError:
1682 return baseset()
1683 return baseset()
1683
1684
1684 # for internal use
1685 # for internal use
1685 @predicate('_notpublic', safe=True)
1686 @predicate('_notpublic', safe=True)
1686 def _notpublic(repo, subset, x):
1687 def _notpublic(repo, subset, x):
1687 getargs(x, 0, 0, "_notpublic takes no arguments")
1688 getargs(x, 0, 0, "_notpublic takes no arguments")
1688 repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
1689 repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
1689 if repo._phasecache._phasesets:
1690 if repo._phasecache._phasesets:
1690 s = set()
1691 s = set()
1691 for u in repo._phasecache._phasesets[1:]:
1692 for u in repo._phasecache._phasesets[1:]:
1692 s.update(u)
1693 s.update(u)
1693 s = baseset(s - repo.changelog.filteredrevs)
1694 s = baseset(s - repo.changelog.filteredrevs)
1694 s.sort()
1695 s.sort()
1695 return subset & s
1696 return subset & s
1696 else:
1697 else:
1697 phase = repo._phasecache.phase
1698 phase = repo._phasecache.phase
1698 target = phases.public
1699 target = phases.public
1699 condition = lambda r: phase(repo, r) != target
1700 condition = lambda r: phase(repo, r) != target
1700 return subset.filter(condition, condrepr=('<phase %r>', target),
1701 return subset.filter(condition, condrepr=('<phase %r>', target),
1701 cache=False)
1702 cache=False)
1702
1703
1703 @predicate('public()', safe=True)
1704 @predicate('public()', safe=True)
1704 def public(repo, subset, x):
1705 def public(repo, subset, x):
1705 """Changeset in public phase."""
1706 """Changeset in public phase."""
1706 # i18n: "public" is a keyword
1707 # i18n: "public" is a keyword
1707 getargs(x, 0, 0, _("public takes no arguments"))
1708 getargs(x, 0, 0, _("public takes no arguments"))
1708 phase = repo._phasecache.phase
1709 phase = repo._phasecache.phase
1709 target = phases.public
1710 target = phases.public
1710 condition = lambda r: phase(repo, r) == target
1711 condition = lambda r: phase(repo, r) == target
1711 return subset.filter(condition, condrepr=('<phase %r>', target),
1712 return subset.filter(condition, condrepr=('<phase %r>', target),
1712 cache=False)
1713 cache=False)
1713
1714
1714 @predicate('remote([id [,path]])', safe=True)
1715 @predicate('remote([id [,path]])', safe=True)
1715 def remote(repo, subset, x):
1716 def remote(repo, subset, x):
1716 """Local revision that corresponds to the given identifier in a
1717 """Local revision that corresponds to the given identifier in a
1717 remote repository, if present. Here, the '.' identifier is a
1718 remote repository, if present. Here, the '.' identifier is a
1718 synonym for the current local branch.
1719 synonym for the current local branch.
1719 """
1720 """
1720
1721
1721 from . import hg # avoid start-up nasties
1722 from . import hg # avoid start-up nasties
1722 # i18n: "remote" is a keyword
1723 # i18n: "remote" is a keyword
1723 l = getargs(x, 0, 2, _("remote takes zero, one, or two arguments"))
1724 l = getargs(x, 0, 2, _("remote takes zero, one, or two arguments"))
1724
1725
1725 q = '.'
1726 q = '.'
1726 if len(l) > 0:
1727 if len(l) > 0:
1727 # i18n: "remote" is a keyword
1728 # i18n: "remote" is a keyword
1728 q = getstring(l[0], _("remote requires a string id"))
1729 q = getstring(l[0], _("remote requires a string id"))
1729 if q == '.':
1730 if q == '.':
1730 q = repo['.'].branch()
1731 q = repo['.'].branch()
1731
1732
1732 dest = ''
1733 dest = ''
1733 if len(l) > 1:
1734 if len(l) > 1:
1734 # i18n: "remote" is a keyword
1735 # i18n: "remote" is a keyword
1735 dest = getstring(l[1], _("remote requires a repository path"))
1736 dest = getstring(l[1], _("remote requires a repository path"))
1736 dest = repo.ui.expandpath(dest or 'default')
1737 dest = repo.ui.expandpath(dest or 'default')
1737 dest, branches = hg.parseurl(dest)
1738 dest, branches = hg.parseurl(dest)
1738 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1739 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1739 if revs:
1740 if revs:
1740 revs = [repo.lookup(rev) for rev in revs]
1741 revs = [repo.lookup(rev) for rev in revs]
1741 other = hg.peer(repo, {}, dest)
1742 other = hg.peer(repo, {}, dest)
1742 n = other.lookup(q)
1743 n = other.lookup(q)
1743 if n in repo:
1744 if n in repo:
1744 r = repo[n].rev()
1745 r = repo[n].rev()
1745 if r in subset:
1746 if r in subset:
1746 return baseset([r])
1747 return baseset([r])
1747 return baseset()
1748 return baseset()
1748
1749
1749 @predicate('removes(pattern)', safe=True)
1750 @predicate('removes(pattern)', safe=True)
1750 def removes(repo, subset, x):
1751 def removes(repo, subset, x):
1751 """Changesets which remove files matching pattern.
1752 """Changesets which remove files matching pattern.
1752
1753
1753 The pattern without explicit kind like ``glob:`` is expected to be
1754 The pattern without explicit kind like ``glob:`` is expected to be
1754 relative to the current directory and match against a file or a
1755 relative to the current directory and match against a file or a
1755 directory.
1756 directory.
1756 """
1757 """
1757 # i18n: "removes" is a keyword
1758 # i18n: "removes" is a keyword
1758 pat = getstring(x, _("removes requires a pattern"))
1759 pat = getstring(x, _("removes requires a pattern"))
1759 return checkstatus(repo, subset, pat, 2)
1760 return checkstatus(repo, subset, pat, 2)
1760
1761
@predicate('rev(number)', safe=True)
def rev(repo, subset, x):
    """Revision with the given numeric identifier.
    """
    # i18n: "rev" is a keyword
    l = getargs(x, 1, 1, _("rev requires one argument"))
    try:
        # i18n: "rev" is a keyword
        l = int(getstring(l[0], _("rev requires a number")))
    except (TypeError, ValueError):
        # i18n: "rev" is a keyword
        raise error.ParseError(_("rev expects a number"))
    if l not in repo.changelog and l != node.nullrev:
        return baseset()
    return subset & baseset([l])

@predicate('matching(revision [, field])', safe=True)
def matching(repo, subset, x):
    """Changesets in which a given set of fields match the set of fields in the
    selected revision or set.

    To match more than one field pass the list of fields to match separated
    by spaces (e.g. ``author description``).

    Valid fields are most regular revision fields and some special fields.

    Regular revision fields are ``description``, ``author``, ``branch``,
    ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
    and ``diff``.
    Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
    contents of the revision. Two revisions matching their ``diff`` will
    also match their ``files``.

    Special fields are ``summary`` and ``metadata``:
    ``summary`` matches the first line of the description.
    ``metadata`` is equivalent to matching ``description user date``
    (i.e. it matches the main metadata fields).

    ``metadata`` is the default field which is used when no fields are
    specified. You can match more than one field at a time.
    """
    # i18n: "matching" is a keyword
    l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))

    revs = getset(repo, fullreposet(repo), l[0])

    fieldlist = ['metadata']
    if len(l) > 1:
        fieldlist = getstring(l[1],
                              # i18n: "matching" is a keyword
                              _("matching requires a string "
                                "as its second argument")).split()

    # Make sure that there are no repeated fields,
    # expand the 'special' 'metadata' field type
    # and check the 'files' whenever we check the 'diff'
    fields = []
    for field in fieldlist:
        if field == 'metadata':
            fields += ['user', 'description', 'date']
        elif field == 'diff':
            # a revision matching the diff must also match the files
            # since matching the diff is very costly, make sure to
            # also match the files first
            fields += ['files', 'diff']
        else:
            if field == 'author':
                field = 'user'
            fields.append(field)
    fields = set(fields)
    if 'summary' in fields and 'description' in fields:
        # If a revision matches its description it also matches its summary
        fields.discard('summary')

    # We may want to match more than one field
    # Not all fields take the same amount of time to be matched
    # Sort the selected fields in order of increasing matching cost
    fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
        'files', 'description', 'substate', 'diff']
    def fieldkeyfunc(f):
        try:
            return fieldorder.index(f)
        except ValueError:
            # assume an unknown field is very costly
            return len(fieldorder)
    fields = list(fields)
    fields.sort(key=fieldkeyfunc)

    # Each field will be matched with its own "getfield" function
    # which will be added to the getfieldfuncs array of functions
    getfieldfuncs = []
    _funcs = {
        'user': lambda r: repo[r].user(),
        'branch': lambda r: repo[r].branch(),
        'date': lambda r: repo[r].date(),
        'description': lambda r: repo[r].description(),
        'files': lambda r: repo[r].files(),
        'parents': lambda r: repo[r].parents(),
        'phase': lambda r: repo[r].phase(),
        'substate': lambda r: repo[r].substate,
        'summary': lambda r: repo[r].description().splitlines()[0],
        'diff': lambda r: list(repo[r].diff(git=True),)
    }
    for info in fields:
        getfield = _funcs.get(info, None)
        if getfield is None:
            raise error.ParseError(
                # i18n: "matching" is a keyword
                _("unexpected field name passed to matching: %s") % info)
        getfieldfuncs.append(getfield)
    # convert the getfield array of functions into a "getinfo" function
    # which returns an array of field values (or a single value if there
    # is only one field to match)
    getinfo = lambda r: [f(r) for f in getfieldfuncs]

    def matches(x):
        for rev in revs:
            target = getinfo(rev)
            match = True
            for n, f in enumerate(getfieldfuncs):
                if target[n] != f(x):
                    match = False
            if match:
                return True
        return False

    return subset.filter(matches, condrepr=('<matching%r %r>', fields, revs))

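# Illustrative usage of matching() above (editor's note, not part of the
# original module):
#
#   hg log -r "matching(42)"                  # same user, description and
#                                             # date as revision 42
#   hg log -r "matching(42, 'author branch')" # same author and branch as 42
#
# Field names are the ones listed in the docstring; several fields can be
# passed in one space-separated string.
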
@predicate('reverse(set)', safe=True, takeorder=True)
def reverse(repo, subset, x, order):
    """Reverse order of set.
    """
    l = getset(repo, subset, x)
    if order == defineorder:
        l.reverse()
    return l

@predicate('roots(set)', safe=True)
def roots(repo, subset, x):
    """Changesets in set with no parent changeset in set.
    """
    s = getset(repo, fullreposet(repo), x)
    parents = repo.changelog.parentrevs
    def filter(r):
        for p in parents(r):
            if 0 <= p and p in s:
                return False
        return True
    return subset & s.filter(filter, condrepr='<roots>')

_sortkeyfuncs = {
    'rev': lambda c: c.rev(),
    'branch': lambda c: c.branch(),
    'desc': lambda c: c.description(),
    'user': lambda c: c.user(),
    'author': lambda c: c.user(),
    'date': lambda c: c.date()[0],
}

def _getsortargs(x):
    """Parse sort options into (set, [(key, reverse)], opts)"""
    args = getargsdict(x, 'sort', 'set keys topo.firstbranch')
    if 'set' not in args:
        # i18n: "sort" is a keyword
        raise error.ParseError(_('sort requires one or two arguments'))
    keys = "rev"
    if 'keys' in args:
        # i18n: "sort" is a keyword
        keys = getstring(args['keys'], _("sort spec must be a string"))

    keyflags = []
    for k in keys.split():
        fk = k
        reverse = (k[0] == '-')
        if reverse:
            k = k[1:]
        if k not in _sortkeyfuncs and k != 'topo':
            raise error.ParseError(_("unknown sort key %r") % fk)
        keyflags.append((k, reverse))

    if len(keyflags) > 1 and any(k == 'topo' for k, reverse in keyflags):
        # i18n: "topo" is a keyword
        raise error.ParseError(_('topo sort order cannot be combined '
                                 'with other sort keys'))

    opts = {}
    if 'topo.firstbranch' in args:
        if any(k == 'topo' for k, reverse in keyflags):
            opts['topo.firstbranch'] = args['topo.firstbranch']
        else:
            # i18n: "topo" and "topo.firstbranch" are keywords
            raise error.ParseError(_('topo.firstbranch can only be used '
                                     'when using the topo sort key'))

    return args['set'], keyflags, opts

@predicate('sort(set[, [-]key... [, ...]])', safe=True, takeorder=True)
def sort(repo, subset, x, order):
    """Sort set by keys. The default sort order is ascending, specify a key
    as ``-key`` to sort in descending order.

    The keys can be:

    - ``rev`` for the revision number,
    - ``branch`` for the branch name,
    - ``desc`` for the commit message (description),
    - ``user`` for user name (``author`` can be used as an alias),
    - ``date`` for the commit date
    - ``topo`` for a reverse topographical sort

    The ``topo`` sort order cannot be combined with other sort keys. This sort
    takes one optional argument, ``topo.firstbranch``, which takes a revset that
    specifies what topographical branches to prioritize in the sort.

    """
    s, keyflags, opts = _getsortargs(x)
    revs = getset(repo, subset, s)

    if not keyflags or order != defineorder:
        return revs
    if len(keyflags) == 1 and keyflags[0][0] == "rev":
        revs.sort(reverse=keyflags[0][1])
        return revs
    elif keyflags[0][0] == "topo":
        firstbranch = ()
        if 'topo.firstbranch' in opts:
            firstbranch = getset(repo, subset, opts['topo.firstbranch'])
        revs = baseset(_toposort(revs, repo.changelog.parentrevs, firstbranch),
                       istopo=True)
        if keyflags[0][1]:
            revs.reverse()
        return revs

    # sort() is guaranteed to be stable
    ctxs = [repo[r] for r in revs]
    for k, reverse in reversed(keyflags):
        ctxs.sort(key=_sortkeyfuncs[k], reverse=reverse)
    return baseset([c.rev() for c in ctxs])

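# Illustrative usage of sort() above (editor's note, not part of the original
# module):
#
#   hg log -r "sort(all(), '-date user')"   # newest first, ties broken by user
#   hg log -r "sort(all(), topo, topo.firstbranch=default)"
#
# Because list.sort() is stable, applying the keys in reverse order (as the
# code above does) gives the usual "primary key first" semantics.
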
def _toposort(revs, parentsfunc, firstbranch=()):
    """Yield revisions from heads to roots one (topo) branch at a time.

    This function aims to be used by a graph generator that wishes to minimize
    the number of parallel branches and their interleaving.

    Example iteration order (numbers show the "true" order in a changelog):

      o  4
      |
      o  1
      |
      | o  3
      | |
      | o  2
      |/
      o  0

    Note that the ancestors of merges are understood by the current
    algorithm to be on the same branch. This means no reordering will
    occur behind a merge.
    """

    ### Quick summary of the algorithm
    #
    # This function is based around a "retention" principle. We keep revisions
    # in memory until we are ready to emit a whole branch that immediately
    # "merges" into an existing one. This reduces the number of parallel
    # branches with interleaved revisions.
    #
    # During iteration revs are split into two groups:
    # A) revisions already emitted
    # B) revisions in "retention". They are stored as different subgroups.
    #
    # for each REV, we do the following logic:
    #
    #   1) if REV is a parent of (A), we will emit it. If there is a
    #   retention group ((B) above) that is blocked on REV being
    #   available, we emit all the revisions out of that retention
    #   group first.
    #
    #   2) else, we search for a subgroup in (B) waiting for REV to become
    #   available; if such a subgroup exists, we add REV to it and the
    #   subgroup now waits for REV.parents() to become available.
    #
    #   3) finally, if no such group existed in (B), we create a new subgroup.
    #
    #
    # To bootstrap the algorithm, we emit the tipmost revision (which
    # puts it in group (A) from above).

    revs.sort(reverse=True)

    # Set of parents of revisions that have been emitted. They can be
    # considered unblocked as the graph generator is already aware of them so
    # there is no need to delay the revisions that reference them.
    #
    # If someone wants to prioritize a branch over the others, pre-filling this
    # set will force all other branches to wait until this branch is ready to
    # be emitted.
    unblocked = set(firstbranch)

    # list of groups waiting to be displayed, each group is defined by:
    #
    #   (revs:    lists of revs waiting to be displayed,
    #    blocked: set of revs that cannot be displayed before those in 'revs')
    #
    # The second value ('blocked') corresponds to parents of any revision in
    # the group ('revs') that is not itself contained in the group. The main
    # idea of this algorithm is to delay as much as possible the emission of
    # any revision. This means waiting for the moment we are about to display
    # these parents to display the revs in a group.
    #
    # This first implementation is smart until it encounters a merge: it will
    # emit revs as soon as any parent is about to be emitted and can grow an
    # arbitrary number of revs in 'blocked'. In practice this means we properly
    # retain new branches but give up on any special ordering for ancestors
    # of merges. The implementation can be improved to handle this better.
    #
    # The first subgroup is special. It corresponds to all the revisions that
    # were already emitted. The 'revs' list is expected to be empty and the
    # 'blocked' set contains the parent revisions of already emitted revisions.
    #
    # You could pre-seed the <parents> set of groups[0] to specific
    # changesets to select what the first emitted branch should be.
    groups = [([], unblocked)]
    pendingheap = []
    pendingset = set()

    heapq.heapify(pendingheap)
    heappop = heapq.heappop
    heappush = heapq.heappush
    for currentrev in revs:
        # Heap works with smallest element, we want highest so we invert
        if currentrev not in pendingset:
            heappush(pendingheap, -currentrev)
            pendingset.add(currentrev)
        # iterate on pending revs until the current rev has been processed.
        rev = None
        while rev != currentrev:
            rev = -heappop(pendingheap)
            pendingset.remove(rev)

            # Seek a blocked subgroup waiting for the current revision.
            matching = [i for i, g in enumerate(groups) if rev in g[1]]

            if matching:
                # The main idea is to gather together all sets that are blocked
                # on the same revision.
                #
                # Groups are merged when a common blocking ancestor is
                # observed. For example, given two groups:
                #
                # revs [5, 4] waiting for 1
                # revs [3, 2] waiting for 1
                #
                # These two groups will be merged when we process
                # 1. In theory, we could have merged the groups when
                # we added 2 to the group it is now in (we could have
                # noticed the groups were both blocked on 1 then), but
                # the way it works now makes the algorithm simpler.
                #
                # We also always keep the oldest subgroup first. We can
                # probably improve the behavior by having the longest set
                # first. That way, graph algorithms could minimise the length
                # of parallel lines in their drawing. This is currently not
                # done.
                targetidx = matching.pop(0)
                trevs, tparents = groups[targetidx]
                for i in matching:
                    gr = groups[i]
                    trevs.extend(gr[0])
                    tparents |= gr[1]
                # delete all merged subgroups (except the one we kept)
                # (starting from the last subgroup for performance and
                # sanity reasons)
                for i in reversed(matching):
                    del groups[i]
            else:
                # This is a new head. We create a new subgroup for it.
                targetidx = len(groups)
                groups.append(([], set([rev])))

            gr = groups[targetidx]

            # We now add the current nodes to this subgroup. This is done
            # after the subgroup merging because all elements from a subgroup
            # that relied on this rev must precede it.
            #
            # we also update the <parents> set to include the parents of the
            # new nodes.
            if rev == currentrev: # only display stuff in rev
                gr[0].append(rev)
            gr[1].remove(rev)
            parents = [p for p in parentsfunc(rev) if p > node.nullrev]
            gr[1].update(parents)
            for p in parents:
                if p not in pendingset:
                    pendingset.add(p)
                    heappush(pendingheap, -p)

            # Look for a subgroup to display
            #
            # When unblocked is empty (if clause), we were not waiting for any
            # revisions during the first iteration (if no priority was given)
            # or we emitted a whole disconnected set of the graph (reached a
            # root). In that case we arbitrarily take the oldest known
            # subgroup. The heuristic could probably be better.
            #
            # Otherwise (elif clause), if the subgroup is blocked on
            # a revision we just emitted, we can safely emit it as
            # well.
            if not unblocked:
                if len(groups) > 1: # display other subset
                    targetidx = 1
                    gr = groups[1]
            elif not gr[1] & unblocked:
                gr = None

            if gr is not None:
                # update the set of awaited revisions with the one from the
                # subgroup
                unblocked |= gr[1]
                # output all revisions in the subgroup
                for r in gr[0]:
                    yield r
                # delete the subgroup that you just output
                # unless it is groups[0], in which case you just empty it.
                if targetidx:
                    del groups[targetidx]
                else:
                    gr[0][:] = []
    # Check if we have some subgroups waiting for revisions we are not going
    # to iterate over
    for g in groups:
        for r in g[0]:
            yield r

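# Worked example for _toposort() above (editor's note, not part of the
# original module), using the graph from its docstring
# (parents: 1->0, 4->1, 2->0, 3->2):
#
#   parents = {0: (-1, -1), 1: (0, -1), 2: (0, -1), 3: (2, -1), 4: (1, -1)}
#   list(_toposort([0, 1, 2, 3, 4], lambda r: parents[r]))
#   # -> [4, 1, 3, 2, 0]
#
# Each branch comes out as one uninterrupted run, then the shared root; real
# callers pass repo.changelog.parentrevs as parentsfunc (see sort() above).
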
@predicate('subrepo([pattern])')
def subrepo(repo, subset, x):
    """Changesets that add, modify or remove the given subrepo. If no subrepo
    pattern is named, any subrepo changes are returned.
    """
    # i18n: "subrepo" is a keyword
    args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
    pat = None
    if len(args) != 0:
        pat = getstring(args[0], _("subrepo requires a pattern"))

    m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])

    def submatches(names):
        k, p, m = util.stringmatcher(pat)
        for name in names:
            if m(name):
                yield name

    def matches(x):
        c = repo[x]
        s = repo.status(c.p1().node(), c.node(), match=m)

        if pat is None:
            return s.added or s.modified or s.removed

        if s.added:
            return any(submatches(c.substate.keys()))

        if s.modified:
            subs = set(c.p1().substate.keys())
            subs.update(c.substate.keys())

            for path in submatches(subs):
                if c.p1().substate.get(path) != c.substate.get(path):
                    return True

        if s.removed:
            return any(submatches(c.p1().substate.keys()))

        return False

    return subset.filter(matches, condrepr=('<subrepo %r>', pat))

def _substringmatcher(pattern, casesensitive=True):
    kind, pattern, matcher = util.stringmatcher(pattern,
                                                casesensitive=casesensitive)
    if kind == 'literal':
        if not casesensitive:
            pattern = encoding.lower(pattern)
            matcher = lambda s: pattern in encoding.lower(s)
        else:
            matcher = lambda s: pattern in s
    return kind, pattern, matcher

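# Illustrative behaviour of _substringmatcher() above (editor's note, not part
# of the original module):
#
#   kind, pat, m = _substringmatcher('joe', casesensitive=False)
#   # kind == 'literal', and m('Joe User <joe@example.com>') is True because
#   # both sides are lower-cased before the substring test.
#
# Non-literal kinds ('re:', ...) keep the matcher returned by
# util.stringmatcher unchanged.
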
@predicate('tag([name])', safe=True)
def tag(repo, subset, x):
    """The specified tag by name, or all tagged revisions if no name is given.

    Pattern matching is supported for `name`. See
    :hg:`help revisions.patterns`.
    """
    # i18n: "tag" is a keyword
    args = getargs(x, 0, 1, _("tag takes one or no arguments"))
    cl = repo.changelog
    if args:
        pattern = getstring(args[0],
                            # i18n: "tag" is a keyword
                            _('the argument to tag must be a string'))
        kind, pattern, matcher = util.stringmatcher(pattern)
        if kind == 'literal':
            # avoid resolving all tags
            tn = repo._tagscache.tags.get(pattern, None)
            if tn is None:
                raise error.RepoLookupError(_("tag '%s' does not exist")
                                            % pattern)
            s = set([repo[tn].rev()])
        else:
            s = set([cl.rev(n) for t, n in repo.tagslist() if matcher(t)])
    else:
        s = set([cl.rev(n) for t, n in repo.tagslist() if t != 'tip'])
    return subset & s

@predicate('tagged', safe=True)
def tagged(repo, subset, x):
    return tag(repo, subset, x)

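# Illustrative usage of tag() above (editor's note, not part of the original
# module):
#
#   hg log -r "tag()"            # every tagged revision (except 'tip')
#   hg log -r "tag('1.9')"       # the revision carrying exactly tag 1.9
#   hg log -r "tag('re:^v\d')"   # revisions whose tag matches the regexp
#
# As the code above notes, the literal form avoids resolving the full tag
# list.
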
@predicate('unstable()', safe=True)
def unstable(repo, subset, x):
    """Non-obsolete changesets with obsolete ancestors.
    """
    # i18n: "unstable" is a keyword
    getargs(x, 0, 0, _("unstable takes no arguments"))
    unstables = obsmod.getrevs(repo, 'unstable')
    return subset & unstables


@predicate('user(string)', safe=True)
def user(repo, subset, x):
    """User name contains string. The match is case-insensitive.

    Pattern matching is supported for `string`. See
    :hg:`help revisions.patterns`.
    """
    return author(repo, subset, x)

@predicate('wdir', safe=True)
def wdir(repo, subset, x):
    """Working directory. (EXPERIMENTAL)"""
    # i18n: "wdir" is a keyword
    getargs(x, 0, 0, _("wdir takes no arguments"))
    if node.wdirrev in subset or isinstance(subset, fullreposet):
        return baseset([node.wdirrev])
    return baseset()

def _orderedlist(repo, subset, x):
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    # remove duplicates here. it's difficult for caller to deduplicate sets
    # because different symbols can point to the same rev.
    cl = repo.changelog
    ls = []
    seen = set()
    for t in s.split('\0'):
        try:
            # fast path for integer revision
            r = int(t)
            if str(r) != t or r not in cl:
                raise ValueError
            revs = [r]
        except ValueError:
            revs = stringset(repo, subset, t)

        for r in revs:
            if r in seen:
                continue
            if (r in subset
                or r == node.nullrev and isinstance(subset, fullreposet)):
                ls.append(r)
            seen.add(r)
    return baseset(ls)

# for internal use
@predicate('_list', safe=True, takeorder=True)
def _list(repo, subset, x, order):
    if order == followorder:
        # slow path to take the subset order
        return subset & _orderedlist(repo, fullreposet(repo), x)
    else:
        return _orderedlist(repo, subset, x)

def _orderedintlist(repo, subset, x):
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    ls = [int(r) for r in s.split('\0')]
    s = subset
    return baseset([r for r in ls if r in s])

# for internal use
@predicate('_intlist', safe=True, takeorder=True)
def _intlist(repo, subset, x, order):
    if order == followorder:
        # slow path to take the subset order
        return subset & _orderedintlist(repo, fullreposet(repo), x)
    else:
        return _orderedintlist(repo, subset, x)

def _orderedhexlist(repo, subset, x):
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    cl = repo.changelog
    ls = [cl.rev(node.bin(r)) for r in s.split('\0')]
    s = subset
    return baseset([r for r in ls if r in s])

# for internal use
@predicate('_hexlist', safe=True, takeorder=True)
def _hexlist(repo, subset, x, order):
    if order == followorder:
        # slow path to take the subset order
        return subset & _orderedhexlist(repo, fullreposet(repo), x)
    else:
        return _orderedhexlist(repo, subset, x)

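# Editor's note (not part of the original module): the underscore-prefixed
# predicates above are internal plumbing; they let other code splice
# pre-computed revision lists into a revset cheaply, e.g.
#
#   "_intlist('5\x007\x0012')"   # revs 5, 7 and 12, '\0'-separated as in the
#                                # split('\0') calls above
#
# With a 'follow' ordering requirement the result takes the subset's order;
# otherwise the order of the given list is preserved.
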
methods = {
    "range": rangeset,
    "rangepre": rangepre,
    "dagrange": dagrange,
    "string": stringset,
    "symbol": stringset,
    "and": andset,
    "or": orset,
    "not": notset,
    "difference": differenceset,
    "list": listset,
    "keyvalue": keyvaluepair,
    "func": func,
    "ancestor": ancestorspec,
    "parent": parentspec,
    "parentpost": parentpost,
}

# Constants for ordering requirement, used in _analyze():
#
# If 'define', any nested functions and operations can change the ordering of
# the entries in the set. If 'follow', any nested functions and operations
# should take the ordering specified by the first operand to the '&' operator.
#
# For instance,
#
#   X & (Y | Z)
#   ^   ^^^^^^^
#   |   follow
#   define
#
# will be evaluated as 'or(y(x()), z(x()))', where 'x()' can change the order
# of the entries in the set, but 'y()', 'z()' and 'or()' shouldn't.
#
# 'any' means the order doesn't matter. For instance,
#
#   X & !Y
#       ^
#       any
#
# 'y()' can either enforce its ordering requirement or take the ordering
# specified by 'x()' because 'not()' doesn't care about the order.
#
# Transition of ordering requirement:
#
# 1. starts with 'define'
# 2. shifts to 'follow' by 'x & y'
# 3. changes back to 'define' on function call 'f(x)' or function-like
#    operation 'x (f) y' because 'f' may have its own ordering requirement
#    for 'x' and 'y' (e.g. 'first(x)')
#
anyorder = 'any'        # don't care about the order
defineorder = 'define'  # should define the order
followorder = 'follow'  # must follow the current order

# transition table for 'x & y', from the current expression 'x' to 'y'
_tofolloworder = {
    anyorder: anyorder,
    defineorder: followorder,
    followorder: followorder,
}

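# A minimal sketch of how the table above is read (editor's note, not part of
# the original module): while analyzing 'x & y', the requirement for 'y' is
# derived from the requirement currently imposed on 'x':
#
#   _tofolloworder[defineorder] == followorder  # 'y' must follow the order
#                                               # defined by 'x'
#   _tofolloworder[anyorder] == anyorder        # nobody cares, stay 'any'
#
# _analyze() below applies exactly this lookup for the 'and' operator.
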
def _matchonly(revs, bases):
    """
    >>> f = lambda *args: _matchonly(*map(parse, args))
    >>> f('ancestors(A)', 'not ancestors(B)')
    ('list', ('symbol', 'A'), ('symbol', 'B'))
    """
    if (revs is not None
        and revs[0] == 'func'
        and getsymbol(revs[1]) == 'ancestors'
        and bases is not None
        and bases[0] == 'not'
        and bases[1][0] == 'func'
        and getsymbol(bases[1][1]) == 'ancestors'):
        return ('list', revs[2], bases[1][2])

def _fixops(x):
    """Rewrite raw parsed tree to resolve ambiguous syntax which cannot be
    handled well by our simple top-down parser"""
    if not isinstance(x, tuple):
        return x

    op = x[0]
    if op == 'parent':
        # x^:y means (x^) : y, not x ^ (:y)
        # x^: means (x^) :, not x ^ (:)
        post = ('parentpost', x[1])
        if x[2][0] == 'dagrangepre':
            return _fixops(('dagrange', post, x[2][1]))
        elif x[2][0] == 'rangepre':
            return _fixops(('range', post, x[2][1]))
        elif x[2][0] == 'rangeall':
            return _fixops(('rangepost', post))
    elif op == 'or':
        # make number of arguments deterministic:
        # x + y + z -> (or x y z) -> (or (list x y z))
        return (op, _fixops(('list',) + x[1:]))

    return (op,) + tuple(_fixops(y) for y in x[1:])

def _analyze(x, order):
    if x is None:
        return x

    op = x[0]
    if op == 'minus':
        return _analyze(('and', x[1], ('not', x[2])), order)
    elif op == 'only':
        t = ('func', ('symbol', 'only'), ('list', x[1], x[2]))
        return _analyze(t, order)
    elif op == 'onlypost':
        return _analyze(('func', ('symbol', 'only'), x[1]), order)
    elif op == 'dagrangepre':
        return _analyze(('func', ('symbol', 'ancestors'), x[1]), order)
    elif op == 'dagrangepost':
        return _analyze(('func', ('symbol', 'descendants'), x[1]), order)
    elif op == 'rangeall':
        return _analyze(('rangepre', ('string', 'tip')), order)
    elif op == 'rangepost':
        return _analyze(('range', x[1], ('string', 'tip')), order)
    elif op == 'negate':
        s = getstring(x[1], _("can't negate that"))
        return _analyze(('string', '-' + s), order)
    elif op in ('string', 'symbol'):
        return x
    elif op == 'and':
        ta = _analyze(x[1], order)
        tb = _analyze(x[2], _tofolloworder[order])
        return (op, ta, tb, order)
    elif op == 'or':
        return (op, _analyze(x[1], order), order)
    elif op == 'not':
        return (op, _analyze(x[1], anyorder), order)
    elif op in ('rangepre', 'parentpost'):
        return (op, _analyze(x[1], defineorder), order)
    elif op == 'group':
        return _analyze(x[1], order)
    elif op in ('dagrange', 'range', 'parent', 'ancestor'):
        ta = _analyze(x[1], defineorder)
        tb = _analyze(x[2], defineorder)
        return (op, ta, tb, order)
    elif op == 'list':
        return (op,) + tuple(_analyze(y, order) for y in x[1:])
    elif op == 'keyvalue':
        return (op, x[1], _analyze(x[2], order))
    elif op == 'func':
        f = getsymbol(x[1])
        d = defineorder
        if f == 'present':
            # 'present(set)' is known to return the argument set with no
            # modification, so forward the current order to its argument
            d = order
        return (op, x[1], _analyze(x[2], d), order)
    raise ValueError('invalid operator %r' % op)

def analyze(x, order=defineorder):
    """Transform raw parsed tree to evaluatable tree which can be fed to
    optimize() or getset()

    All pseudo operations should be mapped to real operations or functions
    defined in methods or symbols table respectively.

    'order' specifies how the current expression 'x' is ordered (see the
    constants defined above.)
    """
    return _analyze(x, order)

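# Worked example for analyze() above (editor's note, not part of the original
# module): analyzing the parse tree of "a and b" appends the ordering
# requirement to the 'and' node while leaving the leaf symbols untouched:
#
#   analyze(('and', ('symbol', 'a'), ('symbol', 'b')))
#   # -> ('and', ('symbol', 'a'), ('symbol', 'b'), 'define')
#
# The second operand is analyzed under 'follow' (via _tofolloworder), but
# plain symbols carry no order annotation of their own.
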
2553 def _optimize(x, small):
2554 def _optimize(x, small):
2554 if x is None:
2555 if x is None:
2555 return 0, x
2556 return 0, x
2556
2557
2557 smallbonus = 1
2558 smallbonus = 1
2558 if small:
2559 if small:
2559 smallbonus = .5
2560 smallbonus = .5
2560
2561
2561 op = x[0]
2562 op = x[0]
2562 if op in ('string', 'symbol'):
2563 if op in ('string', 'symbol'):
2563 return smallbonus, x # single revisions are small
2564 return smallbonus, x # single revisions are small
2564 elif op == 'and':
2565 elif op == 'and':
2565 wa, ta = _optimize(x[1], True)
2566 wa, ta = _optimize(x[1], True)
2566 wb, tb = _optimize(x[2], True)
2567 wb, tb = _optimize(x[2], True)
2567 order = x[3]
2568 order = x[3]
2568 w = min(wa, wb)
2569 w = min(wa, wb)
2569
2570
2570 # (::x and not ::y)/(not ::y and ::x) have a fast path
2571 # (::x and not ::y)/(not ::y and ::x) have a fast path
2571 tm = _matchonly(ta, tb) or _matchonly(tb, ta)
2572 tm = _matchonly(ta, tb) or _matchonly(tb, ta)
2572 if tm:
2573 if tm:
2573 return w, ('func', ('symbol', 'only'), tm, order)
2574 return w, ('func', ('symbol', 'only'), tm, order)
2574
2575
2575 if tb is not None and tb[0] == 'not':
2576 if tb is not None and tb[0] == 'not':
2576 return wa, ('difference', ta, tb[1], order)
2577 return wa, ('difference', ta, tb[1], order)
2577
2578
2578 if wa > wb:
2579 if wa > wb:
2579 return w, (op, tb, ta, order)
2580 return w, (op, tb, ta, order)
2580 return w, (op, ta, tb, order)
2581 return w, (op, ta, tb, order)
2581 elif op == 'or':
2582 elif op == 'or':
2582 # fast path for machine-generated expressions that are likely to have
2583 # fast path for machine-generated expressions that are likely to have
2583 # lots of trivial revisions: 'a + b + c()' to '_list(a b) + c()'
2584 # lots of trivial revisions: 'a + b + c()' to '_list(a b) + c()'
2584 order = x[2]
2585 order = x[2]
2585 ws, ts, ss = [], [], []
2586 ws, ts, ss = [], [], []
2586 def flushss():
2587 def flushss():
2587 if not ss:
2588 if not ss:
2588 return
2589 return
2589 if len(ss) == 1:
2590 if len(ss) == 1:
2590 w, t = ss[0]
2591 w, t = ss[0]
2591 else:
2592 else:
2592 s = '\0'.join(t[1] for w, t in ss)
2593 s = '\0'.join(t[1] for w, t in ss)
2593 y = ('func', ('symbol', '_list'), ('string', s), order)
2594 y = ('func', ('symbol', '_list'), ('string', s), order)
2594 w, t = _optimize(y, False)
2595 w, t = _optimize(y, False)
2595 ws.append(w)
2596 ws.append(w)
2596 ts.append(t)
2597 ts.append(t)
2597 del ss[:]
2598 del ss[:]
2598 for y in getlist(x[1]):
2599 for y in getlist(x[1]):
2599 w, t = _optimize(y, False)
2600 w, t = _optimize(y, False)
2600 if t is not None and (t[0] == 'string' or t[0] == 'symbol'):
2601 if t is not None and (t[0] == 'string' or t[0] == 'symbol'):
2601 ss.append((w, t))
2602 ss.append((w, t))
2602 continue
2603 continue
2603 flushss()
2604 flushss()
2604 ws.append(w)
2605 ws.append(w)
2605 ts.append(t)
2606 ts.append(t)
2606 flushss()
2607 flushss()
2607 if len(ts) == 1:
2608 if len(ts) == 1:
2608 return ws[0], ts[0] # 'or' operation is fully optimized out
2609 return ws[0], ts[0] # 'or' operation is fully optimized out
2609 # we can't reorder trees by weight because it would change the order.
2610 # we can't reorder trees by weight because it would change the order.
2610 # ("sort(a + b)" == "sort(b + a)", but "a + b" != "b + a")
2611 # ("sort(a + b)" == "sort(b + a)", but "a + b" != "b + a")
2611 # ts = tuple(t for w, t in sorted(zip(ws, ts), key=lambda wt: wt[0]))
2612 # ts = tuple(t for w, t in sorted(zip(ws, ts), key=lambda wt: wt[0]))
2612 return max(ws), (op, ('list',) + tuple(ts), order)
2613 return max(ws), (op, ('list',) + tuple(ts), order)
2613 elif op == 'not':
2614 elif op == 'not':
2614 # Optimize not public() to _notpublic() because we have a fast version
2615 # Optimize not public() to _notpublic() because we have a fast version
2615 if x[1][:3] == ('func', ('symbol', 'public'), None):
2616 if x[1][:3] == ('func', ('symbol', 'public'), None):
2616 order = x[1][3]
2617 order = x[1][3]
2617 newsym = ('func', ('symbol', '_notpublic'), None, order)
2618 newsym = ('func', ('symbol', '_notpublic'), None, order)
2618 o = _optimize(newsym, not small)
2619 o = _optimize(newsym, not small)
2619 return o[0], o[1]
2620 return o[0], o[1]
2620 else:
2621 else:
2621 o = _optimize(x[1], not small)
2622 o = _optimize(x[1], not small)
2622 order = x[2]
2623 order = x[2]
2623 return o[0], (op, o[1], order)
2624 return o[0], (op, o[1], order)
2624 elif op in ('rangepre', 'parentpost'):
2625 elif op in ('rangepre', 'parentpost'):
2625 o = _optimize(x[1], small)
2626 o = _optimize(x[1], small)
2626 order = x[2]
2627 order = x[2]
2627 return o[0], (op, o[1], order)
2628 return o[0], (op, o[1], order)
2628 elif op in ('dagrange', 'range', 'parent', 'ancestor'):
2629 elif op in ('dagrange', 'range', 'parent', 'ancestor'):
2629 wa, ta = _optimize(x[1], small)
2630 wa, ta = _optimize(x[1], small)
2630 wb, tb = _optimize(x[2], small)
2631 wb, tb = _optimize(x[2], small)
2631 order = x[3]
2632 order = x[3]
2632 return wa + wb, (op, ta, tb, order)
2633 return wa + wb, (op, ta, tb, order)
2633 elif op == 'list':
2634 elif op == 'list':
2634 ws, ts = zip(*(_optimize(y, small) for y in x[1:]))
2635 ws, ts = zip(*(_optimize(y, small) for y in x[1:]))
2635 return sum(ws), (op,) + ts
2636 return sum(ws), (op,) + ts
2636 elif op == 'keyvalue':
2637 elif op == 'keyvalue':
2637 w, t = _optimize(x[2], small)
2638 w, t = _optimize(x[2], small)
2638 return w, (op, x[1], t)
2639 return w, (op, x[1], t)
2639 elif op == 'func':
2640 elif op == 'func':
2640 f = getsymbol(x[1])
2641 f = getsymbol(x[1])
2641 wa, ta = _optimize(x[2], small)
2642 wa, ta = _optimize(x[2], small)
2642 if f in ('author', 'branch', 'closed', 'date', 'desc', 'file', 'grep',
2643 if f in ('author', 'branch', 'closed', 'date', 'desc', 'file', 'grep',
2643 'keyword', 'outgoing', 'user', 'destination'):
2644 'keyword', 'outgoing', 'user', 'destination'):
2644 w = 10 # slow
2645 w = 10 # slow
2645 elif f in ('modifies', 'adds', 'removes'):
2646 elif f in ('modifies', 'adds', 'removes'):
2646 w = 30 # slower
2647 w = 30 # slower
2647 elif f == "contains":
2648 elif f == "contains":
2648 w = 100 # very slow
2649 w = 100 # very slow
2649 elif f == "ancestor":
2650 elif f == "ancestor":
2650 w = 1 * smallbonus
2651 w = 1 * smallbonus
2651 elif f in ('reverse', 'limit', 'first', 'wdir', '_intlist'):
2652 elif f in ('reverse', 'limit', 'first', 'wdir', '_intlist'):
2652 w = 0
2653 w = 0
2653 elif f == "sort":
2654 elif f == "sort":
2654 w = 10 # assume most sorts look at changelog
2655 w = 10 # assume most sorts look at changelog
2655 else:
2656 else:
2656 w = 1
2657 w = 1
2657 order = x[3]
2658 order = x[3]
2658 return w + wa, (op, x[1], ta, order)
2659 return w + wa, (op, x[1], ta, order)
2659 raise ValueError('invalid operator %r' % op)
2660 raise ValueError('invalid operator %r' % op)
2660
2661
2661 def optimize(tree):
2662 def optimize(tree):
2662 """Optimize evaluatable tree
2663 """Optimize evaluatable tree
2663
2664
2664 All pseudo operations should be transformed beforehand.
2665 All pseudo operations should be transformed beforehand.
2665 """
2666 """
2666 _weight, newtree = _optimize(tree, small=True)
2667 _weight, newtree = _optimize(tree, small=True)
2667 return newtree
2668 return newtree
2668
2669
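# A minimal sketch of the parse -> analyze -> optimize pipeline described
# above; the 'not public()' form is rewritten to the faster '_notpublic()'
# variant by _optimize (the trailing order constant is omitted here):
#
#   >>> tree = optimize(analyze(parse('not public()')))
#   >>> tree[:2]
#   ('func', ('symbol', '_notpublic'))
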
2669 # the set of valid characters for the initial letter of symbols in
2670 # the set of valid characters for the initial letter of symbols in
2670 # alias declarations and definitions
2671 # alias declarations and definitions
2671 _aliassyminitletters = _syminitletters | set(pycompat.sysstr('$'))
2672 _aliassyminitletters = _syminitletters | set(pycompat.sysstr('$'))
2672
2673
2673 def _parsewith(spec, lookup=None, syminitletters=None):
2674 def _parsewith(spec, lookup=None, syminitletters=None):
2674 """Generate a parse tree of given spec with given tokenizing options
2675 """Generate a parse tree of given spec with given tokenizing options
2675
2676
2676 >>> _parsewith('foo($1)', syminitletters=_aliassyminitletters)
2677 >>> _parsewith('foo($1)', syminitletters=_aliassyminitletters)
2677 ('func', ('symbol', 'foo'), ('symbol', '$1'))
2678 ('func', ('symbol', 'foo'), ('symbol', '$1'))
2678 >>> _parsewith('$1')
2679 >>> _parsewith('$1')
2679 Traceback (most recent call last):
2680 Traceback (most recent call last):
2680 ...
2681 ...
2681 ParseError: ("syntax error in revset '$1'", 0)
2682 ParseError: ("syntax error in revset '$1'", 0)
2682 >>> _parsewith('foo bar')
2683 >>> _parsewith('foo bar')
2683 Traceback (most recent call last):
2684 Traceback (most recent call last):
2684 ...
2685 ...
2685 ParseError: ('invalid token', 4)
2686 ParseError: ('invalid token', 4)
2686 """
2687 """
2687 p = parser.parser(elements)
2688 p = parser.parser(elements)
2688 tree, pos = p.parse(tokenize(spec, lookup=lookup,
2689 tree, pos = p.parse(tokenize(spec, lookup=lookup,
2689 syminitletters=syminitletters))
2690 syminitletters=syminitletters))
2690 if pos != len(spec):
2691 if pos != len(spec):
2691 raise error.ParseError(_('invalid token'), pos)
2692 raise error.ParseError(_('invalid token'), pos)
2692 return _fixops(parser.simplifyinfixops(tree, ('list', 'or')))
2693 return _fixops(parser.simplifyinfixops(tree, ('list', 'or')))
2693
2694
2694 class _aliasrules(parser.basealiasrules):
2695 class _aliasrules(parser.basealiasrules):
2695 """Parsing and expansion rule set of revset aliases"""
2696 """Parsing and expansion rule set of revset aliases"""
2696 _section = _('revset alias')
2697 _section = _('revset alias')
2697
2698
2698 @staticmethod
2699 @staticmethod
2699 def _parse(spec):
2700 def _parse(spec):
2700 """Parse alias declaration/definition ``spec``
2701 """Parse alias declaration/definition ``spec``
2701
2702
2702 This allows symbol names to use also ``$`` as an initial letter
2703 This allows symbol names to use also ``$`` as an initial letter
2703 (for backward compatibility), and callers of this function should
2704 (for backward compatibility), and callers of this function should
2704 examine whether ``$`` is used also for unexpected symbols or not.
2705 examine whether ``$`` is used also for unexpected symbols or not.
2705 """
2706 """
2706 return _parsewith(spec, syminitletters=_aliassyminitletters)
2707 return _parsewith(spec, syminitletters=_aliassyminitletters)
2707
2708
2708 @staticmethod
2709 @staticmethod
2709 def _trygetfunc(tree):
2710 def _trygetfunc(tree):
2710 if tree[0] == 'func' and tree[1][0] == 'symbol':
2711 if tree[0] == 'func' and tree[1][0] == 'symbol':
2711 return tree[1][1], getlist(tree[2])
2712 return tree[1][1], getlist(tree[2])
2712
2713
2713 def expandaliases(ui, tree):
2714 def expandaliases(ui, tree):
2714 aliases = _aliasrules.buildmap(ui.configitems('revsetalias'))
2715 aliases = _aliasrules.buildmap(ui.configitems('revsetalias'))
2715 tree = _aliasrules.expand(aliases, tree)
2716 tree = _aliasrules.expand(aliases, tree)
2716 # warn about problematic (but not referred) aliases
2717 # warn about problematic (but not referred) aliases
2717 for name, alias in sorted(aliases.iteritems()):
2718 for name, alias in sorted(aliases.iteritems()):
2718 if alias.error and not alias.warned:
2719 if alias.error and not alias.warned:
2719 ui.warn(_('warning: %s\n') % (alias.error))
2720 ui.warn(_('warning: %s\n') % (alias.error))
2720 alias.warned = True
2721 alias.warned = True
2721 return tree
2722 return tree
2722
2723
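# Aliases are read from the [revsetalias] configuration section; a
# hypothetical entry such as
#
#   [revsetalias]
#   mine = author("alice") and not public()
#
# is compiled by _aliasrules.buildmap() above and substituted wherever the
# symbol 'mine' (or a parameterized alias call) appears in the tree.
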
2723 def foldconcat(tree):
2724 def foldconcat(tree):
2724 """Fold elements to be concatenated by `##`
2725 """Fold elements to be concatenated by `##`
2725 """
2726 """
2726 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2727 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2727 return tree
2728 return tree
2728 if tree[0] == '_concat':
2729 if tree[0] == '_concat':
2729 pending = [tree]
2730 pending = [tree]
2730 l = []
2731 l = []
2731 while pending:
2732 while pending:
2732 e = pending.pop()
2733 e = pending.pop()
2733 if e[0] == '_concat':
2734 if e[0] == '_concat':
2734 pending.extend(reversed(e[1:]))
2735 pending.extend(reversed(e[1:]))
2735 elif e[0] in ('string', 'symbol'):
2736 elif e[0] in ('string', 'symbol'):
2736 l.append(e[1])
2737 l.append(e[1])
2737 else:
2738 else:
2738 msg = _("\"##\" can't concatenate \"%s\" element") % (e[0])
2739 msg = _("\"##\" can't concatenate \"%s\" element") % (e[0])
2739 raise error.ParseError(msg)
2740 raise error.ParseError(msg)
2740 return ('string', ''.join(l))
2741 return ('string', ''.join(l))
2741 else:
2742 else:
2742 return tuple(foldconcat(t) for t in tree)
2743 return tuple(foldconcat(t) for t in tree)
2743
2744
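# A small illustration of the folding performed above on a hand-built tree:
#
#   >>> foldconcat(('_concat', ('string', 'release-'), ('symbol', '1.0')))
#   ('string', 'release-1.0')
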
2744 def parse(spec, lookup=None):
2745 def parse(spec, lookup=None):
2745 return _parsewith(spec, lookup=lookup)
2746 return _parsewith(spec, lookup=lookup)
2746
2747
2747 def posttreebuilthook(tree, repo):
2748 def posttreebuilthook(tree, repo):
2748 # hook for extensions to execute code on the optimized tree
2749 # hook for extensions to execute code on the optimized tree
2749 pass
2750 pass
2750
2751
2751 def match(ui, spec, repo=None, order=defineorder):
2752 def match(ui, spec, repo=None, order=defineorder):
2752 """Create a matcher for a single revision spec
2753 """Create a matcher for a single revision spec
2753
2754
2754 If order=followorder, a matcher takes the ordering specified by the input
2755 If order=followorder, a matcher takes the ordering specified by the input
2755 set.
2756 set.
2756 """
2757 """
2757 return matchany(ui, [spec], repo=repo, order=order)
2758 return matchany(ui, [spec], repo=repo, order=order)
2758
2759
2759 def matchany(ui, specs, repo=None, order=defineorder):
2760 def matchany(ui, specs, repo=None, order=defineorder):
2760 """Create a matcher that will include any revisions matching one of the
2761 """Create a matcher that will include any revisions matching one of the
2761 given specs
2762 given specs
2762
2763
2763 If order=followorder, a matcher takes the ordering specified by the input
2764 If order=followorder, a matcher takes the ordering specified by the input
2764 set.
2765 set.
2765 """
2766 """
2766 if not specs:
2767 if not specs:
2767 def mfunc(repo, subset=None):
2768 def mfunc(repo, subset=None):
2768 return baseset()
2769 return baseset()
2769 return mfunc
2770 return mfunc
2770 if not all(specs):
2771 if not all(specs):
2771 raise error.ParseError(_("empty query"))
2772 raise error.ParseError(_("empty query"))
2772 lookup = None
2773 lookup = None
2773 if repo:
2774 if repo:
2774 lookup = repo.__contains__
2775 lookup = repo.__contains__
2775 if len(specs) == 1:
2776 if len(specs) == 1:
2776 tree = parse(specs[0], lookup)
2777 tree = parse(specs[0], lookup)
2777 else:
2778 else:
2778 tree = ('or', ('list',) + tuple(parse(s, lookup) for s in specs))
2779 tree = ('or', ('list',) + tuple(parse(s, lookup) for s in specs))
2779
2780
2780 if ui:
2781 if ui:
2781 tree = expandaliases(ui, tree)
2782 tree = expandaliases(ui, tree)
2782 tree = foldconcat(tree)
2783 tree = foldconcat(tree)
2783 tree = analyze(tree, order)
2784 tree = analyze(tree, order)
2784 tree = optimize(tree)
2785 tree = optimize(tree)
2785 posttreebuilthook(tree, repo)
2786 posttreebuilthook(tree, repo)
2786 return makematcher(tree)
2787 return makematcher(tree)
2787
2788
2788 def makematcher(tree):
2789 def makematcher(tree):
2789 """Create a matcher from an evaluatable tree"""
2790 """Create a matcher from an evaluatable tree"""
2790 def mfunc(repo, subset=None):
2791 def mfunc(repo, subset=None):
2791 if subset is None:
2792 if subset is None:
2792 subset = fullreposet(repo)
2793 subset = fullreposet(repo)
2793 if util.safehasattr(subset, 'isascending'):
2794 if util.safehasattr(subset, 'isascending'):
2794 result = getset(repo, subset, tree)
2795 result = getset(repo, subset, tree)
2795 else:
2796 else:
2796 result = getset(repo, baseset(subset), tree)
2797 result = getset(repo, baseset(subset), tree)
2797 return result
2798 return result
2798 return mfunc
2799 return mfunc
2799
2800
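# Typical usage, assuming 'ui' and 'repo' objects supplied by a caller
# (a sketch, not lifted from actual callers):
#
#   >>> m = match(ui, 'draft() and user("alice")', repo)
#   >>> revs = m(repo)   # a smartset evaluated over fullreposet(repo)
#
# Passing an explicit 'subset' to the matcher restricts evaluation to those
# revisions instead of the whole repository.
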
2800 def formatspec(expr, *args):
2801 def formatspec(expr, *args):
2801 '''
2802 '''
2802 This is a convenience function for using revsets internally, and
2803 This is a convenience function for using revsets internally, and
2803 escapes arguments appropriately. Aliases are intentionally ignored
2804 escapes arguments appropriately. Aliases are intentionally ignored
2804 so that intended expression behavior isn't accidentally subverted.
2805 so that intended expression behavior isn't accidentally subverted.
2805
2806
2806 Supported arguments:
2807 Supported arguments:
2807
2808
2808 %r = revset expression, parenthesized
2809 %r = revset expression, parenthesized
2809 %d = int(arg), no quoting
2810 %d = int(arg), no quoting
2810 %s = string(arg), escaped and single-quoted
2811 %s = string(arg), escaped and single-quoted
2811 %b = arg.branch(), escaped and single-quoted
2812 %b = arg.branch(), escaped and single-quoted
2812 %n = hex(arg), single-quoted
2813 %n = hex(arg), single-quoted
2813 %% = a literal '%'
2814 %% = a literal '%'
2814
2815
2815 Prefixing the type with 'l' specifies a parenthesized list of that type.
2816 Prefixing the type with 'l' specifies a parenthesized list of that type.
2816
2817
2817 >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
2818 >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
2818 '(10 or 11):: and ((this()) or (that()))'
2819 '(10 or 11):: and ((this()) or (that()))'
2819 >>> formatspec('%d:: and not %d::', 10, 20)
2820 >>> formatspec('%d:: and not %d::', 10, 20)
2820 '10:: and not 20::'
2821 '10:: and not 20::'
2821 >>> formatspec('%ld or %ld', [], [1])
2822 >>> formatspec('%ld or %ld', [], [1])
2822 "_list('') or 1"
2823 "_list('') or 1"
2823 >>> formatspec('keyword(%s)', 'foo\\xe9')
2824 >>> formatspec('keyword(%s)', 'foo\\xe9')
2824 "keyword('foo\\\\xe9')"
2825 "keyword('foo\\\\xe9')"
2825 >>> b = lambda: 'default'
2826 >>> b = lambda: 'default'
2826 >>> b.branch = b
2827 >>> b.branch = b
2827 >>> formatspec('branch(%b)', b)
2828 >>> formatspec('branch(%b)', b)
2828 "branch('default')"
2829 "branch('default')"
2829 >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
2830 >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
2830 "root(_list('a\\x00b\\x00c\\x00d'))"
2831 "root(_list('a\\x00b\\x00c\\x00d'))"
2831 '''
2832 '''
2832
2833
2833 def quote(s):
2834 def quote(s):
2834 return repr(str(s))
2835 return repr(str(s))
2835
2836
2836 def argtype(c, arg):
2837 def argtype(c, arg):
2837 if c == 'd':
2838 if c == 'd':
2838 return str(int(arg))
2839 return str(int(arg))
2839 elif c == 's':
2840 elif c == 's':
2840 return quote(arg)
2841 return quote(arg)
2841 elif c == 'r':
2842 elif c == 'r':
2842 parse(arg) # make sure syntax errors are confined
2843 parse(arg) # make sure syntax errors are confined
2843 return '(%s)' % arg
2844 return '(%s)' % arg
2844 elif c == 'n':
2845 elif c == 'n':
2845 return quote(node.hex(arg))
2846 return quote(node.hex(arg))
2846 elif c == 'b':
2847 elif c == 'b':
2847 return quote(arg.branch())
2848 return quote(arg.branch())
2848
2849
2849 def listexp(s, t):
2850 def listexp(s, t):
2850 l = len(s)
2851 l = len(s)
2851 if l == 0:
2852 if l == 0:
2852 return "_list('')"
2853 return "_list('')"
2853 elif l == 1:
2854 elif l == 1:
2854 return argtype(t, s[0])
2855 return argtype(t, s[0])
2855 elif t == 'd':
2856 elif t == 'd':
2856 return "_intlist('%s')" % "\0".join(str(int(a)) for a in s)
2857 return "_intlist('%s')" % "\0".join(str(int(a)) for a in s)
2857 elif t == 's':
2858 elif t == 's':
2858 return "_list('%s')" % "\0".join(s)
2859 return "_list('%s')" % "\0".join(s)
2859 elif t == 'n':
2860 elif t == 'n':
2860 return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
2861 return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
2861 elif t == 'b':
2862 elif t == 'b':
2862 return "_list('%s')" % "\0".join(a.branch() for a in s)
2863 return "_list('%s')" % "\0".join(a.branch() for a in s)
2863
2864
2864 m = l // 2
2865 m = l // 2
2865 return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))
2866 return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))
2866
2867
2867 ret = ''
2868 ret = ''
2868 pos = 0
2869 pos = 0
2869 arg = 0
2870 arg = 0
2870 while pos < len(expr):
2871 while pos < len(expr):
2871 c = expr[pos]
2872 c = expr[pos]
2872 if c == '%':
2873 if c == '%':
2873 pos += 1
2874 pos += 1
2874 d = expr[pos]
2875 d = expr[pos]
2875 if d == '%':
2876 if d == '%':
2876 ret += d
2877 ret += d
2877 elif d in 'dsnbr':
2878 elif d in 'dsnbr':
2878 ret += argtype(d, args[arg])
2879 ret += argtype(d, args[arg])
2879 arg += 1
2880 arg += 1
2880 elif d == 'l':
2881 elif d == 'l':
2881 # a list of some type
2882 # a list of some type
2882 pos += 1
2883 pos += 1
2883 d = expr[pos]
2884 d = expr[pos]
2884 ret += listexp(list(args[arg]), d)
2885 ret += listexp(list(args[arg]), d)
2885 arg += 1
2886 arg += 1
2886 else:
2887 else:
2887 raise error.Abort(_('unexpected revspec format character %s')
2888 raise error.Abort(_('unexpected revspec format character %s')
2888 % d)
2889 % d)
2889 else:
2890 else:
2890 ret += c
2891 ret += c
2891 pos += 1
2892 pos += 1
2892
2893
2893 return ret
2894 return ret
2894
2895
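# The escaped result is plain revset syntax, so it can be handed back to
# parse() or match(); for instance:
#
#   >>> formatspec('%ld and user(%s)', [10, 20], 'alice')
#   "_intlist('10\x0020') and user('alice')"
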
2895 def prettyformat(tree):
2896 def prettyformat(tree):
2896 return parser.prettyformat(tree, ('string', 'symbol'))
2897 return parser.prettyformat(tree, ('string', 'symbol'))
2897
2898
2898 def depth(tree):
2899 def depth(tree):
2899 if isinstance(tree, tuple):
2900 if isinstance(tree, tuple):
2900 return max(map(depth, tree)) + 1
2901 return max(map(depth, tree)) + 1
2901 else:
2902 else:
2902 return 0
2903 return 0
2903
2904
2904 def funcsused(tree):
2905 def funcsused(tree):
2905 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2906 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2906 return set()
2907 return set()
2907 else:
2908 else:
2908 funcs = set()
2909 funcs = set()
2909 for s in tree[1:]:
2910 for s in tree[1:]:
2910 funcs |= funcsused(s)
2911 funcs |= funcsused(s)
2911 if tree[0] == 'func':
2912 if tree[0] == 'func':
2912 funcs.add(tree[1][1])
2913 funcs.add(tree[1][1])
2913 return funcs
2914 return funcs
2914
2915
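# Both helpers walk the parsed tuple tree; for example:
#
#   >>> sorted(funcsused(parse('head() and author("alice")')))
#   ['author', 'head']
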
2915 def _formatsetrepr(r):
2916 def _formatsetrepr(r):
2916 """Format an optional printable representation of a set
2917 """Format an optional printable representation of a set
2917
2918
2918 ======== =================================
2919 ======== =================================
2919 type(r) example
2920 type(r) example
2920 ======== =================================
2921 ======== =================================
2921 tuple ('<not %r>', other)
2922 tuple ('<not %r>', other)
2922 str '<branch closed>'
2923 str '<branch closed>'
2923 callable lambda: '<branch %r>' % sorted(b)
2924 callable lambda: '<branch %r>' % sorted(b)
2924 object other
2925 object other
2925 ======== =================================
2926 ======== =================================
2926 """
2927 """
2927 if r is None:
2928 if r is None:
2928 return ''
2929 return ''
2929 elif isinstance(r, tuple):
2930 elif isinstance(r, tuple):
2930 return r[0] % r[1:]
2931 return r[0] % r[1:]
2931 elif isinstance(r, str):
2932 elif isinstance(r, str):
2932 return r
2933 return r
2933 elif callable(r):
2934 elif callable(r):
2934 return r()
2935 return r()
2935 else:
2936 else:
2936 return repr(r)
2937 return repr(r)
2937
2938
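# The accepted forms from the table above, for example:
#
#   >>> _formatsetrepr(None)
#   ''
#   >>> _formatsetrepr(('<not %r>', [1, 2]))
#   '<not [1, 2]>'
#   >>> _formatsetrepr(lambda: '<branch default>')
#   '<branch default>'
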
2938 class abstractsmartset(object):
2939 class abstractsmartset(object):
2939
2940
2940 def __nonzero__(self):
2941 def __nonzero__(self):
2941 """True if the smartset is not empty"""
2942 """True if the smartset is not empty"""
2942 raise NotImplementedError()
2943 raise NotImplementedError()
2943
2944
2944 def __contains__(self, rev):
2945 def __contains__(self, rev):
2945 """provide fast membership testing"""
2946 """provide fast membership testing"""
2946 raise NotImplementedError()
2947 raise NotImplementedError()
2947
2948
2948 def __iter__(self):
2949 def __iter__(self):
2949 """iterate the set in the order it is supposed to be iterated"""
2950 """iterate the set in the order it is supposed to be iterated"""
2950 raise NotImplementedError()
2951 raise NotImplementedError()
2951
2952
2952 # Attributes containing a function to perform a fast iteration in a given
2953 # Attributes containing a function to perform a fast iteration in a given
2953 # direction. A smartset can have none, one, or both defined.
2954 # direction. A smartset can have none, one, or both defined.
2954 #
2955 #
2955 # Default value is None instead of a function returning None to avoid
2956 # Default value is None instead of a function returning None to avoid
2956 # initializing an iterator just for testing if a fast method exists.
2957 # initializing an iterator just for testing if a fast method exists.
2957 fastasc = None
2958 fastasc = None
2958 fastdesc = None
2959 fastdesc = None
2959
2960
2960 def isascending(self):
2961 def isascending(self):
2961 """True if the set will iterate in ascending order"""
2962 """True if the set will iterate in ascending order"""
2962 raise NotImplementedError()
2963 raise NotImplementedError()
2963
2964
2964 def isdescending(self):
2965 def isdescending(self):
2965 """True if the set will iterate in descending order"""
2966 """True if the set will iterate in descending order"""
2966 raise NotImplementedError()
2967 raise NotImplementedError()
2967
2968
2968 def istopo(self):
2969 def istopo(self):
2969 """True if the set will iterate in topological order"""
2970 """True if the set will iterate in topological order"""
2970 raise NotImplementedError()
2971 raise NotImplementedError()
2971
2972
2972 def min(self):
2973 def min(self):
2973 """return the minimum element in the set"""
2974 """return the minimum element in the set"""
2974 if self.fastasc is None:
2975 if self.fastasc is None:
2975 v = min(self)
2976 v = min(self)
2976 else:
2977 else:
2977 for v in self.fastasc():
2978 for v in self.fastasc():
2978 break
2979 break
2979 else:
2980 else:
2980 raise ValueError('arg is an empty sequence')
2981 raise ValueError('arg is an empty sequence')
2981 self.min = lambda: v
2982 self.min = lambda: v
2982 return v
2983 return v
2983
2984
2984 def max(self):
2985 def max(self):
2985 """return the maximum element in the set"""
2986 """return the maximum element in the set"""
2986 if self.fastdesc is None:
2987 if self.fastdesc is None:
2987 return max(self)
2988 return max(self)
2988 else:
2989 else:
2989 for v in self.fastdesc():
2990 for v in self.fastdesc():
2990 break
2991 break
2991 else:
2992 else:
2992 raise ValueError('arg is an empty sequence')
2993 raise ValueError('arg is an empty sequence')
2993 self.max = lambda: v
2994 self.max = lambda: v
2994 return v
2995 return v
2995
2996
2996 def first(self):
2997 def first(self):
2997 """return the first element in the set (user iteration perspective)
2998 """return the first element in the set (user iteration perspective)
2998
2999
2999 Return None if the set is empty"""
3000 Return None if the set is empty"""
3000 raise NotImplementedError()
3001 raise NotImplementedError()
3001
3002
3002 def last(self):
3003 def last(self):
3003 """return the last element in the set (user iteration perspective)
3004 """return the last element in the set (user iteration perspective)
3004
3005
3005 Return None if the set is empty"""
3006 Return None if the set is empty"""
3006 raise NotImplementedError()
3007 raise NotImplementedError()
3007
3008
3008 def __len__(self):
3009 def __len__(self):
3009 """return the length of the smartset
3010 """return the length of the smartset
3010
3011
3011 This can be expensive on a smartset that could otherwise be lazy."""
3012 This can be expensive on a smartset that could otherwise be lazy."""
3012 raise NotImplementedError()
3013 raise NotImplementedError()
3013
3014
3014 def reverse(self):
3015 def reverse(self):
3015 """reverse the expected iteration order"""
3016 """reverse the expected iteration order"""
3016 raise NotImplementedError()
3017 raise NotImplementedError()
3017
3018
3018 def sort(self, reverse=True):
3019 def sort(self, reverse=True):
3019 """get the set to iterate in an ascending or descending order"""
3020 """get the set to iterate in an ascending or descending order"""
3020 raise NotImplementedError()
3021 raise NotImplementedError()
3021
3022
3022 def __and__(self, other):
3023 def __and__(self, other):
3023 """Returns a new object with the intersection of the two collections.
3024 """Returns a new object with the intersection of the two collections.
3024
3025
3025 This is part of the mandatory API for smartset."""
3026 This is part of the mandatory API for smartset."""
3026 if isinstance(other, fullreposet):
3027 if isinstance(other, fullreposet):
3027 return self
3028 return self
3028 return self.filter(other.__contains__, condrepr=other, cache=False)
3029 return self.filter(other.__contains__, condrepr=other, cache=False)
3029
3030
3030 def __add__(self, other):
3031 def __add__(self, other):
3031 """Returns a new object with the union of the two collections.
3032 """Returns a new object with the union of the two collections.
3032
3033
3033 This is part of the mandatory API for smartset."""
3034 This is part of the mandatory API for smartset."""
3034 return addset(self, other)
3035 return addset(self, other)
3035
3036
3036 def __sub__(self, other):
3037 def __sub__(self, other):
3037 """Returns a new object with the subtraction of the two collections.
3038 """Returns a new object with the subtraction of the two collections.
3038
3039
3039 This is part of the mandatory API for smartset."""
3040 This is part of the mandatory API for smartset."""
3040 c = other.__contains__
3041 c = other.__contains__
3041 return self.filter(lambda r: not c(r), condrepr=('<not %r>', other),
3042 return self.filter(lambda r: not c(r), condrepr=('<not %r>', other),
3042 cache=False)
3043 cache=False)
3043
3044
3044 def filter(self, condition, condrepr=None, cache=True):
3045 def filter(self, condition, condrepr=None, cache=True):
3045 """Returns this smartset filtered by condition as a new smartset.
3046 """Returns this smartset filtered by condition as a new smartset.
3046
3047
3047 `condition` is a callable which takes a revision number and returns a
3048 `condition` is a callable which takes a revision number and returns a
3048 boolean. Optional `condrepr` provides a printable representation of
3049 boolean. Optional `condrepr` provides a printable representation of
3049 the given `condition`.
3050 the given `condition`.
3050
3051
3051 This is part of the mandatory API for smartset."""
3052 This is part of the mandatory API for smartset."""
3052 # builtins cannot be cached, but they do not need to be
3053 # builtins cannot be cached, but they do not need to be
3053 if cache and util.safehasattr(condition, 'func_code'):
3054 if cache and util.safehasattr(condition, 'func_code'):
3054 condition = util.cachefunc(condition)
3055 condition = util.cachefunc(condition)
3055 return filteredset(self, condition, condrepr)
3056 return filteredset(self, condition, condrepr)
3056
3057
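# The operators above combine any two smartsets lazily; with the baseset
# class defined below, for example:
#
#   >>> xs, ys = baseset([1, 2, 3]), baseset([2, 3, 4])
#   >>> list(xs & ys), list(xs + ys), list(xs - ys)
#   ([2, 3], [1, 2, 3, 4], [1])
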
3057 class baseset(abstractsmartset):
3058 class baseset(abstractsmartset):
3058 """Basic data structure that represents a revset and contains the basic
3059 """Basic data structure that represents a revset and contains the basic
3059 operation that it should be able to perform.
3060 operation that it should be able to perform.
3060
3061
3061 Every method in this class should be implemented by any smartset class.
3062 Every method in this class should be implemented by any smartset class.
3062 """
3063 """
3063 def __init__(self, data=(), datarepr=None, istopo=False):
3064 def __init__(self, data=(), datarepr=None, istopo=False):
3064 """
3065 """
3065 datarepr: a tuple of (format, obj, ...), a function or an object that
3066 datarepr: a tuple of (format, obj, ...), a function or an object that
3066 provides a printable representation of the given data.
3067 provides a printable representation of the given data.
3067 """
3068 """
3068 self._ascending = None
3069 self._ascending = None
3069 self._istopo = istopo
3070 self._istopo = istopo
3070 if not isinstance(data, list):
3071 if not isinstance(data, list):
3071 if isinstance(data, set):
3072 if isinstance(data, set):
3072 self._set = data
3073 self._set = data
3073 # a set has no order; we pick one for stability purposes
3074 # a set has no order; we pick one for stability purposes
3074 self._ascending = True
3075 self._ascending = True
3075 data = list(data)
3076 data = list(data)
3076 self._list = data
3077 self._list = data
3077 self._datarepr = datarepr
3078 self._datarepr = datarepr
3078
3079
3079 @util.propertycache
3080 @util.propertycache
3080 def _set(self):
3081 def _set(self):
3081 return set(self._list)
3082 return set(self._list)
3082
3083
3083 @util.propertycache
3084 @util.propertycache
3084 def _asclist(self):
3085 def _asclist(self):
3085 asclist = self._list[:]
3086 asclist = self._list[:]
3086 asclist.sort()
3087 asclist.sort()
3087 return asclist
3088 return asclist
3088
3089
3089 def __iter__(self):
3090 def __iter__(self):
3090 if self._ascending is None:
3091 if self._ascending is None:
3091 return iter(self._list)
3092 return iter(self._list)
3092 elif self._ascending:
3093 elif self._ascending:
3093 return iter(self._asclist)
3094 return iter(self._asclist)
3094 else:
3095 else:
3095 return reversed(self._asclist)
3096 return reversed(self._asclist)
3096
3097
3097 def fastasc(self):
3098 def fastasc(self):
3098 return iter(self._asclist)
3099 return iter(self._asclist)
3099
3100
3100 def fastdesc(self):
3101 def fastdesc(self):
3101 return reversed(self._asclist)
3102 return reversed(self._asclist)
3102
3103
3103 @util.propertycache
3104 @util.propertycache
3104 def __contains__(self):
3105 def __contains__(self):
3105 return self._set.__contains__
3106 return self._set.__contains__
3106
3107
3107 def __nonzero__(self):
3108 def __nonzero__(self):
3108 return bool(self._list)
3109 return bool(self._list)
3109
3110
3110 def sort(self, reverse=False):
3111 def sort(self, reverse=False):
3111 self._ascending = not bool(reverse)
3112 self._ascending = not bool(reverse)
3112 self._istopo = False
3113 self._istopo = False
3113
3114
3114 def reverse(self):
3115 def reverse(self):
3115 if self._ascending is None:
3116 if self._ascending is None:
3116 self._list.reverse()
3117 self._list.reverse()
3117 else:
3118 else:
3118 self._ascending = not self._ascending
3119 self._ascending = not self._ascending
3119 self._istopo = False
3120 self._istopo = False
3120
3121
3121 def __len__(self):
3122 def __len__(self):
3122 return len(self._list)
3123 return len(self._list)
3123
3124
3124 def isascending(self):
3125 def isascending(self):
3125 """Returns True if the collection is in ascending order, False if not.
3126 """Returns True if the collection is in ascending order, False if not.
3126
3127
3127 This is part of the mandatory API for smartset."""
3128 This is part of the mandatory API for smartset."""
3128 if len(self) <= 1:
3129 if len(self) <= 1:
3129 return True
3130 return True
3130 return self._ascending is not None and self._ascending
3131 return self._ascending is not None and self._ascending
3131
3132
3132 def isdescending(self):
3133 def isdescending(self):
3133 """Returns True if the collection is in descending order, False if not.
3134 """Returns True if the collection is in descending order, False if not.
3134
3135
3135 This is part of the mandatory API for smartset."""
3136 This is part of the mandatory API for smartset."""
3136 if len(self) <= 1:
3137 if len(self) <= 1:
3137 return True
3138 return True
3138 return self._ascending is not None and not self._ascending
3139 return self._ascending is not None and not self._ascending
3139
3140
3140 def istopo(self):
3141 def istopo(self):
3141 """Whether the collection is in topological order or not.
3142 """Whether the collection is in topological order or not.
3142
3143
3143 This is part of the mandatory API for smartset."""
3144 This is part of the mandatory API for smartset."""
3144 if len(self) <= 1:
3145 if len(self) <= 1:
3145 return True
3146 return True
3146 return self._istopo
3147 return self._istopo
3147
3148
3148 def first(self):
3149 def first(self):
3149 if self:
3150 if self:
3150 if self._ascending is None:
3151 if self._ascending is None:
3151 return self._list[0]
3152 return self._list[0]
3152 elif self._ascending:
3153 elif self._ascending:
3153 return self._asclist[0]
3154 return self._asclist[0]
3154 else:
3155 else:
3155 return self._asclist[-1]
3156 return self._asclist[-1]
3156 return None
3157 return None
3157
3158
3158 def last(self):
3159 def last(self):
3159 if self:
3160 if self:
3160 if self._ascending is None:
3161 if self._ascending is None:
3161 return self._list[-1]
3162 return self._list[-1]
3162 elif self._ascending:
3163 elif self._ascending:
3163 return self._asclist[-1]
3164 return self._asclist[-1]
3164 else:
3165 else:
3165 return self._asclist[0]
3166 return self._asclist[0]
3166 return None
3167 return None
3167
3168
3168 def __repr__(self):
3169 def __repr__(self):
3169 d = {None: '', False: '-', True: '+'}[self._ascending]
3170 d = {None: '', False: '-', True: '+'}[self._ascending]
3170 s = _formatsetrepr(self._datarepr)
3171 s = _formatsetrepr(self._datarepr)
3171 if not s:
3172 if not s:
3172 l = self._list
3173 l = self._list
3173 # if _list has been built from a set, it might have a different
3174 # if _list has been built from a set, it might have a different
3174 # order from one python implementation to another.
3175 # order from one python implementation to another.
3175 # We fall back to the sorted version for stable output.
3176 # We fall back to the sorted version for stable output.
3176 if self._ascending is not None:
3177 if self._ascending is not None:
3177 l = self._asclist
3178 l = self._asclist
3178 s = repr(l)
3179 s = repr(l)
3179 return '<%s%s %s>' % (type(self).__name__, d, s)
3180 return '<%s%s %s>' % (type(self).__name__, d, s)
3180
3181
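# Ordering is tracked as a flag rather than applied eagerly; for example:
#
#   >>> bs = baseset([0, 3, 2])
#   >>> list(bs), bs.first(), bs.last()
#   ([0, 3, 2], 0, 2)
#   >>> bs.sort(reverse=True)
#   >>> list(bs), bs.first(), bs.last()
#   ([3, 2, 0], 3, 0)
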
3181 class filteredset(abstractsmartset):
3182 class filteredset(abstractsmartset):
3182 """Duck type for baseset class which iterates lazily over the revisions in
3183 """Duck type for baseset class which iterates lazily over the revisions in
3183 the subset and contains a function which tests for membership in the
3184 the subset and contains a function which tests for membership in the
3184 revset
3185 revset
3185 """
3186 """
3186 def __init__(self, subset, condition=lambda x: True, condrepr=None):
3187 def __init__(self, subset, condition=lambda x: True, condrepr=None):
3187 """
3188 """
3188 condition: a function that decides whether a revision in the subset
3189 condition: a function that decides whether a revision in the subset
3189 belongs to the revset or not.
3190 belongs to the revset or not.
3190 condrepr: a tuple of (format, obj, ...), a function or an object that
3191 condrepr: a tuple of (format, obj, ...), a function or an object that
3191 provides a printable representation of the given condition.
3192 provides a printable representation of the given condition.
3192 """
3193 """
3193 self._subset = subset
3194 self._subset = subset
3194 self._condition = condition
3195 self._condition = condition
3195 self._condrepr = condrepr
3196 self._condrepr = condrepr
3196
3197
3197 def __contains__(self, x):
3198 def __contains__(self, x):
3198 return x in self._subset and self._condition(x)
3199 return x in self._subset and self._condition(x)
3199
3200
3200 def __iter__(self):
3201 def __iter__(self):
3201 return self._iterfilter(self._subset)
3202 return self._iterfilter(self._subset)
3202
3203
3203 def _iterfilter(self, it):
3204 def _iterfilter(self, it):
3204 cond = self._condition
3205 cond = self._condition
3205 for x in it:
3206 for x in it:
3206 if cond(x):
3207 if cond(x):
3207 yield x
3208 yield x
3208
3209
3209 @property
3210 @property
3210 def fastasc(self):
3211 def fastasc(self):
3211 it = self._subset.fastasc
3212 it = self._subset.fastasc
3212 if it is None:
3213 if it is None:
3213 return None
3214 return None
3214 return lambda: self._iterfilter(it())
3215 return lambda: self._iterfilter(it())
3215
3216
3216 @property
3217 @property
3217 def fastdesc(self):
3218 def fastdesc(self):
3218 it = self._subset.fastdesc
3219 it = self._subset.fastdesc
3219 if it is None:
3220 if it is None:
3220 return None
3221 return None
3221 return lambda: self._iterfilter(it())
3222 return lambda: self._iterfilter(it())
3222
3223
3223 def __nonzero__(self):
3224 def __nonzero__(self):
3224 fast = None
3225 fast = None
3225 candidates = [self.fastasc if self.isascending() else None,
3226 candidates = [self.fastasc if self.isascending() else None,
3226 self.fastdesc if self.isdescending() else None,
3227 self.fastdesc if self.isdescending() else None,
3227 self.fastasc,
3228 self.fastasc,
3228 self.fastdesc]
3229 self.fastdesc]
3229 for candidate in candidates:
3230 for candidate in candidates:
3230 if candidate is not None:
3231 if candidate is not None:
3231 fast = candidate
3232 fast = candidate
3232 break
3233 break
3233
3234
3234 if fast is not None:
3235 if fast is not None:
3235 it = fast()
3236 it = fast()
3236 else:
3237 else:
3237 it = self
3238 it = self
3238
3239
3239 for r in it:
3240 for r in it:
3240 return True
3241 return True
3241 return False
3242 return False
3242
3243
3243 def __len__(self):
3244 def __len__(self):
3244 # Basic implementation to be changed in future patches.
3245 # Basic implementation to be changed in future patches.
3245 # until this gets improved, we use a generator expression
3246 # until this gets improved, we use a generator expression
3246 # here, since list comprehensions are free to call __len__ again
3247 # here, since list comprehensions are free to call __len__ again
3247 # causing infinite recursion
3248 # causing infinite recursion
3248 l = baseset(r for r in self)
3249 l = baseset(r for r in self)
3249 return len(l)
3250 return len(l)
3250
3251
3251 def sort(self, reverse=False):
3252 def sort(self, reverse=False):
3252 self._subset.sort(reverse=reverse)
3253 self._subset.sort(reverse=reverse)
3253
3254
3254 def reverse(self):
3255 def reverse(self):
3255 self._subset.reverse()
3256 self._subset.reverse()
3256
3257
3257 def isascending(self):
3258 def isascending(self):
3258 return self._subset.isascending()
3259 return self._subset.isascending()
3259
3260
3260 def isdescending(self):
3261 def isdescending(self):
3261 return self._subset.isdescending()
3262 return self._subset.isdescending()
3262
3263
3263 def istopo(self):
3264 def istopo(self):
3264 return self._subset.istopo()
3265 return self._subset.istopo()
3265
3266
3266 def first(self):
3267 def first(self):
3267 for x in self:
3268 for x in self:
3268 return x
3269 return x
3269 return None
3270 return None
3270
3271
3271 def last(self):
3272 def last(self):
3272 it = None
3273 it = None
3273 if self.isascending():
3274 if self.isascending():
3274 it = self.fastdesc
3275 it = self.fastdesc
3275 elif self.isdescending():
3276 elif self.isdescending():
3276 it = self.fastasc
3277 it = self.fastasc
3277 if it is not None:
3278 if it is not None:
3278 for x in it():
3279 for x in it():
3279 return x
3280 return x
3280 return None #empty case
3281 return None #empty case
3281 else:
3282 else:
3282 x = None
3283 x = None
3283 for x in self:
3284 for x in self:
3284 pass
3285 pass
3285 return x
3286 return x
3286
3287
3287 def __repr__(self):
3288 def __repr__(self):
3288 xs = [repr(self._subset)]
3289 xs = [repr(self._subset)]
3289 s = _formatsetrepr(self._condrepr)
3290 s = _formatsetrepr(self._condrepr)
3290 if s:
3291 if s:
3291 xs.append(s)
3292 xs.append(s)
3292 return '<%s %s>' % (type(self).__name__, ', '.join(xs))
3293 return '<%s %s>' % (type(self).__name__, ', '.join(xs))
3293
3294
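# For example, filtering a baseset lazily by a predicate:
#
#   >>> fs = filteredset(baseset([0, 1, 2, 3, 4]), lambda r: r % 2 == 0)
#   >>> list(fs), 3 in fs, fs.first(), len(fs)
#   ([0, 2, 4], False, 0, 3)
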
3294 def _iterordered(ascending, iter1, iter2):
3295 def _iterordered(ascending, iter1, iter2):
3295 """produce an ordered iteration from two iterators with the same order
3296 """produce an ordered iteration from two iterators with the same order
3296
3297
3297 The 'ascending' flag is used to indicate the iteration direction.
3298 The 'ascending' flag is used to indicate the iteration direction.
3298 """
3299 """
3299 choice = max
3300 choice = max
3300 if ascending:
3301 if ascending:
3301 choice = min
3302 choice = min
3302
3303
3303 val1 = None
3304 val1 = None
3304 val2 = None
3305 val2 = None
3305 try:
3306 try:
3306 # Consume both iterators in an ordered way until one is empty
3307 # Consume both iterators in an ordered way until one is empty
3307 while True:
3308 while True:
3308 if val1 is None:
3309 if val1 is None:
3309 val1 = next(iter1)
3310 val1 = next(iter1)
3310 if val2 is None:
3311 if val2 is None:
3311 val2 = next(iter2)
3312 val2 = next(iter2)
3312 n = choice(val1, val2)
3313 n = choice(val1, val2)
3313 yield n
3314 yield n
3314 if val1 == n:
3315 if val1 == n:
3315 val1 = None
3316 val1 = None
3316 if val2 == n:
3317 if val2 == n:
3317 val2 = None
3318 val2 = None
3318 except StopIteration:
3319 except StopIteration:
3319 # Flush any remaining values and consume the other one
3320 # Flush any remaining values and consume the other one
3320 it = iter2
3321 it = iter2
3321 if val1 is not None:
3322 if val1 is not None:
3322 yield val1
3323 yield val1
3323 it = iter1
3324 it = iter1
3324 elif val2 is not None:
3325 elif val2 is not None:
3325 # might have been equality and both are empty
3326 # might have been equality and both are empty
3326 yield val2
3327 yield val2
3327 for val in it:
3328 for val in it:
3328 yield val
3329 yield val
3329
3330
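# For example, merging two ascending iterators without duplicates:
#
#   >>> list(_iterordered(True, iter([0, 2, 3]), iter([2, 4, 5])))
#   [0, 2, 3, 4, 5]
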
3330 class addset(abstractsmartset):
3331 class addset(abstractsmartset):
3331 """Represent the addition of two sets
3332 """Represent the addition of two sets
3332
3333
3333 Wrapper structure for lazily adding two structures without losing much
3334 Wrapper structure for lazily adding two structures without losing much
3334 performance on the __contains__ method
3335 performance on the __contains__ method
3335
3336
3336 If the ascending attribute is set, that means the two structures are
3337 If the ascending attribute is set, that means the two structures are
3337 ordered in either an ascending or descending way. Therefore, we can add
3338 ordered in either an ascending or descending way. Therefore, we can add
3338 them maintaining the order by iterating over both at the same time
3339 them maintaining the order by iterating over both at the same time
3339
3340
3340 >>> xs = baseset([0, 3, 2])
3341 >>> xs = baseset([0, 3, 2])
3341 >>> ys = baseset([5, 2, 4])
3342 >>> ys = baseset([5, 2, 4])
3342
3343
3343 >>> rs = addset(xs, ys)
3344 >>> rs = addset(xs, ys)
3344 >>> bool(rs), 0 in rs, 1 in rs, 5 in rs, rs.first(), rs.last()
3345 >>> bool(rs), 0 in rs, 1 in rs, 5 in rs, rs.first(), rs.last()
3345 (True, True, False, True, 0, 4)
3346 (True, True, False, True, 0, 4)
3346 >>> rs = addset(xs, baseset([]))
3347 >>> rs = addset(xs, baseset([]))
3347 >>> bool(rs), 0 in rs, 1 in rs, rs.first(), rs.last()
3348 >>> bool(rs), 0 in rs, 1 in rs, rs.first(), rs.last()
3348 (True, True, False, 0, 2)
3349 (True, True, False, 0, 2)
3349 >>> rs = addset(baseset([]), baseset([]))
3350 >>> rs = addset(baseset([]), baseset([]))
3350 >>> bool(rs), 0 in rs, rs.first(), rs.last()
3351 >>> bool(rs), 0 in rs, rs.first(), rs.last()
3351 (False, False, None, None)
3352 (False, False, None, None)
3352
3353
3353 iterate unsorted:
3354 iterate unsorted:
3354 >>> rs = addset(xs, ys)
3355 >>> rs = addset(xs, ys)
3355 >>> # (use generator because pypy could call len())
3356 >>> # (use generator because pypy could call len())
3356 >>> list(x for x in rs) # without _genlist
3357 >>> list(x for x in rs) # without _genlist
3357 [0, 3, 2, 5, 4]
3358 [0, 3, 2, 5, 4]
3358 >>> assert not rs._genlist
3359 >>> assert not rs._genlist
3359 >>> len(rs)
3360 >>> len(rs)
3360 5
3361 5
3361 >>> [x for x in rs] # with _genlist
3362 >>> [x for x in rs] # with _genlist
3362 [0, 3, 2, 5, 4]
3363 [0, 3, 2, 5, 4]
3363 >>> assert rs._genlist
3364 >>> assert rs._genlist
3364
3365
3365 iterate ascending:
3366 iterate ascending:
3366 >>> rs = addset(xs, ys, ascending=True)
3367 >>> rs = addset(xs, ys, ascending=True)
3367 >>> # (use generator because pypy could call len())
3368 >>> # (use generator because pypy could call len())
3368 >>> list(x for x in rs), list(x for x in rs.fastasc()) # without _asclist
3369 >>> list(x for x in rs), list(x for x in rs.fastasc()) # without _asclist
3369 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
3370 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
3370 >>> assert not rs._asclist
3371 >>> assert not rs._asclist
3371 >>> len(rs)
3372 >>> len(rs)
3372 5
3373 5
3373 >>> [x for x in rs], [x for x in rs.fastasc()]
3374 >>> [x for x in rs], [x for x in rs.fastasc()]
3374 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
3375 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
3375 >>> assert rs._asclist
3376 >>> assert rs._asclist
3376
3377
3377 iterate descending:
3378 iterate descending:
3378 >>> rs = addset(xs, ys, ascending=False)
3379 >>> rs = addset(xs, ys, ascending=False)
3379 >>> # (use generator because pypy could call len())
3380 >>> # (use generator because pypy could call len())
3380 >>> list(x for x in rs), list(x for x in rs.fastdesc()) # without _asclist
3381 >>> list(x for x in rs), list(x for x in rs.fastdesc()) # without _asclist
3381 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
3382 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
3382 >>> assert not rs._asclist
3383 >>> assert not rs._asclist
3383 >>> len(rs)
3384 >>> len(rs)
3384 5
3385 5
3385 >>> [x for x in rs], [x for x in rs.fastdesc()]
3386 >>> [x for x in rs], [x for x in rs.fastdesc()]
3386 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
3387 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
3387 >>> assert rs._asclist
3388 >>> assert rs._asclist
3388
3389
3389 iterate ascending without fastasc:
3390 iterate ascending without fastasc:
3390 >>> rs = addset(xs, generatorset(ys), ascending=True)
3391 >>> rs = addset(xs, generatorset(ys), ascending=True)
3391 >>> assert rs.fastasc is None
3392 >>> assert rs.fastasc is None
3392 >>> [x for x in rs]
3393 >>> [x for x in rs]
3393 [0, 2, 3, 4, 5]
3394 [0, 2, 3, 4, 5]
3394
3395
3395 iterate descending without fastdesc:
3396 iterate descending without fastdesc:
3396 >>> rs = addset(generatorset(xs), ys, ascending=False)
3397 >>> rs = addset(generatorset(xs), ys, ascending=False)
3397 >>> assert rs.fastdesc is None
3398 >>> assert rs.fastdesc is None
3398 >>> [x for x in rs]
3399 >>> [x for x in rs]
3399 [5, 4, 3, 2, 0]
3400 [5, 4, 3, 2, 0]
3400 """
3401 """
3401 def __init__(self, revs1, revs2, ascending=None):
3402 def __init__(self, revs1, revs2, ascending=None):
3402 self._r1 = revs1
3403 self._r1 = revs1
3403 self._r2 = revs2
3404 self._r2 = revs2
3404 self._iter = None
3405 self._iter = None
3405 self._ascending = ascending
3406 self._ascending = ascending
3406 self._genlist = None
3407 self._genlist = None
3407 self._asclist = None
3408 self._asclist = None
3408
3409
3409 def __len__(self):
3410 def __len__(self):
3410 return len(self._list)
3411 return len(self._list)
3411
3412
3412 def __nonzero__(self):
3413 def __nonzero__(self):
3413 return bool(self._r1) or bool(self._r2)
3414 return bool(self._r1) or bool(self._r2)
3414
3415
3415 @util.propertycache
3416 @util.propertycache
3416 def _list(self):
3417 def _list(self):
3417 if not self._genlist:
3418 if not self._genlist:
3418 self._genlist = baseset(iter(self))
3419 self._genlist = baseset(iter(self))
3419 return self._genlist
3420 return self._genlist
3420
3421
3421 def __iter__(self):
3422 def __iter__(self):
3422 """Iterate over both collections without repeating elements
3423 """Iterate over both collections without repeating elements
3423
3424
3424 If the ascending attribute is not set, iterate over the first one and
3425 If the ascending attribute is not set, iterate over the first one and
3425 then over the second one checking for membership on the first one so we
3426 then over the second one checking for membership on the first one so we
3426 don't yield any duplicates.
3427 don't yield any duplicates.
3427
3428
3428 If the ascending attribute is set, iterate over both collections at the
3429 If the ascending attribute is set, iterate over both collections at the
3429 same time, yielding only one value at a time in the given order.
3430 same time, yielding only one value at a time in the given order.
3430 """
3431 """
3431 if self._ascending is None:
3432 if self._ascending is None:
3432 if self._genlist:
3433 if self._genlist:
3433 return iter(self._genlist)
3434 return iter(self._genlist)
3434 def arbitraryordergen():
3435 def arbitraryordergen():
3435 for r in self._r1:
3436 for r in self._r1:
3436 yield r
3437 yield r
3437 inr1 = self._r1.__contains__
3438 inr1 = self._r1.__contains__
3438 for r in self._r2:
3439 for r in self._r2:
3439 if not inr1(r):
3440 if not inr1(r):
3440 yield r
3441 yield r
3441 return arbitraryordergen()
3442 return arbitraryordergen()
3442 # try to use our own fast iterator if it exists
3443 # try to use our own fast iterator if it exists
3443 self._trysetasclist()
3444 self._trysetasclist()
3444 if self._ascending:
3445 if self._ascending:
3445 attr = 'fastasc'
3446 attr = 'fastasc'
3446 else:
3447 else:
3447 attr = 'fastdesc'
3448 attr = 'fastdesc'
3448 it = getattr(self, attr)
3449 it = getattr(self, attr)
3449 if it is not None:
3450 if it is not None:
3450 return it()
3451 return it()
3451 # maybe half of the component supports fast
3452 # maybe half of the component supports fast
3452 # get iterator for _r1
3453 # get iterator for _r1
3453 iter1 = getattr(self._r1, attr)
3454 iter1 = getattr(self._r1, attr)
3454 if iter1 is None:
3455 if iter1 is None:
3455 # let's avoid side effects (not sure it matters)
3456 # let's avoid side effects (not sure it matters)
3456 iter1 = iter(sorted(self._r1, reverse=not self._ascending))
3457 iter1 = iter(sorted(self._r1, reverse=not self._ascending))
3457 else:
3458 else:
3458 iter1 = iter1()
3459 iter1 = iter1()
3459 # get iterator for _r2
3460 # get iterator for _r2
3460 iter2 = getattr(self._r2, attr)
3461 iter2 = getattr(self._r2, attr)
3461 if iter2 is None:
3462 if iter2 is None:
3462 # let's avoid side effects (not sure it matters)
3463 # let's avoid side effects (not sure it matters)
3463 iter2 = iter(sorted(self._r2, reverse=not self._ascending))
3464 iter2 = iter(sorted(self._r2, reverse=not self._ascending))
3464 else:
3465 else:
3465 iter2 = iter2()
3466 iter2 = iter2()
3466 return _iterordered(self._ascending, iter1, iter2)
3467 return _iterordered(self._ascending, iter1, iter2)
3467
3468
3468 def _trysetasclist(self):
3469 def _trysetasclist(self):
3469 """populate the _asclist attribute if possible and necessary"""
3470 """populate the _asclist attribute if possible and necessary"""
3470 if self._genlist is not None and self._asclist is None:
3471 if self._genlist is not None and self._asclist is None:
3471 self._asclist = sorted(self._genlist)
3472 self._asclist = sorted(self._genlist)
3472
3473
3473 @property
3474 @property
3474 def fastasc(self):
3475 def fastasc(self):
3475 self._trysetasclist()
3476 self._trysetasclist()
3476 if self._asclist is not None:
3477 if self._asclist is not None:
3477 return self._asclist.__iter__
3478 return self._asclist.__iter__
3478 iter1 = self._r1.fastasc
3479 iter1 = self._r1.fastasc
3479 iter2 = self._r2.fastasc
3480 iter2 = self._r2.fastasc
3480 if None in (iter1, iter2):
3481 if None in (iter1, iter2):
3481 return None
3482 return None
3482 return lambda: _iterordered(True, iter1(), iter2())
3483 return lambda: _iterordered(True, iter1(), iter2())
3483
3484
3484 @property
3485 @property
3485 def fastdesc(self):
3486 def fastdesc(self):
3486 self._trysetasclist()
3487 self._trysetasclist()
3487 if self._asclist is not None:
3488 if self._asclist is not None:
3488 return self._asclist.__reversed__
3489 return self._asclist.__reversed__
3489 iter1 = self._r1.fastdesc
3490 iter1 = self._r1.fastdesc
3490 iter2 = self._r2.fastdesc
3491 iter2 = self._r2.fastdesc
3491 if None in (iter1, iter2):
3492 if None in (iter1, iter2):
3492 return None
3493 return None
3493 return lambda: _iterordered(False, iter1(), iter2())
3494 return lambda: _iterordered(False, iter1(), iter2())
3494
3495
3495 def __contains__(self, x):
3496 def __contains__(self, x):
3496 return x in self._r1 or x in self._r2
3497 return x in self._r1 or x in self._r2
3497
3498
3498 def sort(self, reverse=False):
3499 def sort(self, reverse=False):
3499 """Sort the added set
3500 """Sort the added set
3500
3501
3501 For this we use the cached list with all the generated values and if we
3502 For this we use the cached list with all the generated values and if we
3502 know they are ascending or descending we can sort them in a smart way.
3503 know they are ascending or descending we can sort them in a smart way.
3503 """
3504 """
3504 self._ascending = not reverse
3505 self._ascending = not reverse
3505
3506
3506 def isascending(self):
3507 def isascending(self):
3507 return self._ascending is not None and self._ascending
3508 return self._ascending is not None and self._ascending
3508
3509
3509 def isdescending(self):
3510 def isdescending(self):
3510 return self._ascending is not None and not self._ascending
3511 return self._ascending is not None and not self._ascending
3511
3512
3512 def istopo(self):
3513 def istopo(self):
3513 # not worth the trouble asserting whether the two sets combined are still
3514 # not worth the trouble asserting whether the two sets combined are still
3514 # in topological order. Use the sort() predicate to explicitly sort
3515 # in topological order. Use the sort() predicate to explicitly sort
3515 # again instead.
3516 # again instead.
3516 return False
3517 return False
3517
3518
3518 def reverse(self):
3519 def reverse(self):
3519 if self._ascending is None:
3520 if self._ascending is None:
3520 self._list.reverse()
3521 self._list.reverse()
3521 else:
3522 else:
3522 self._ascending = not self._ascending
3523 self._ascending = not self._ascending
3523
3524
3524 def first(self):
3525 def first(self):
3525 for x in self:
3526 for x in self:
3526 return x
3527 return x
3527 return None
3528 return None
3528
3529
3529 def last(self):
3530 def last(self):
3530 self.reverse()
3531 self.reverse()
3531 val = self.first()
3532 val = self.first()
3532 self.reverse()
3533 self.reverse()
3533 return val
3534 return val
3534
3535
3535 def __repr__(self):
3536 def __repr__(self):
3536 d = {None: '', False: '-', True: '+'}[self._ascending]
3537 d = {None: '', False: '-', True: '+'}[self._ascending]
3537 return '<%s%s %r, %r>' % (type(self).__name__, d, self._r1, self._r2)
3538 return '<%s%s %r, %r>' % (type(self).__name__, d, self._r1, self._r2)
3538
3539
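The fastasc/fastdesc properties above only return an iterator factory when both wrapped sets can produce their own ordered iterator; the two ordered streams are then merged lazily by _iterordered (defined earlier in this file). A rough stand-in for that merge, using heapq.merge purely to illustrate the idea (this is not the actual _iterordered implementation), could look like:

import heapq

def merged_asc(iter1, iter2):
    # lazily merge two ascending iterators and drop duplicates,
    # roughly what _iterordered(True, iter1, iter2) yields for a union
    first = True
    last = None
    for r in heapq.merge(iter1, iter2):
        if first or r != last:
            yield r
        first = False
        last = r

# merging revs from two already-ascending smartsets
list(merged_asc(iter([0, 2, 5]), iter([2, 3, 9])))   # [0, 2, 3, 5, 9]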
3539 class generatorset(abstractsmartset):
3540 class generatorset(abstractsmartset):
3540 """Wrap a generator for lazy iteration
3541 """Wrap a generator for lazy iteration
3541
3542
3542 Wrapper structure for generators that provides lazy membership and can
3543 Wrapper structure for generators that provides lazy membership and can
3543 be iterated more than once.
3544 be iterated more than once.
3544 When asked for membership, it generates values until either it finds the
3545 When asked for membership, it generates values until either it finds the
3545 requested one or has gone through all the elements in the generator.
3546 requested one or has gone through all the elements in the generator.
3546 """
3547 """
3547 def __init__(self, gen, iterasc=None):
3548 def __init__(self, gen, iterasc=None):
3548 """
3549 """
3549 gen: a generator producing the values for the generatorset.
3550 gen: a generator producing the values for the generatorset.
3550 """
3551 """
3551 self._gen = gen
3552 self._gen = gen
3552 self._asclist = None
3553 self._asclist = None
3553 self._cache = {}
3554 self._cache = {}
3554 self._genlist = []
3555 self._genlist = []
3555 self._finished = False
3556 self._finished = False
3556 self._ascending = True
3557 self._ascending = True
3557 if iterasc is not None:
3558 if iterasc is not None:
3558 if iterasc:
3559 if iterasc:
3559 self.fastasc = self._iterator
3560 self.fastasc = self._iterator
3560 self.__contains__ = self._asccontains
3561 self.__contains__ = self._asccontains
3561 else:
3562 else:
3562 self.fastdesc = self._iterator
3563 self.fastdesc = self._iterator
3563 self.__contains__ = self._desccontains
3564 self.__contains__ = self._desccontains
3564
3565
3565 def __nonzero__(self):
3566 def __nonzero__(self):
3566 # Do not use 'for r in self' because it will enforce the iteration
3567 # Do not use 'for r in self' because it will enforce the iteration
3567 # order (default ascending), possibly unrolling a whole descending
3568 # order (default ascending), possibly unrolling a whole descending
3568 # iterator.
3569 # iterator.
3569 if self._genlist:
3570 if self._genlist:
3570 return True
3571 return True
3571 for r in self._consumegen():
3572 for r in self._consumegen():
3572 return True
3573 return True
3573 return False
3574 return False
3574
3575
3575 def __contains__(self, x):
3576 def __contains__(self, x):
3576 if x in self._cache:
3577 if x in self._cache:
3577 return self._cache[x]
3578 return self._cache[x]
3578
3579
3579 # Use new values only, as existing values would be cached.
3580 # Use new values only, as existing values would be cached.
3580 for l in self._consumegen():
3581 for l in self._consumegen():
3581 if l == x:
3582 if l == x:
3582 return True
3583 return True
3583
3584
3584 self._cache[x] = False
3585 self._cache[x] = False
3585 return False
3586 return False
3586
3587
3587 def _asccontains(self, x):
3588 def _asccontains(self, x):
3588 """version of contains optimised for ascending generator"""
3589 """version of contains optimised for ascending generator"""
3589 if x in self._cache:
3590 if x in self._cache:
3590 return self._cache[x]
3591 return self._cache[x]
3591
3592
3592 # Use new values only, as existing values would be cached.
3593 # Use new values only, as existing values would be cached.
3593 for l in self._consumegen():
3594 for l in self._consumegen():
3594 if l == x:
3595 if l == x:
3595 return True
3596 return True
3596 if l > x:
3597 if l > x:
3597 break
3598 break
3598
3599
3599 self._cache[x] = False
3600 self._cache[x] = False
3600 return False
3601 return False
3601
3602
3602 def _desccontains(self, x):
3603 def _desccontains(self, x):
3603 """version of contains optimised for descending generator"""
3604 """version of contains optimised for descending generator"""
3604 if x in self._cache:
3605 if x in self._cache:
3605 return self._cache[x]
3606 return self._cache[x]
3606
3607
3607 # Use new values only, as existing values would be cached.
3608 # Use new values only, as existing values would be cached.
3608 for l in self._consumegen():
3609 for l in self._consumegen():
3609 if l == x:
3610 if l == x:
3610 return True
3611 return True
3611 if l < x:
3612 if l < x:
3612 break
3613 break
3613
3614
3614 self._cache[x] = False
3615 self._cache[x] = False
3615 return False
3616 return False
3616
3617
3617 def __iter__(self):
3618 def __iter__(self):
3618 if self._ascending:
3619 if self._ascending:
3619 it = self.fastasc
3620 it = self.fastasc
3620 else:
3621 else:
3621 it = self.fastdesc
3622 it = self.fastdesc
3622 if it is not None:
3623 if it is not None:
3623 return it()
3624 return it()
3624 # we need to consume the iterator
3625 # we need to consume the iterator
3625 for x in self._consumegen():
3626 for x in self._consumegen():
3626 pass
3627 pass
3627 # recall the same code
3628 # recall the same code
3628 return iter(self)
3629 return iter(self)
3629
3630
3630 def _iterator(self):
3631 def _iterator(self):
3631 if self._finished:
3632 if self._finished:
3632 return iter(self._genlist)
3633 return iter(self._genlist)
3633
3634
3634 # We have to use this complex iteration strategy to allow multiple
3635 # We have to use this complex iteration strategy to allow multiple
3635 # iterations at the same time. We need to be able to catch revisions
3636 # iterations at the same time. We need to be able to catch revisions
3636 # removed from _consumegen and added to genlist by another instance.
3637 # removed from _consumegen and added to genlist by another instance.
3637 #
3638 #
3638 # Getting rid of it would provide about a 15% speedup on this
3639 # Getting rid of it would provide about a 15% speedup on this
3639 # iteration.
3640 # iteration.
3640 genlist = self._genlist
3641 genlist = self._genlist
3641 nextrev = self._consumegen().next
3642 nextrev = self._consumegen().next
3642 _len = len # cache global lookup
3643 _len = len # cache global lookup
3643 def gen():
3644 def gen():
3644 i = 0
3645 i = 0
3645 while True:
3646 while True:
3646 if i < _len(genlist):
3647 if i < _len(genlist):
3647 yield genlist[i]
3648 yield genlist[i]
3648 else:
3649 else:
3649 yield nextrev()
3650 yield nextrev()
3650 i += 1
3651 i += 1
3651 return gen()
3652 return gen()
3652
3653
3653 def _consumegen(self):
3654 def _consumegen(self):
3654 cache = self._cache
3655 cache = self._cache
3655 genlist = self._genlist.append
3656 genlist = self._genlist.append
3656 for item in self._gen:
3657 for item in self._gen:
3657 cache[item] = True
3658 cache[item] = True
3658 genlist(item)
3659 genlist(item)
3659 yield item
3660 yield item
3660 if not self._finished:
3661 if not self._finished:
3661 self._finished = True
3662 self._finished = True
3662 asc = self._genlist[:]
3663 asc = self._genlist[:]
3663 asc.sort()
3664 asc.sort()
3664 self._asclist = asc
3665 self._asclist = asc
3665 self.fastasc = asc.__iter__
3666 self.fastasc = asc.__iter__
3666 self.fastdesc = asc.__reversed__
3667 self.fastdesc = asc.__reversed__
3667
3668
3668 def __len__(self):
3669 def __len__(self):
3669 for x in self._consumegen():
3670 for x in self._consumegen():
3670 pass
3671 pass
3671 return len(self._genlist)
3672 return len(self._genlist)
3672
3673
3673 def sort(self, reverse=False):
3674 def sort(self, reverse=False):
3674 self._ascending = not reverse
3675 self._ascending = not reverse
3675
3676
3676 def reverse(self):
3677 def reverse(self):
3677 self._ascending = not self._ascending
3678 self._ascending = not self._ascending
3678
3679
3679 def isascending(self):
3680 def isascending(self):
3680 return self._ascending
3681 return self._ascending
3681
3682
3682 def isdescending(self):
3683 def isdescending(self):
3683 return not self._ascending
3684 return not self._ascending
3684
3685
3685 def istopo(self):
3686 def istopo(self):
3686 # not worth the trouble asserting if the generated values are still
3687 # not worth the trouble asserting if the generated values are still
3687 # in topological order. Use the sort() predicate to explicitly sort
3688 # in topological order. Use the sort() predicate to explicitly sort
3688 # again instead.
3689 # again instead.
3689 return False
3690 return False
3690
3691
3691 def first(self):
3692 def first(self):
3692 if self._ascending:
3693 if self._ascending:
3693 it = self.fastasc
3694 it = self.fastasc
3694 else:
3695 else:
3695 it = self.fastdesc
3696 it = self.fastdesc
3696 if it is None:
3697 if it is None:
3697 # we need to consume all and try again
3698 # we need to consume all and try again
3698 for x in self._consumegen():
3699 for x in self._consumegen():
3699 pass
3700 pass
3700 return self.first()
3701 return self.first()
3701 return next(it(), None)
3702 return next(it(), None)
3702
3703
3703 def last(self):
3704 def last(self):
3704 if self._ascending:
3705 if self._ascending:
3705 it = self.fastdesc
3706 it = self.fastdesc
3706 else:
3707 else:
3707 it = self.fastasc
3708 it = self.fastasc
3708 if it is None:
3709 if it is None:
3709 # we need to consume all and try again
3710 # we need to consume all and try again
3710 for x in self._consumegen():
3711 for x in self._consumegen():
3711 pass
3712 pass
3712 return self.last()
3713 return self.last()
3713 return next(it(), None)
3714 return next(it(), None)
3714
3715
3715 def __repr__(self):
3716 def __repr__(self):
3716 d = {False: '-', True: '+'}[self._ascending]
3717 d = {False: '-', True: '+'}[self._ascending]
3717 return '<%s%s>' % (type(self).__name__, d)
3718 return '<%s%s>' % (type(self).__name__, d)
3718
3719
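As a small usage sketch of the class above (a plain integer generator stands in for a revision stream; the values are only illustrative), membership tests consume the generator no further than needed, and the cached values can be replayed and reordered afterwards:

gs = generatorset(iter([0, 2, 5, 9]), iterasc=True)

2 in gs        # True - generation stops as soon as 2 is produced
3 in gs        # False - the ascending variant gives up once it sees 5
list(gs)       # [0, 2, 5, 9] - cached values are replayed, the underlying
               # generator is consumed only once
gs.reverse()
list(gs)       # [9, 5, 2, 0] - once fully consumed, fastdesc is available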
3719 class spanset(abstractsmartset):
3720 class spanset(abstractsmartset):
3720 """Duck type for baseset class which represents a range of revisions and
3721 """Duck type for baseset class which represents a range of revisions and
3721 can work lazily and without having all the range in memory
3722 can work lazily and without having all the range in memory
3722
3723
3723 Note that spanset(x, y) behaves almost like xrange(x, y) except for two
3724 Note that spanset(x, y) behaves almost like xrange(x, y) except for two
3724 notable points:
3725 notable points:
3725 - when x > y it will be automatically descending,
3726 - when x > y it will be automatically descending,
3726 - revisions filtered with this repoview will be skipped.
3727 - revisions filtered with this repoview will be skipped.
3727
3728
3728 """
3729 """
3729 def __init__(self, repo, start=0, end=None):
3730 def __init__(self, repo, start=0, end=None):
3730 """
3731 """
3731 start: first revision included in the set
3732 start: first revision included in the set
3732 (defaults to 0)
3733 (defaults to 0)
3733 end: first revision excluded (last+1)
3734 end: first revision excluded (last+1)
3734 (defaults to len(repo))
3735 (defaults to len(repo))
3735
3736
3736 Spanset will be descending if `end` < `start`.
3737 Spanset will be descending if `end` < `start`.
3737 """
3738 """
3738 if end is None:
3739 if end is None:
3739 end = len(repo)
3740 end = len(repo)
3740 self._ascending = start <= end
3741 self._ascending = start <= end
3741 if not self._ascending:
3742 if not self._ascending:
3742 start, end = end + 1, start + 1
3743 start, end = end + 1, start + 1
3743 self._start = start
3744 self._start = start
3744 self._end = end
3745 self._end = end
3745 self._hiddenrevs = repo.changelog.filteredrevs
3746 self._hiddenrevs = repo.changelog.filteredrevs
3746
3747
3747 def sort(self, reverse=False):
3748 def sort(self, reverse=False):
3748 self._ascending = not reverse
3749 self._ascending = not reverse
3749
3750
3750 def reverse(self):
3751 def reverse(self):
3751 self._ascending = not self._ascending
3752 self._ascending = not self._ascending
3752
3753
3753 def istopo(self):
3754 def istopo(self):
3754 # not worth the trouble asserting if this range is still
3755 # not worth the trouble asserting if this range is still
3755 # in topological order. Use the sort() predicate to explicitly sort
3756 # in topological order. Use the sort() predicate to explicitly sort
3756 # again instead.
3757 # again instead.
3757 return False
3758 return False
3758
3759
3759 def _iterfilter(self, iterrange):
3760 def _iterfilter(self, iterrange):
3760 s = self._hiddenrevs
3761 s = self._hiddenrevs
3761 for r in iterrange:
3762 for r in iterrange:
3762 if r not in s:
3763 if r not in s:
3763 yield r
3764 yield r
3764
3765
3765 def __iter__(self):
3766 def __iter__(self):
3766 if self._ascending:
3767 if self._ascending:
3767 return self.fastasc()
3768 return self.fastasc()
3768 else:
3769 else:
3769 return self.fastdesc()
3770 return self.fastdesc()
3770
3771
3771 def fastasc(self):
3772 def fastasc(self):
3772 iterrange = xrange(self._start, self._end)
3773 iterrange = xrange(self._start, self._end)
3773 if self._hiddenrevs:
3774 if self._hiddenrevs:
3774 return self._iterfilter(iterrange)
3775 return self._iterfilter(iterrange)
3775 return iter(iterrange)
3776 return iter(iterrange)
3776
3777
3777 def fastdesc(self):
3778 def fastdesc(self):
3778 iterrange = xrange(self._end - 1, self._start - 1, -1)
3779 iterrange = xrange(self._end - 1, self._start - 1, -1)
3779 if self._hiddenrevs:
3780 if self._hiddenrevs:
3780 return self._iterfilter(iterrange)
3781 return self._iterfilter(iterrange)
3781 return iter(iterrange)
3782 return iter(iterrange)
3782
3783
3783 def __contains__(self, rev):
3784 def __contains__(self, rev):
3784 hidden = self._hiddenrevs
3785 hidden = self._hiddenrevs
3785 return ((self._start <= rev < self._end)
3786 return ((self._start <= rev < self._end)
3786 and not (hidden and rev in hidden))
3787 and not (hidden and rev in hidden))
3787
3788
3788 def __nonzero__(self):
3789 def __nonzero__(self):
3789 for r in self:
3790 for r in self:
3790 return True
3791 return True
3791 return False
3792 return False
3792
3793
3793 def __len__(self):
3794 def __len__(self):
3794 if not self._hiddenrevs:
3795 if not self._hiddenrevs:
3795 return abs(self._end - self._start)
3796 return abs(self._end - self._start)
3796 else:
3797 else:
3797 count = 0
3798 count = 0
3798 start = self._start
3799 start = self._start
3799 end = self._end
3800 end = self._end
3800 for rev in self._hiddenrevs:
3801 for rev in self._hiddenrevs:
3801 if (end < rev <= start) or (start <= rev < end):
3802 if (end < rev <= start) or (start <= rev < end):
3802 count += 1
3803 count += 1
3803 return abs(self._end - self._start) - count
3804 return abs(self._end - self._start) - count
3804
3805
3805 def isascending(self):
3806 def isascending(self):
3806 return self._ascending
3807 return self._ascending
3807
3808
3808 def isdescending(self):
3809 def isdescending(self):
3809 return not self._ascending
3810 return not self._ascending
3810
3811
3811 def first(self):
3812 def first(self):
3812 if self._ascending:
3813 if self._ascending:
3813 it = self.fastasc
3814 it = self.fastasc
3814 else:
3815 else:
3815 it = self.fastdesc
3816 it = self.fastdesc
3816 for x in it():
3817 for x in it():
3817 return x
3818 return x
3818 return None
3819 return None
3819
3820
3820 def last(self):
3821 def last(self):
3821 if self._ascending:
3822 if self._ascending:
3822 it = self.fastdesc
3823 it = self.fastdesc
3823 else:
3824 else:
3824 it = self.fastasc
3825 it = self.fastasc
3825 for x in it():
3826 for x in it():
3826 return x
3827 return x
3827 return None
3828 return None
3828
3829
3829 def __repr__(self):
3830 def __repr__(self):
3830 d = {False: '-', True: '+'}[self._ascending]
3831 d = {False: '-', True: '+'}[self._ascending]
3831 return '<%s%s %d:%d>' % (type(self).__name__, d,
3832 return '<%s%s %d:%d>' % (type(self).__name__, d,
3832 self._start, self._end - 1)
3833 self._start, self._end - 1)
3833
3834
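A short usage sketch of the normalisation done in __init__ above; `repo` is assumed to be a real repository object, and the listed results assume none of the revisions involved is filtered out as hidden:

s = spanset(repo, 4, 0)   # start > end, so the set is descending
s.isdescending()          # True - internally normalised to the range [1, 5)
list(s)                   # [4, 3, 2, 1]
s.sort()                  # flip back to ascending without recomputing anything
list(s)                   # [1, 2, 3, 4]
list(spanset(repo))       # every non-hidden revision in the repo, ascending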
3834 class fullreposet(spanset):
3835 class fullreposet(spanset):
3835 """a set containing all revisions in the repo
3836 """a set containing all revisions in the repo
3836
3837
3837 This class exists to host special optimization and magic to handle virtual
3838 This class exists to host special optimization and magic to handle virtual
3838 revisions such as "null".
3839 revisions such as "null".
3839 """
3840 """
3840
3841
3841 def __init__(self, repo):
3842 def __init__(self, repo):
3842 super(fullreposet, self).__init__(repo)
3843 super(fullreposet, self).__init__(repo)
3843
3844
3844 def __and__(self, other):
3845 def __and__(self, other):
3845 """As self contains the whole repo, all of the other set should also be
3846 """As self contains the whole repo, all of the other set should also be
3846 in self. Therefore `self & other = other`.
3847 in self. Therefore `self & other = other`.
3847
3848
3848 This boldly assumes the other contains valid revs only.
3849 This boldly assumes the other contains valid revs only.
3849 """
3850 """
3850 # other is not a smartset, make it so
3851 # other is not a smartset, make it so
3851 if not util.safehasattr(other, 'isascending'):
3852 if not util.safehasattr(other, 'isascending'):
3852 # filter out hidden revisions
3853 # filter out hidden revisions
3853 # (this boldly assumes all smartsets are pure)
3854 # (this boldly assumes all smartsets are pure)
3854 #
3855 #
3855 # `other` was used with "&", let's assume this is a set-like
3856 # `other` was used with "&", let's assume this is a set-like
3856 # object.
3857 # object.
3857 other = baseset(other - self._hiddenrevs)
3858 other = baseset(other - self._hiddenrevs)
3858
3859
3859 other.sort(reverse=self.isdescending())
3860 other.sort(reverse=self.isdescending())
3860 return other
3861 return other
3861
3862
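A sketch of how the __and__ above behaves; `repo` is again assumed, and per the docstring the right-hand operand is trusted to contain only valid revisions:

allrevs = fullreposet(repo)

allrevs & baseset([3, 1, 2])   # already a smartset: returned as-is,
                               # just re-sorted ascending -> 1, 2, 3
allrevs & set([5, 3, 9])       # not a smartset: hidden revs are subtracted,
                               # the rest is wrapped in a baseset and sorted
allrevs.reverse()
allrevs & baseset([3, 1, 2])   # direction follows self -> 3, 2, 1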
3862 def prettyformatset(revs):
3863 def prettyformatset(revs):
3863 lines = []
3864 lines = []
3864 rs = repr(revs)
3865 rs = repr(revs)
3865 p = 0
3866 p = 0
3866 while p < len(rs):
3867 while p < len(rs):
3867 q = rs.find('<', p + 1)
3868 q = rs.find('<', p + 1)
3868 if q < 0:
3869 if q < 0:
3869 q = len(rs)
3870 q = len(rs)
3870 l = rs.count('<', 0, p) - rs.count('>', 0, p)
3871 l = rs.count('<', 0, p) - rs.count('>', 0, p)
3871 assert l >= 0
3872 assert l >= 0
3872 lines.append((l, rs[p:q].rstrip()))
3873 lines.append((l, rs[p:q].rstrip()))
3873 p = q
3874 p = q
3874 return '\n'.join(' ' * l + s for l, s in lines)
3875 return '\n'.join(' ' * l + s for l, s in lines)
3875
3876
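For example, if repr(revs) came out as the single line '<addset+ <spanset+ 0:3>, <baseset [5]>>' (the exact reprs depend on the sets involved), prettyformatset would split it at each '<' and indent each fragment by its nesting depth:

print(prettyformatset(revs))
# <addset+
#  <spanset+ 0:3>,
#  <baseset [5]>>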
3876 def loadpredicate(ui, extname, registrarobj):
3877 def loadpredicate(ui, extname, registrarobj):
3877 """Load revset predicates from specified registrarobj
3878 """Load revset predicates from specified registrarobj
3878 """
3879 """
3879 for name, func in registrarobj._table.iteritems():
3880 for name, func in registrarobj._table.iteritems():
3880 symbols[name] = func
3881 symbols[name] = func
3881 if func._safe:
3882 if func._safe:
3882 safesymbols.add(name)
3883 safesymbols.add(name)
3883
3884
3884 # load built-in predicates explicitly to set up safesymbols
3885 # load built-in predicates explicitly to set up safesymbols
3885 loadpredicate(None, None, predicate)
3886 loadpredicate(None, None, predicate)
3886
3887
3887 # tell hggettext to extract docstrings from these functions:
3888 # tell hggettext to extract docstrings from these functions:
3888 i18nfunctions = symbols.values()
3889 i18nfunctions = symbols.values()
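loadpredicate() above is also the hook through which extensions contribute predicates: an extension builds its own registrar.revsetpredicate() table, and Mercurial's extension loader hands that table to loadpredicate() so the new names end up in symbols (and in safesymbols when marked safe). A minimal sketch of such an extension; the 'lucky()' predicate name and its behaviour are made up for illustration:

# in an extension module, e.g. lucky.py
from mercurial import registrar

revsetpredicate = registrar.revsetpredicate()

@revsetpredicate('lucky()', safe=True)
def lucky(repo, subset, x):
    """Changesets whose revision number ends in 7."""
    # keep only the matching revisions from the subset being filtered
    return subset.filter(lambda r: r % 10 == 7)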