revset: point to 'grep' in the 'keyword' help for regex searches...
Author: Matt Harbison
Changeset: r30772:b1012cb1 (branch: default)
@@ -1,3892 +1,3895 @@
# revset.py - revision set queries for mercurial
#
# Copyright 2010 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import heapq
import re
import string

from .i18n import _
from . import (
    destutil,
    encoding,
    error,
    hbisect,
    match as matchmod,
    node,
    obsolete as obsmod,
    parser,
    pathutil,
    phases,
    pycompat,
    registrar,
    repoview,
    util,
)

def _revancestors(repo, revs, followfirst):
    """Like revlog.ancestors(), but supports followfirst."""
    if followfirst:
        cut = 1
    else:
        cut = None
    cl = repo.changelog

    def iterate():
        revs.sort(reverse=True)
        irevs = iter(revs)
        h = []

        inputrev = next(irevs, None)
        if inputrev is not None:
            heapq.heappush(h, -inputrev)

        seen = set()
        while h:
            current = -heapq.heappop(h)
            if current == inputrev:
                inputrev = next(irevs, None)
                if inputrev is not None:
                    heapq.heappush(h, -inputrev)
            if current not in seen:
                seen.add(current)
                yield current
                for parent in cl.parentrevs(current)[:cut]:
                    if parent != node.nullrev:
                        heapq.heappush(h, -parent)

    return generatorset(iterate(), iterasc=False)

def _revdescendants(repo, revs, followfirst):
    """Like revlog.descendants() but supports followfirst."""
    if followfirst:
        cut = 1
    else:
        cut = None

    def iterate():
        cl = repo.changelog
        # XXX this should be 'parentset.min()' assuming 'parentset' is a
        # smartset (and if it is not, it should.)
        first = min(revs)
        nullrev = node.nullrev
        if first == nullrev:
            # Are there nodes with a null first parent and a non-null
            # second one? Maybe. Do we care? Probably not.
            for i in cl:
                yield i
        else:
            seen = set(revs)
            for i in cl.revs(first + 1):
                for x in cl.parentrevs(i)[:cut]:
                    if x != nullrev and x in seen:
                        seen.add(i)
                        yield i
                        break

    return generatorset(iterate(), iterasc=True)

def _reachablerootspure(repo, minroot, roots, heads, includepath):
    """return (heads(::<roots> and ::<heads>))

    If includepath is True, return (<roots>::<heads>)."""
    if not roots:
        return []
    parentrevs = repo.changelog.parentrevs
    roots = set(roots)
    visit = list(heads)
    reachable = set()
    seen = {}
    # prefetch all the things! (because python is slow)
    reached = reachable.add
    dovisit = visit.append
    nextvisit = visit.pop
    # open-code the post-order traversal due to the tiny size of
    # sys.getrecursionlimit()
    while visit:
        rev = nextvisit()
        if rev in roots:
            reached(rev)
            if not includepath:
                continue
        parents = parentrevs(rev)
        seen[rev] = parents
        for parent in parents:
            if parent >= minroot and parent not in seen:
                dovisit(parent)
    if not reachable:
        return baseset()
    if not includepath:
        return reachable
    for rev in sorted(seen):
        for parent in seen[rev]:
            if parent in reachable:
                reached(rev)
    return reachable

def reachableroots(repo, roots, heads, includepath=False):
    """return (heads(::<roots> and ::<heads>))

    If includepath is True, return (<roots>::<heads>)."""
    if not roots:
        return baseset()
    minroot = roots.min()
    roots = list(roots)
    heads = list(heads)
    try:
        revs = repo.changelog.reachableroots(minroot, heads, roots, includepath)
    except AttributeError:
        revs = _reachablerootspure(repo, minroot, roots, heads, includepath)
    revs = baseset(revs)
    revs.sort()
    return revs

elements = {
    # token-type: binding-strength, primary, prefix, infix, suffix
    "(": (21, None, ("group", 1, ")"), ("func", 1, ")"), None),
    "##": (20, None, None, ("_concat", 20), None),
    "~": (18, None, None, ("ancestor", 18), None),
    "^": (18, None, None, ("parent", 18), "parentpost"),
    "-": (5, None, ("negate", 19), ("minus", 5), None),
    "::": (17, None, ("dagrangepre", 17), ("dagrange", 17), "dagrangepost"),
    "..": (17, None, ("dagrangepre", 17), ("dagrange", 17), "dagrangepost"),
    ":": (15, "rangeall", ("rangepre", 15), ("range", 15), "rangepost"),
    "not": (10, None, ("not", 10), None, None),
    "!": (10, None, ("not", 10), None, None),
    "and": (5, None, None, ("and", 5), None),
    "&": (5, None, None, ("and", 5), None),
    "%": (5, None, None, ("only", 5), "onlypost"),
    "or": (4, None, None, ("or", 4), None),
    "|": (4, None, None, ("or", 4), None),
    "+": (4, None, None, ("or", 4), None),
    "=": (3, None, None, ("keyvalue", 3), None),
    ",": (2, None, None, ("list", 2), None),
    ")": (0, None, None, None, None),
    "symbol": (0, "symbol", None, None, None),
    "string": (0, "string", None, None, None),
    "end": (0, None, None, None, None),
}
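
# A reading of the table above (a brief sketch based only on the bindings
# listed there): higher numbers bind more tightly, so in an expression like
#
#     not a and b or c
#
# 'not' (10) takes only 'a', 'and' (5) then joins '(not a)' with 'b', and
# 'or' (4) applies last, giving '((not a) and b) or c'. Note also that '&',
# '|'/'+' and '!' share the same entries as 'and', 'or' and 'not'.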

keywords = set(['and', 'or', 'not'])

# default set of valid characters for the initial letter of symbols
_syminitletters = set(
    string.ascii_letters +
    string.digits + pycompat.sysstr('._@')) | set(map(chr, xrange(128, 256)))

# default set of valid characters for non-initial letters of symbols
_symletters = _syminitletters | set(pycompat.sysstr('-/'))

def tokenize(program, lookup=None, syminitletters=None, symletters=None):
    '''
    Parse a revset statement into a stream of tokens

    ``syminitletters`` is the set of valid characters for the initial
    letter of symbols.

    By default, character ``c`` is recognized as valid for initial
    letter of symbols, if ``c.isalnum() or c in '._@' or ord(c) > 127``.

    ``symletters`` is the set of valid characters for non-initial
    letters of symbols.

    By default, character ``c`` is recognized as valid for non-initial
    letters of symbols, if ``c.isalnum() or c in '-._/@' or ord(c) > 127``.

    Check that @ is a valid unquoted token character (issue3686):
    >>> list(tokenize("@::"))
    [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]

    '''
    if syminitletters is None:
        syminitletters = _syminitletters
    if symletters is None:
        symletters = _symletters

    if program and lookup:
        # attempt to parse old-style ranges first to deal with
        # things like old-tag which contain query metacharacters
        parts = program.split(':', 1)
        if all(lookup(sym) for sym in parts if sym):
            if parts[0]:
                yield ('symbol', parts[0], 0)
            if len(parts) > 1:
                s = len(parts[0])
                yield (':', None, s)
                if parts[1]:
                    yield ('symbol', parts[1], s + 1)
            yield ('end', None, len(program))
            return

    pos, l = 0, len(program)
    while pos < l:
        c = program[pos]
        if c.isspace(): # skip inter-token whitespace
            pass
        elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
            yield ('::', None, pos)
            pos += 1 # skip ahead
        elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
            yield ('..', None, pos)
            pos += 1 # skip ahead
        elif c == '#' and program[pos:pos + 2] == '##': # look ahead carefully
            yield ('##', None, pos)
            pos += 1 # skip ahead
        elif c in "():=,-|&+!~^%": # handle simple operators
            yield (c, None, pos)
        elif (c in '"\'' or c == 'r' and
              program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
            if c == 'r':
                pos += 1
                c = program[pos]
                decode = lambda x: x
            else:
                decode = parser.unescapestr
            pos += 1
            s = pos
            while pos < l: # find closing quote
                d = program[pos]
                if d == '\\': # skip over escaped characters
                    pos += 2
                    continue
                if d == c:
                    yield ('string', decode(program[s:pos]), s)
                    break
                pos += 1
            else:
                raise error.ParseError(_("unterminated string"), s)
        # gather up a symbol/keyword
        elif c in syminitletters:
            s = pos
            pos += 1
            while pos < l: # find end of symbol
                d = program[pos]
                if d not in symletters:
                    break
                if d == '.' and program[pos - 1] == '.': # special case for ..
                    pos -= 1
                    break
                pos += 1
            sym = program[s:pos]
            if sym in keywords: # operator keywords
                yield (sym, None, s)
            elif '-' in sym:
                # some jerk gave us foo-bar-baz, try to check if it's a symbol
                if lookup and lookup(sym):
                    # looks like a real symbol
                    yield ('symbol', sym, s)
                else:
                    # looks like an expression
                    parts = sym.split('-')
                    for p in parts[:-1]:
                        if p: # possible consecutive -
                            yield ('symbol', p, s)
                        s += len(p)
                        yield ('-', None, pos)
                        s += 1
                    if parts[-1]: # possible trailing -
                        yield ('symbol', parts[-1], s)
            else:
                yield ('symbol', sym, s)
            pos -= 1
        else:
            raise error.ParseError(_("syntax error in revset '%s'") %
                                   program, pos)
        pos += 1
    yield ('end', None, pos)
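
# A further worked example (a sketch traced by hand through the loop above,
# not an authoritative doctest): tokenizing a call with a quoted argument
# yields the leading symbol, the parenthesis tokens and the decoded string,
# each tagged with its starting offset:
#
#     >>> list(tokenize("keyword('foo')"))
#     [('symbol', 'keyword', 0), ('(', None, 7), ('string', 'foo', 9),
#      (')', None, 13), ('end', None, 14)]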

# helpers

def getsymbol(x):
    if x and x[0] == 'symbol':
        return x[1]
    raise error.ParseError(_('not a symbol'))

def getstring(x, err):
    if x and (x[0] == 'string' or x[0] == 'symbol'):
        return x[1]
    raise error.ParseError(err)

def getlist(x):
    if not x:
        return []
    if x[0] == 'list':
        return list(x[1:])
    return [x]

def getargs(x, min, max, err):
    l = getlist(x)
    if len(l) < min or (max >= 0 and len(l) > max):
        raise error.ParseError(err)
    return l
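
# For instance (a sketch based on the callers visible later in this file):
# a nullary predicate checks 'getargs(x, 0, 0, ...)', an optional single
# argument is expressed as 'getargs(x, 0, 1, ...)', and passing max=-1
# disables the upper bound, since only 'max >= 0' is compared against len(l).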

def getargsdict(x, funcname, keys):
    return parser.buildargsdict(getlist(x), funcname, parser.splitargspec(keys),
                                keyvaluenode='keyvalue', keynode='symbol')

def getset(repo, subset, x):
    if not x:
        raise error.ParseError(_("missing argument"))
    s = methods[x[0]](repo, subset, *x[1:])
    if util.safehasattr(s, 'isascending'):
        return s
    # else case should not happen, because all non-func are internal,
    # ignoring for now.
    if x[0] == 'func' and x[1][0] == 'symbol' and x[1][1] in symbols:
        repo.ui.deprecwarn('revset "%s" uses list instead of smartset'
                           % x[1][1],
                           '3.9')
    return baseset(s)

def _getrevsource(repo, r):
    extra = repo[r].extra()
    for label in ('source', 'transplant_source', 'rebase_source'):
        if label in extra:
            try:
                return repo[extra[label]].rev()
            except error.RepoLookupError:
                pass
    return None

# operator methods

def stringset(repo, subset, x):
    x = repo[x].rev()
    if (x in subset
        or x == node.nullrev and isinstance(subset, fullreposet)):
        return baseset([x])
    return baseset()

def rangeset(repo, subset, x, y, order):
    m = getset(repo, fullreposet(repo), x)
    n = getset(repo, fullreposet(repo), y)

    if not m or not n:
        return baseset()
    return _makerangeset(repo, subset, m.first(), n.last(), order)

def rangepre(repo, subset, y, order):
    # ':y' can't be rewritten to '0:y' since '0' may be hidden
    n = getset(repo, fullreposet(repo), y)
    if not n:
        return baseset()
    return _makerangeset(repo, subset, 0, n.last(), order)

def _makerangeset(repo, subset, m, n, order):
    if m == n:
        r = baseset([m])
    elif n == node.wdirrev:
        r = spanset(repo, m, len(repo)) + baseset([n])
    elif m == node.wdirrev:
        r = baseset([m]) + spanset(repo, len(repo) - 1, n - 1)
    elif m < n:
        r = spanset(repo, m, n + 1)
    else:
        r = spanset(repo, m, n - 1)

    if order == defineorder:
        return r & subset
    else:
        # carrying the sorting over when possible would be more efficient
        return subset & r
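
# A small sketch of the cases above (assuming spanset(repo, a, b) is a
# half-open span that excludes its end bound b): the range '2:5' becomes
# spanset(repo, 2, 6), i.e. 2..5 ascending; '5:2' becomes spanset(repo, 5, 1),
# i.e. 5..2 descending; and 'x:x' is simply baseset([x]).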

def dagrange(repo, subset, x, y, order):
    r = fullreposet(repo)
    xs = reachableroots(repo, getset(repo, r, x), getset(repo, r, y),
                        includepath=True)
    return subset & xs

def andset(repo, subset, x, y, order):
    return getset(repo, getset(repo, subset, x), y)

def differenceset(repo, subset, x, y, order):
    return getset(repo, subset, x) - getset(repo, subset, y)

def _orsetlist(repo, subset, xs):
    assert xs
    if len(xs) == 1:
        return getset(repo, subset, xs[0])
    p = len(xs) // 2
    a = _orsetlist(repo, subset, xs[:p])
    b = _orsetlist(repo, subset, xs[p:])
    return a + b

def orset(repo, subset, x, order):
    xs = getlist(x)
    if order == followorder:
        # slow path to take the subset order
        return subset & _orsetlist(repo, fullreposet(repo), xs)
    else:
        return _orsetlist(repo, subset, xs)

def notset(repo, subset, x, order):
    return subset - getset(repo, subset, x)

def listset(repo, subset, *xs):
    raise error.ParseError(_("can't use a list in this context"),
                           hint=_('see hg help "revsets.x or y"'))

def keyvaluepair(repo, subset, k, v):
    raise error.ParseError(_("can't use a key-value pair in this context"))

def func(repo, subset, a, b, order):
    f = getsymbol(a)
    if f in symbols:
        func = symbols[f]
        if getattr(func, '_takeorder', False):
            return func(repo, subset, b, order)
        return func(repo, subset, b)

    keep = lambda fn: getattr(fn, '__doc__', None) is not None

    syms = [s for (s, fn) in symbols.items() if keep(fn)]
    raise error.UnknownIdentifier(f, syms)

# functions

# symbols are callables like:
#   fn(repo, subset, x)
# with:
#   repo - current repository instance
#   subset - of revisions to be examined
#   x - argument in tree form
symbols = {}

# symbols which can't be used for a DoS attack for any given input
# (e.g. those which accept regexes as plain strings shouldn't be included)
# functions that just return a lot of changesets (like all) don't count here
safesymbols = set()

predicate = registrar.revsetpredicate()

@predicate('_destupdate')
def _destupdate(repo, subset, x):
    # experimental revset for update destination
    args = getargsdict(x, 'limit', 'clean check')
    return subset & baseset([destutil.destupdate(repo, **args)[0]])

@predicate('_destmerge')
def _destmerge(repo, subset, x):
    # experimental revset for merge destination
    sourceset = None
    if x is not None:
        sourceset = getset(repo, fullreposet(repo), x)
    return subset & baseset([destutil.destmerge(repo, sourceset=sourceset)])

@predicate('adds(pattern)', safe=True)
def adds(repo, subset, x):
    """Changesets that add a file matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "adds" is a keyword
    pat = getstring(x, _("adds requires a pattern"))
    return checkstatus(repo, subset, pat, 1)

@predicate('ancestor(*changeset)', safe=True)
def ancestor(repo, subset, x):
    """A greatest common ancestor of the changesets.

    Accepts 0 or more changesets.
    Will return empty list when passed no args.
    Greatest common ancestor of a single changeset is that changeset.
    """
    # i18n: "ancestor" is a keyword
    l = getlist(x)
    rl = fullreposet(repo)
    anc = None

    # (getset(repo, rl, i) for i in l) generates a list of lists
    for revs in (getset(repo, rl, i) for i in l):
        for r in revs:
            if anc is None:
                anc = repo[r]
            else:
                anc = anc.ancestor(repo[r])

    if anc is not None and anc.rev() in subset:
        return baseset([anc.rev()])
    return baseset()

def _ancestors(repo, subset, x, followfirst=False):
    heads = getset(repo, fullreposet(repo), x)
    if not heads:
        return baseset()
    s = _revancestors(repo, heads, followfirst)
    return subset & s

@predicate('ancestors(set)', safe=True)
def ancestors(repo, subset, x):
    """Changesets that are ancestors of a changeset in set.
    """
    return _ancestors(repo, subset, x)

@predicate('_firstancestors', safe=True)
def _firstancestors(repo, subset, x):
    # ``_firstancestors(set)``
    # Like ``ancestors(set)`` but follows only the first parents.
    return _ancestors(repo, subset, x, followfirst=True)

def ancestorspec(repo, subset, x, n, order):
    """``set~n``
    Changesets that are the Nth ancestor (first parents only) of a changeset
    in set.
    """
    try:
        n = int(n[1])
    except (TypeError, ValueError):
        raise error.ParseError(_("~ expects a number"))
    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        for i in range(n):
            r = cl.parentrevs(r)[0]
        ps.add(r)
    return subset & ps
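
# For example (a sketch of the loop above): with n == 2, each rev r in the
# operand set is walked two steps along first parents, i.e. it contributes
# cl.parentrevs(cl.parentrevs(r)[0])[0], so 'foo~2' names the first-parent
# grandparent of foo.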

@predicate('author(string)', safe=True)
def author(repo, subset, x):
    """Alias for ``user(string)``.
    """
    # i18n: "author" is a keyword
    n = encoding.lower(getstring(x, _("author requires a string")))
    kind, pattern, matcher = _substringmatcher(n)
    return subset.filter(lambda x: matcher(encoding.lower(repo[x].user())),
                         condrepr=('<user %r>', n))

@predicate('bisect(string)', safe=True)
def bisect(repo, subset, x):
    """Changesets marked in the specified bisect status:

    - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
    - ``goods``, ``bads`` : csets topologically good/bad
    - ``range`` : csets taking part in the bisection
    - ``pruned`` : csets that are goods, bads or skipped
    - ``untested`` : csets whose fate is yet unknown
    - ``ignored`` : csets ignored due to DAG topology
    - ``current`` : the cset currently being bisected
    """
    # i18n: "bisect" is a keyword
    status = getstring(x, _("bisect requires a string")).lower()
    state = set(hbisect.get(repo, status))
    return subset & state

# Backward-compatibility
# - no help entry so that we do not advertise it any more
@predicate('bisected', safe=True)
def bisected(repo, subset, x):
    return bisect(repo, subset, x)

@predicate('bookmark([name])', safe=True)
def bookmark(repo, subset, x):
    """The named bookmark or all bookmarks.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a bookmark that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "bookmark" is a keyword
    args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
    if args:
        bm = getstring(args[0],
                       # i18n: "bookmark" is a keyword
                       _('the argument to bookmark must be a string'))
        kind, pattern, matcher = util.stringmatcher(bm)
        bms = set()
        if kind == 'literal':
            bmrev = repo._bookmarks.get(pattern, None)
            if not bmrev:
                raise error.RepoLookupError(_("bookmark '%s' does not exist")
                                            % pattern)
            bms.add(repo[bmrev].rev())
        else:
            matchrevs = set()
            for name, bmrev in repo._bookmarks.iteritems():
                if matcher(name):
                    matchrevs.add(bmrev)
            if not matchrevs:
                raise error.RepoLookupError(_("no bookmarks exist"
                                              " that match '%s'") % pattern)
            for bmrev in matchrevs:
                bms.add(repo[bmrev].rev())
    else:
        bms = set([repo[r].rev()
                   for r in repo._bookmarks.values()])
    bms -= set([node.nullrev])
    return subset & bms
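
# Sketches of the two matching modes above (the bookmark names are made up):
# 'bookmark("release-1.0")' or 'bookmark("literal:release-1.0")' looks up the
# single bookmark of exactly that name via repo._bookmarks.get(), while
# 'bookmark("re:release-.*")' runs the regular expression against every
# bookmark name and selects all revisions carrying a match.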

@predicate('branch(string or set)', safe=True)
def branch(repo, subset, x):
    """
    All changesets belonging to the given branch or the branches of the given
    changesets.

    If `string` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a branch that actually starts with `re:`,
    use the prefix `literal:`.
    """
    getbi = repo.revbranchcache().branchinfo

    try:
        b = getstring(x, '')
    except error.ParseError:
        # not a string, but another revspec, e.g. tip()
        pass
    else:
        kind, pattern, matcher = util.stringmatcher(b)
        if kind == 'literal':
            # note: falls through to the revspec case if no branch with
            # this name exists and pattern kind is not specified explicitly
            if pattern in repo.branchmap():
                return subset.filter(lambda r: matcher(getbi(r)[0]),
                                     condrepr=('<branch %r>', b))
            if b.startswith('literal:'):
                raise error.RepoLookupError(_("branch '%s' does not exist")
                                            % pattern)
        else:
            return subset.filter(lambda r: matcher(getbi(r)[0]),
                                 condrepr=('<branch %r>', b))

    s = getset(repo, fullreposet(repo), x)
    b = set()
    for r in s:
        b.add(getbi(r)[0])
    c = s.__contains__
    return subset.filter(lambda r: c(r) or getbi(r)[0] in b,
                         condrepr=lambda: '<branch %r>' % sorted(b))

@predicate('bumped()', safe=True)
def bumped(repo, subset, x):
    """Mutable changesets marked as successors of public changesets.

    Only non-public and non-obsolete changesets can be `bumped`.
    """
    # i18n: "bumped" is a keyword
    getargs(x, 0, 0, _("bumped takes no arguments"))
    bumped = obsmod.getrevs(repo, 'bumped')
    return subset & bumped

@predicate('bundle()', safe=True)
def bundle(repo, subset, x):
    """Changesets in the bundle.

    Bundle must be specified by the -R option."""

    try:
        bundlerevs = repo.changelog.bundlerevs
    except AttributeError:
        raise error.Abort(_("no bundle provided - specify with -R"))
    return subset & bundlerevs

def checkstatus(repo, subset, pat, field):
    hasset = matchmod.patkind(pat) == 'set'

    mcache = [None]
    def matches(x):
        c = repo[x]
        if not mcache[0] or hasset:
            mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
        m = mcache[0]
        fname = None
        if not m.anypats() and len(m.files()) == 1:
            fname = m.files()[0]
        if fname is not None:
            if fname not in c.files():
                return False
        else:
            for f in c.files():
                if m(f):
                    break
            else:
                return False
        files = repo.status(c.p1().node(), c.node())[field]
        if fname is not None:
            if fname in files:
                return True
        else:
            for f in files:
                if m(f):
                    return True

    return subset.filter(matches, condrepr=('<status[%r] %r>', field, pat))
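
# A note on the 'field' parameter above (a sketch based on the callers in
# this file and on repo.status() listing its categories in the order
# modified, added, removed, ...): adds() passes 1 so that a changeset is
# selected when its status against the first parent lists a matching file
# as added.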

def _children(repo, subset, parentset):
    if not parentset:
        return baseset()
    cs = set()
    pr = repo.changelog.parentrevs
    minrev = parentset.min()
    nullrev = node.nullrev
    for r in subset:
        if r <= minrev:
            continue
        p1, p2 = pr(r)
        if p1 in parentset:
            cs.add(r)
        if p2 != nullrev and p2 in parentset:
            cs.add(r)
    return baseset(cs)

@predicate('children(set)', safe=True)
def children(repo, subset, x):
    """Child changesets of changesets in set.
    """
    s = getset(repo, fullreposet(repo), x)
    cs = _children(repo, subset, s)
    return subset & cs

@predicate('closed()', safe=True)
def closed(repo, subset, x):
    """Changeset is closed.
    """
    # i18n: "closed" is a keyword
    getargs(x, 0, 0, _("closed takes no arguments"))
    return subset.filter(lambda r: repo[r].closesbranch(),
                         condrepr='<branch closed>')

@predicate('contains(pattern)')
def contains(repo, subset, x):
    """The revision's manifest contains a file matching pattern (but might not
    modify it). See :hg:`help patterns` for information about file patterns.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.
    """
    # i18n: "contains" is a keyword
    pat = getstring(x, _("contains requires a pattern"))

    def matches(x):
        if not matchmod.patkind(pat):
            pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
            if pats in repo[x]:
                return True
        else:
            c = repo[x]
            m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
            for f in c.manifest():
                if m(f):
                    return True
        return False

    return subset.filter(matches, condrepr=('<contains %r>', pat))

@predicate('converted([id])', safe=True)
def converted(repo, subset, x):
    """Changesets converted from the given identifier in the old repository if
    present, or all converted changesets if no identifier is specified.
    """

    # There is exactly no chance of resolving the revision, so do a simple
    # string compare and hope for the best

    rev = None
    # i18n: "converted" is a keyword
    l = getargs(x, 0, 1, _('converted takes one or no arguments'))
    if l:
        # i18n: "converted" is a keyword
        rev = getstring(l[0], _('converted requires a revision'))

    def _matchvalue(r):
        source = repo[r].extra().get('convert_revision', None)
        return source is not None and (rev is None or source.startswith(rev))

    return subset.filter(lambda r: _matchvalue(r),
                         condrepr=('<converted %r>', rev))

@predicate('date(interval)', safe=True)
def date(repo, subset, x):
    """Changesets within the interval, see :hg:`help dates`.
    """
    # i18n: "date" is a keyword
    ds = getstring(x, _("date requires a string"))
    dm = util.matchdate(ds)
    return subset.filter(lambda x: dm(repo[x].date()[0]),
                         condrepr=('<date %r>', ds))

@predicate('desc(string)', safe=True)
def desc(repo, subset, x):
    """Search commit message for string. The match is case-insensitive.
    """
    # i18n: "desc" is a keyword
    ds = encoding.lower(getstring(x, _("desc requires a string")))

    def matches(x):
        c = repo[x]
        return ds in encoding.lower(c.description())

    return subset.filter(matches, condrepr=('<desc %r>', ds))

def _descendants(repo, subset, x, followfirst=False):
    roots = getset(repo, fullreposet(repo), x)
    if not roots:
        return baseset()
    s = _revdescendants(repo, roots, followfirst)

    # Both sets need to be ascending in order to lazily return the union
    # in the correct order.
    base = subset & roots
    desc = subset & s
    result = base + desc
    if subset.isascending():
        result.sort()
    elif subset.isdescending():
        result.sort(reverse=True)
    else:
        result = subset & result
    return result

@predicate('descendants(set)', safe=True)
def descendants(repo, subset, x):
    """Changesets which are descendants of changesets in set.
    """
    return _descendants(repo, subset, x)

@predicate('_firstdescendants', safe=True)
def _firstdescendants(repo, subset, x):
    # ``_firstdescendants(set)``
    # Like ``descendants(set)`` but follows only the first parents.
    return _descendants(repo, subset, x, followfirst=True)

@predicate('destination([set])', safe=True)
def destination(repo, subset, x):
    """Changesets that were created by a graft, transplant or rebase operation,
    with the given revisions specified as the source. Omitting the optional set
    is the same as passing all().
    """
    if x is not None:
        sources = getset(repo, fullreposet(repo), x)
    else:
        sources = fullreposet(repo)

    dests = set()

    # subset contains all of the possible destinations that can be returned, so
    # iterate over them and see if their source(s) were provided in the arg set.
    # Even if the immediate src of r is not in the arg set, src's source (or
    # further back) may be. Scanning back further than the immediate src allows
    # transitive transplants and rebases to yield the same results as transitive
    # grafts.
    for r in subset:
        src = _getrevsource(repo, r)
        lineage = None

        while src is not None:
            if lineage is None:
                lineage = list()

            lineage.append(r)

            # The visited lineage is a match if the current source is in the arg
            # set. Since every candidate dest is visited by way of iterating
            # subset, any dests further back in the lineage will be tested by a
            # different iteration over subset. Likewise, if the src was already
            # selected, the current lineage can be selected without going back
            # further.
            if src in sources or src in dests:
                dests.update(lineage)
                break

            r = src
            src = _getrevsource(repo, r)

    return subset.filter(dests.__contains__,
                         condrepr=lambda: '<destination %r>' % sorted(dests))
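
# A concrete sketch of the transitive scan above (hypothetical revisions):
# if changeset B was grafted from A, and C was later grafted from B, then
# destination(A) selects both B and C: B because its recorded source is A
# directly, and C because walking its source chain (C -> B -> A) eventually
# reaches a member of the argument set.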

@predicate('divergent()', safe=True)
def divergent(repo, subset, x):
    """
    Final successors of changesets with an alternative set of final successors.
    """
    # i18n: "divergent" is a keyword
    getargs(x, 0, 0, _("divergent takes no arguments"))
    divergent = obsmod.getrevs(repo, 'divergent')
    return subset & divergent

@predicate('extinct()', safe=True)
def extinct(repo, subset, x):
    """Obsolete changesets with obsolete descendants only.
    """
    # i18n: "extinct" is a keyword
    getargs(x, 0, 0, _("extinct takes no arguments"))
    extincts = obsmod.getrevs(repo, 'extinct')
    return subset & extincts

@predicate('extra(label, [value])', safe=True)
def extra(repo, subset, x):
    """Changesets with the given label in the extra metadata, with the given
    optional value.

    If `value` starts with `re:`, the remainder of the value is treated as
    a regular expression. To match a value that actually starts with `re:`,
    use the prefix `literal:`.
    """
    args = getargsdict(x, 'extra', 'label value')
    if 'label' not in args:
        # i18n: "extra" is a keyword
        raise error.ParseError(_('extra takes at least 1 argument'))
    # i18n: "extra" is a keyword
    label = getstring(args['label'], _('first argument to extra must be '
                                       'a string'))
    value = None

    if 'value' in args:
        # i18n: "extra" is a keyword
        value = getstring(args['value'], _('second argument to extra must be '
                                           'a string'))
        kind, value, matcher = util.stringmatcher(value)

    def _matchvalue(r):
        extra = repo[r].extra()
        return label in extra and (value is None or matcher(extra[label]))

    return subset.filter(lambda r: _matchvalue(r),
                         condrepr=('<extra[%r] %r>', label, value))
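
# Example for extra() above (illustrative sketch; 'mykey' is a placeholder key
# that some tool is assumed to have written into the changeset's extra dict):
#
#   repo.revs("extra('mykey')")                # key present, any value
#   repo.revs(r"extra('mykey', 're:^v\d+$')")  # value matched as a regex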

@predicate('filelog(pattern)', safe=True)
def filelog(repo, subset, x):
    """Changesets connected to the specified filelog.

    For performance reasons, visits only revisions mentioned in the file-level
    filelog, rather than filtering through all changesets (much faster, but
    doesn't include deletes or duplicate changes). For a slower, more accurate
    result, use ``file()``.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.

    If some linkrev points to revisions filtered by the current repoview, we'll
    work around it to return a non-filtered value.
    """

    # i18n: "filelog" is a keyword
    pat = getstring(x, _("filelog requires a pattern"))
    s = set()
    cl = repo.changelog

    if not matchmod.patkind(pat):
        f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
        files = [f]
    else:
        m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
        files = (f for f in repo[None] if m(f))

    for f in files:
        fl = repo.file(f)
        known = {}
        scanpos = 0
        for fr in list(fl):
            fn = fl.node(fr)
            if fn in known:
                s.add(known[fn])
                continue

            lr = fl.linkrev(fr)
            if lr in cl:
                s.add(lr)
            elif scanpos is not None:
                # lowest matching changeset is filtered, scan further
                # ahead in changelog
                start = max(lr, scanpos) + 1
                scanpos = None
                for r in cl.revs(start):
                    # minimize parsing of non-matching entries
                    if f in cl.revision(r) and f in cl.readfiles(r):
                        try:
                            # try to use manifest delta fastpath
                            n = repo[r].filenode(f)
                            if n not in known:
                                if n == fn:
                                    s.add(r)
                                    scanpos = r
                                    break
                                else:
                                    known[n] = r
                        except error.ManifestLookupError:
                            # deletion in changelog
                            continue

    return subset & s
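
# Example for filelog() above (illustrative sketch; the path is a placeholder):
# a fast approximation when deletions and duplicate changes do not matter,
#
#   repo.revs("filelog('mercurial/revset.py')")
#
# falling back to the slower but complete file() predicate when they do.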

@predicate('first(set, [n])', safe=True)
def first(repo, subset, x):
    """An alias for limit().
    """
    return limit(repo, subset, x)

def _follow(repo, subset, x, name, followfirst=False):
    l = getargs(x, 0, 2, _("%s takes no arguments or a pattern "
                           "and an optional revset") % name)
    c = repo['.']
    if l:
        x = getstring(l[0], _("%s expected a pattern") % name)
        rev = None
        if len(l) >= 2:
            revs = getset(repo, fullreposet(repo), l[1])
            if len(revs) != 1:
                raise error.RepoLookupError(
                    _("%s expected one starting revision") % name)
            rev = revs.last()
            c = repo[rev]
        matcher = matchmod.match(repo.root, repo.getcwd(), [x],
                                 ctx=repo[rev], default='path')

        files = c.manifest().walk(matcher)

        s = set()
        for fname in files:
            fctx = c[fname]
            s = s.union(set(c.rev() for c in fctx.ancestors(followfirst)))
            # include the revision responsible for the most recent version
            s.add(fctx.introrev())
    else:
        s = _revancestors(repo, baseset([c.rev()]), followfirst)

    return subset & s

@predicate('follow([pattern[, startrev]])', safe=True)
def follow(repo, subset, x):
    """
    An alias for ``::.`` (ancestors of the working directory's first parent).
    If pattern is specified, the histories of files matching given
    pattern in the revision given by startrev are followed, including copies.
    """
    return _follow(repo, subset, x, 'follow')
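
# Example for follow() above (illustrative sketch; the path is a placeholder):
#
#   hg log -r "follow()"                       # same as ::. (working parent history)
#   hg log -r "follow('mercurial/revset.py')"  # history of one file, including copies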

@predicate('_followfirst', safe=True)
def _followfirst(repo, subset, x):
    # ``followfirst([pattern[, startrev]])``
    # Like ``follow([pattern[, startrev]])`` but follows only the first parent
    # of every revision or file revision.
    return _follow(repo, subset, x, '_followfirst', followfirst=True)

@predicate('followlines(file, fromline, toline[, rev=.])', safe=True)
def followlines(repo, subset, x):
    """Changesets modifying `file` in line range ('fromline', 'toline').

    Line range corresponds to 'file' content at 'rev' and should hence be
    consistent with file size. If rev is not specified, working directory's
    parent is used.
    """
    from . import context # avoid circular import issues

    args = getargsdict(x, 'followlines', 'file *lines rev')
    if len(args['lines']) != 2:
        raise error.ParseError(_("followlines takes at least three arguments"))

    rev = '.'
    if 'rev' in args:
        revs = getset(repo, fullreposet(repo), args['rev'])
        if len(revs) != 1:
            raise error.ParseError(
                _("followlines expects exactly one revision"))
        rev = revs.last()

    pat = getstring(args['file'], _("followlines requires a pattern"))
    if not matchmod.patkind(pat):
        fname = pathutil.canonpath(repo.root, repo.getcwd(), pat)
    else:
        m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[rev])
        files = [f for f in repo[rev] if m(f)]
        if len(files) != 1:
            raise error.ParseError(_("followlines expects exactly one file"))
        fname = files[0]

    try:
        fromline, toline = [int(getsymbol(a)) for a in args['lines']]
    except ValueError:
        raise error.ParseError(_("line range bounds must be integers"))
    if toline - fromline < 0:
        raise error.ParseError(_("line range must be positive"))
    if fromline < 1:
        raise error.ParseError(_("fromline must be strictly positive"))
    fromline -= 1

    fctx = repo[rev].filectx(fname)
    revs = (c.rev() for c in context.blockancestors(fctx, fromline, toline))
    return subset & generatorset(revs, iterasc=False)
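
# Example for followlines() above (illustrative sketch; the path and line
# numbers are placeholders): history of a specific line range of one file,
# evaluated against the working directory parent unless rev is given:
#
#   repo.revs("followlines('mercurial/revset.py', 100, 120)")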

@predicate('all()', safe=True)
def getall(repo, subset, x):
    """All changesets, the same as ``0:tip``.
    """
    # i18n: "all" is a keyword
    getargs(x, 0, 0, _("all takes no arguments"))
    return subset & spanset(repo) # drop "null" if any

@predicate('grep(regex)')
def grep(repo, subset, x):
    """Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
    to ensure special escape characters are handled correctly. Unlike
    ``keyword(string)``, the match is case-sensitive.
    """
    try:
        # i18n: "grep" is a keyword
        gr = re.compile(getstring(x, _("grep requires a string")))
    except re.error as e:
        raise error.ParseError(_('invalid match pattern: %s') % e)

    def matches(x):
        c = repo[x]
        for e in c.files() + [c.user(), c.description()]:
            if gr.search(e):
                return True
        return False

    return subset.filter(matches, condrepr=('<grep %r>', gr.pattern))
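
# Example for grep() above (illustrative sketch; the pattern is a placeholder):
# a raw string keeps backslashes out of the revset parser's way, and the match
# is case-sensitive, unlike keyword():
#
#   repo.revs(r"grep(r'issue\d+')")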

@predicate('_matchfiles', safe=True)
def _matchfiles(repo, subset, x):
    # _matchfiles takes a revset list of prefixed arguments:
    #
    #   [p:foo, i:bar, x:baz]
    #
    # builds a match object from them and filters subset. Allowed
    # prefixes are 'p:' for regular patterns, 'i:' for include
    # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
    # a revision identifier, or the empty string to reference the
    # working directory, from which the match object is
    # initialized. Use 'd:' to set the default matching mode, default
    # to 'glob'. At most one 'r:' and 'd:' argument can be passed.

    l = getargs(x, 1, -1, "_matchfiles requires at least one argument")
    pats, inc, exc = [], [], []
    rev, default = None, None
    for arg in l:
        s = getstring(arg, "_matchfiles requires string arguments")
        prefix, value = s[:2], s[2:]
        if prefix == 'p:':
            pats.append(value)
        elif prefix == 'i:':
            inc.append(value)
        elif prefix == 'x:':
            exc.append(value)
        elif prefix == 'r:':
            if rev is not None:
                raise error.ParseError('_matchfiles expected at most one '
                                       'revision')
            if value != '': # empty means working directory; leave rev as None
                rev = value
        elif prefix == 'd:':
            if default is not None:
                raise error.ParseError('_matchfiles expected at most one '
                                       'default mode')
            default = value
        else:
            raise error.ParseError('invalid _matchfiles prefix: %s' % prefix)
    if not default:
        default = 'glob'

    m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
                       exclude=exc, ctx=repo[rev], default=default)

    # This directly reads the changelog data, as creating a changectx for all
    # revisions is quite expensive.
    getfiles = repo.changelog.readfiles
    wdirrev = node.wdirrev
    def matches(x):
        if x == wdirrev:
            files = repo[x].files()
        else:
            files = getfiles(x)
        for f in files:
            if m(f):
                return True
        return False

    return subset.filter(matches,
                         condrepr=('<matchfiles patterns=%r, include=%r '
                                   'exclude=%r, default=%r, rev=%r>',
                                   pats, inc, exc, default, rev))

@predicate('file(pattern)', safe=True)
def hasfile(repo, subset, x):
    """Changesets affecting files matched by pattern.

    For a faster but less accurate result, consider using ``filelog()``
    instead.

    This predicate uses ``glob:`` as the default kind of pattern.
    """
    # i18n: "file" is a keyword
    pat = getstring(x, _("file requires a pattern"))
    return _matchfiles(repo, subset, ('string', 'p:' + pat))
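
# Example for file() above (illustrative sketch; patterns are placeholders):
# file() hands its pattern to the internal _matchfiles() predicate with a 'p:'
# prefix and a 'glob:' default kind, so the two queries below are assumed to
# be equivalent in effect:
#
#   repo.revs("file('glob:*.py')")
#   repo.revs("_matchfiles('p:glob:*.py')")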

@predicate('head()', safe=True)
def head(repo, subset, x):
    """Changeset is a named branch head.
    """
    # i18n: "head" is a keyword
    getargs(x, 0, 0, _("head takes no arguments"))
    hs = set()
    cl = repo.changelog
    for ls in repo.branchmap().itervalues():
        hs.update(cl.rev(h) for h in ls)
    return subset & baseset(hs)

@predicate('heads(set)', safe=True)
def heads(repo, subset, x):
    """Members of set with no children in set.
    """
    s = getset(repo, subset, x)
    ps = parents(repo, subset, x)
    return s - ps

@predicate('hidden()', safe=True)
def hidden(repo, subset, x):
    """Hidden changesets.
    """
    # i18n: "hidden" is a keyword
    getargs(x, 0, 0, _("hidden takes no arguments"))
    hiddenrevs = repoview.filterrevs(repo, 'visible')
    return subset & hiddenrevs

@predicate('keyword(string)', safe=True)
def keyword(repo, subset, x):
    """Search commit message, user name, and names of changed files for
    string. The match is case-insensitive.

    For a regular expression or case sensitive search of these fields, use
    ``grep(regex)``.
    """
    # i18n: "keyword" is a keyword
    kw = encoding.lower(getstring(x, _("keyword requires a string")))

    def matches(r):
        c = repo[r]
        return any(kw in encoding.lower(t)
                   for t in c.files() + [c.user(), c.description()])

    return subset.filter(matches, condrepr=('<keyword %r>', kw))
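
# Example contrasting keyword() and grep() as the docstring above suggests
# (illustrative sketch; the search terms are placeholders):
#
#   repo.revs("keyword(bug)")        # case-insensitive substring match
#   repo.revs(r"grep(r'[Bb]ug\d+')") # case-sensitive regular expression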

@predicate('limit(set[, n[, offset]])', safe=True)
def limit(repo, subset, x):
    """First n members of set, defaulting to 1, starting from offset.
    """
    args = getargsdict(x, 'limit', 'set n offset')
    if 'set' not in args:
        # i18n: "limit" is a keyword
        raise error.ParseError(_("limit requires one to three arguments"))
    try:
        lim, ofs = 1, 0
        if 'n' in args:
            # i18n: "limit" is a keyword
            lim = int(getstring(args['n'], _("limit requires a number")))
        if 'offset' in args:
            # i18n: "limit" is a keyword
            ofs = int(getstring(args['offset'], _("limit requires a number")))
        if ofs < 0:
            raise error.ParseError(_("negative offset"))
    except (TypeError, ValueError):
        # i18n: "limit" is a keyword
        raise error.ParseError(_("limit expects a number"))
    os = getset(repo, fullreposet(repo), args['set'])
    result = []
    it = iter(os)
    for x in xrange(ofs):
        y = next(it, None)
        if y is None:
            break
    for x in xrange(lim):
        y = next(it, None)
        if y is None:
            break
        elif y in subset:
            result.append(y)
    return baseset(result, datarepr=('<limit n=%d, offset=%d, %r, %r>',
                                     lim, ofs, subset, os))
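
# Example for limit() above (illustrative sketch): first(), defined earlier in
# this file, is an alias, so the following pick three changesets, optionally
# skipping the first ten of the input set:
#
#   repo.revs("limit(all(), 3)")
#   repo.revs("limit(all(), 3, 10)")
#   repo.revs("first(ancestors(tip), 3)")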

@predicate('last(set, [n])', safe=True)
def last(repo, subset, x):
    """Last n members of set, defaulting to 1.
    """
    # i18n: "last" is a keyword
    l = getargs(x, 1, 2, _("last requires one or two arguments"))
    try:
        lim = 1
        if len(l) == 2:
            # i18n: "last" is a keyword
            lim = int(getstring(l[1], _("last requires a number")))
    except (TypeError, ValueError):
        # i18n: "last" is a keyword
        raise error.ParseError(_("last expects a number"))
    os = getset(repo, fullreposet(repo), l[0])
    os.reverse()
    result = []
    it = iter(os)
    for x in xrange(lim):
        y = next(it, None)
        if y is None:
            break
        elif y in subset:
            result.append(y)
    return baseset(result, datarepr=('<last n=%d, %r, %r>', lim, subset, os))

@predicate('max(set)', safe=True)
def maxrev(repo, subset, x):
    """Changeset with highest revision number in set.
    """
    os = getset(repo, fullreposet(repo), x)
    try:
        m = os.max()
        if m in subset:
            return baseset([m], datarepr=('<max %r, %r>', subset, os))
    except ValueError:
        # os.max() throws a ValueError when the collection is empty.
        # Same as python's max().
        pass
    return baseset(datarepr=('<max %r, %r>', subset, os))

@predicate('merge()', safe=True)
def merge(repo, subset, x):
    """Changeset is a merge changeset.
    """
    # i18n: "merge" is a keyword
    getargs(x, 0, 0, _("merge takes no arguments"))
    cl = repo.changelog
    return subset.filter(lambda r: cl.parentrevs(r)[1] != -1,
                         condrepr='<merge>')

@predicate('branchpoint()', safe=True)
def branchpoint(repo, subset, x):
    """Changesets with more than one child.
    """
    # i18n: "branchpoint" is a keyword
    getargs(x, 0, 0, _("branchpoint takes no arguments"))
    cl = repo.changelog
    if not subset:
        return baseset()
    # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
    # (and if it is not, it should.)
    baserev = min(subset)
    parentscount = [0]*(len(repo) - baserev)
    for r in cl.revs(start=baserev + 1):
        for p in cl.parentrevs(r):
            if p >= baserev:
                parentscount[p - baserev] += 1
    return subset.filter(lambda r: parentscount[r - baserev] > 1,
                         condrepr='<branchpoint>')

@predicate('min(set)', safe=True)
def minrev(repo, subset, x):
    """Changeset with lowest revision number in set.
    """
    os = getset(repo, fullreposet(repo), x)
    try:
        m = os.min()
        if m in subset:
            return baseset([m], datarepr=('<min %r, %r>', subset, os))
    except ValueError:
        # os.min() throws a ValueError when the collection is empty.
        # Same as python's min().
        pass
    return baseset(datarepr=('<min %r, %r>', subset, os))
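
# Example for last()/max()/min() above (illustrative sketch; the inner
# expressions are placeholders):
#
#   repo.revs("max(public())")         # highest-numbered public changeset
#   repo.revs("last(ancestors(.), 5)") # the five highest-numbered ancestors of
#                                      # the working directory parent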

@predicate('modifies(pattern)', safe=True)
def modifies(repo, subset, x):
    """Changesets modifying files matched by pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "modifies" is a keyword
    pat = getstring(x, _("modifies requires a pattern"))
    return checkstatus(repo, subset, pat, 0)

@predicate('named(namespace)')
def named(repo, subset, x):
    """The changesets in a given namespace.

    If `namespace` starts with `re:`, the remainder of the string is treated as
    a regular expression. To match a namespace that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "named" is a keyword
    args = getargs(x, 1, 1, _('named requires a namespace argument'))

    ns = getstring(args[0],
                   # i18n: "named" is a keyword
                   _('the argument to named must be a string'))
    kind, pattern, matcher = util.stringmatcher(ns)
    namespaces = set()
    if kind == 'literal':
        if pattern not in repo.names:
            raise error.RepoLookupError(_("namespace '%s' does not exist")
                                        % ns)
        namespaces.add(repo.names[pattern])
    else:
        for name, ns in repo.names.iteritems():
            if matcher(name):
                namespaces.add(ns)
        if not namespaces:
            raise error.RepoLookupError(_("no namespace exists"
                                          " that match '%s'") % pattern)

    names = set()
    for ns in namespaces:
        for name in ns.listnames(repo):
            if name not in ns.deprecated:
                names.update(repo[n].rev() for n in ns.nodes(repo, name))

    names -= set([node.nullrev])
    return subset & names
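
# Example for named() above (illustrative sketch): 'bookmarks', 'tags' and
# 'branches' are assumed to be the built-in namespaces; extensions may
# register more.
#
#   repo.revs("named('bookmarks')")  # every bookmarked changeset
#   repo.revs("named('tags')")       # every tagged changeset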

@predicate('id(string)', safe=True)
def node_(repo, subset, x):
    """Revision non-ambiguously specified by the given hex string prefix.
    """
    # i18n: "id" is a keyword
    l = getargs(x, 1, 1, _("id requires one argument"))
    # i18n: "id" is a keyword
    n = getstring(l[0], _("id requires a string"))
    if len(n) == 40:
        try:
            rn = repo.changelog.rev(node.bin(n))
        except (LookupError, TypeError):
            rn = None
    else:
        rn = None
        pm = repo.changelog._partialmatch(n)
        if pm is not None:
            rn = repo.changelog.rev(pm)

    if rn is None:
        return baseset()
    result = baseset([rn])
    return result & subset

@predicate('obsolete()', safe=True)
def obsolete(repo, subset, x):
    """Mutable changeset with a newer version."""
    # i18n: "obsolete" is a keyword
    getargs(x, 0, 0, _("obsolete takes no arguments"))
    obsoletes = obsmod.getrevs(repo, 'obsolete')
    return subset & obsoletes

@predicate('only(set, [set])', safe=True)
def only(repo, subset, x):
    """Changesets that are ancestors of the first set that are not ancestors
    of any other head in the repo. If a second set is specified, the result
    is ancestors of the first set that are not ancestors of the second set
    (i.e. ::<set1> - ::<set2>).
    """
    cl = repo.changelog
    # i18n: "only" is a keyword
    args = getargs(x, 1, 2, _('only takes one or two arguments'))
    include = getset(repo, fullreposet(repo), args[0])
    if len(args) == 1:
        if not include:
            return baseset()

        descendants = set(_revdescendants(repo, include, False))
        exclude = [rev for rev in cl.headrevs()
                   if not rev in descendants and not rev in include]
    else:
        exclude = getset(repo, fullreposet(repo), args[1])

    results = set(cl.findmissingrevs(common=exclude, heads=include))
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimizations from the fact this is a baseset.
    return subset & results
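
# Example for only() above (illustrative sketch; 'featureX' and 'default' are
# placeholder branch or bookmark names):
#
#   repo.revs("only(featureX, default)")  # ::featureX - ::default
#   repo.revs("only(.)")                  # ancestors of the working parent that
#                                         # are reachable from no other head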

@predicate('origin([set])', safe=True)
def origin(repo, subset, x):
    """
    Changesets that were specified as a source for the grafts, transplants or
    rebases that created the given revisions. Omitting the optional set is the
    same as passing all(). If a changeset created by these operations is itself
    specified as a source for one of these operations, only the source changeset
    for the first operation is selected.
    """
    if x is not None:
        dests = getset(repo, fullreposet(repo), x)
    else:
        dests = fullreposet(repo)

    def _firstsrc(rev):
        src = _getrevsource(repo, rev)
        if src is None:
            return None

        while True:
            prev = _getrevsource(repo, src)

            if prev is None:
                return src
            src = prev

    o = set([_firstsrc(r) for r in dests])
    o -= set([None])
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimizations from the fact this is a baseset.
    return subset & o

@predicate('outgoing([path])', safe=True)
def outgoing(repo, subset, x):
    """Changesets not found in the specified destination repository, or the
    default push location.
    """
    # Avoid cycles.
    from . import (
        discovery,
        hg,
    )
    # i18n: "outgoing" is a keyword
    l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
    # i18n: "outgoing" is a keyword
    dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
    dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.peer(repo, {}, dest)
    repo.ui.pushbuffer()
    outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
    repo.ui.popbuffer()
    cl = repo.changelog
    o = set([cl.rev(r) for r in outgoing.missing])
    return subset & o
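
# Example for outgoing() above (illustrative sketch; 'myfork' is a placeholder
# path alias from the [paths] section of hgrc):
#
#   hg log -r "outgoing()"          # what `hg push` would send to the default
#                                   # push location
#   hg log -r "outgoing('myfork')"  # same, against a named path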

@predicate('p1([set])', safe=True)
def p1(repo, subset, x):
    """First parent of changesets in set, or the working directory.
    """
    if x is None:
        p = repo[x].p1().rev()
        if p >= 0:
            return subset & baseset([p])
        return baseset()

    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        ps.add(cl.parentrevs(r)[0])
    ps -= set([node.nullrev])
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimizations from the fact this is a baseset.
    return subset & ps

@predicate('p2([set])', safe=True)
def p2(repo, subset, x):
    """Second parent of changesets in set, or the working directory.
    """
    if x is None:
        ps = repo[x].parents()
        try:
            p = ps[1].rev()
            if p >= 0:
                return subset & baseset([p])
            return baseset()
        except IndexError:
            return baseset()

    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        ps.add(cl.parentrevs(r)[1])
    ps -= set([node.nullrev])
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimizations from the fact this is a baseset.
    return subset & ps

def parentpost(repo, subset, x, order):
    return p1(repo, subset, x)

@predicate('parents([set])', safe=True)
def parents(repo, subset, x):
    """
    The set of all parents for all changesets in set, or the working directory.
    """
    if x is None:
        ps = set(p.rev() for p in repo[x].parents())
    else:
        ps = set()
        cl = repo.changelog
        up = ps.update
        parentrevs = cl.parentrevs
        for r in getset(repo, fullreposet(repo), x):
            if r == node.wdirrev:
                up(p.rev() for p in repo[r].parents())
            else:
                up(parentrevs(r))
    ps -= set([node.nullrev])
    return subset & ps
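
# Example for p1()/p2()/parents() above (illustrative sketch): with no
# argument each of them refers to the working directory, e.g.
#
#   repo.revs("parents()")     # parents of the working directory
#   repo.revs("p2(merge())")   # second parents of all merge changesets
#   repo.revs("parents(tip)")  # parents of the tipmost changeset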

def _phase(repo, subset, target):
    """helper to select all rev in phase <target>"""
    repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
    if repo._phasecache._phasesets:
        s = repo._phasecache._phasesets[target] - repo.changelog.filteredrevs
        s = baseset(s)
        s.sort() # sets are unordered, so we enforce ascending order
        return subset & s
    else:
        phase = repo._phasecache.phase
        condition = lambda r: phase(repo, r) == target
        return subset.filter(condition, condrepr=('<phase %r>', target),
                             cache=False)

@predicate('draft()', safe=True)
def draft(repo, subset, x):
    """Changeset in draft phase."""
    # i18n: "draft" is a keyword
    getargs(x, 0, 0, _("draft takes no arguments"))
    target = phases.draft
    return _phase(repo, subset, target)

@predicate('secret()', safe=True)
def secret(repo, subset, x):
    """Changeset in secret phase."""
    # i18n: "secret" is a keyword
    getargs(x, 0, 0, _("secret takes no arguments"))
    target = phases.secret
    return _phase(repo, subset, target)
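
# Example for the phase predicates above (illustrative sketch): draft() and
# secret() each select exactly one phase, while "not public()" combines them
# through the ordinary revset operators (public() is defined further below):
#
#   repo.revs("draft()")
#   repo.revs("secret()")
#   repo.revs("not public() and ancestors(.)")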

def parentspec(repo, subset, x, n, order):
    """``set^0``
    The set.
    ``set^1`` (or ``set^``), ``set^2``
    First or second parent, respectively, of all changesets in set.
    """
    try:
        n = int(n[1])
        if n not in (0, 1, 2):
            raise ValueError
    except (TypeError, ValueError):
        raise error.ParseError(_("^ expects a number 0, 1, or 2"))
    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        if n == 0:
            ps.add(r)
        elif n == 1:
            ps.add(cl.parentrevs(r)[0])
        elif n == 2:
            parents = cl.parentrevs(r)
            if parents[1] != node.nullrev:
                ps.add(parents[1])
    return subset & ps
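
# Example for the ^ suffix handled above (illustrative sketch): parentspec()
# backs the "rev^n" syntax, so
#
#   hg log -r "tip^"    # first parent of tip (same as tip^1)
#   hg log -r "tip^2"   # second parent of tip; selects nothing if tip is not a merge
#   hg log -r "tip^0"   # tip itself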
1676
1679
1677 @predicate('present(set)', safe=True)
1680 @predicate('present(set)', safe=True)
1678 def present(repo, subset, x):
1681 def present(repo, subset, x):
1679 """An empty set, if any revision in set isn't found; otherwise,
1682 """An empty set, if any revision in set isn't found; otherwise,
1680 all revisions in set.
1683 all revisions in set.
1681
1684
1682 If any of specified revisions is not present in the local repository,
1685 If any of specified revisions is not present in the local repository,
1683 the query is normally aborted. But this predicate allows the query
1686 the query is normally aborted. But this predicate allows the query
1684 to continue even in such cases.
1687 to continue even in such cases.
1685 """
1688 """
1686 try:
1689 try:
1687 return getset(repo, subset, x)
1690 return getset(repo, subset, x)
1688 except error.RepoLookupError:
1691 except error.RepoLookupError:
1689 return baseset()
1692 return baseset()
1690
1693
1691 # for internal use
1694 # for internal use
1692 @predicate('_notpublic', safe=True)
1695 @predicate('_notpublic', safe=True)
1693 def _notpublic(repo, subset, x):
1696 def _notpublic(repo, subset, x):
1694 getargs(x, 0, 0, "_notpublic takes no arguments")
1697 getargs(x, 0, 0, "_notpublic takes no arguments")
1695 repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
1698 repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
1696 if repo._phasecache._phasesets:
1699 if repo._phasecache._phasesets:
1697 s = set()
1700 s = set()
1698 for u in repo._phasecache._phasesets[1:]:
1701 for u in repo._phasecache._phasesets[1:]:
1699 s.update(u)
1702 s.update(u)
1700 s = baseset(s - repo.changelog.filteredrevs)
1703 s = baseset(s - repo.changelog.filteredrevs)
1701 s.sort()
1704 s.sort()
1702 return subset & s
1705 return subset & s
1703 else:
1706 else:
1704 phase = repo._phasecache.phase
1707 phase = repo._phasecache.phase
1705 target = phases.public
1708 target = phases.public
1706 condition = lambda r: phase(repo, r) != target
1709 condition = lambda r: phase(repo, r) != target
1707 return subset.filter(condition, condrepr=('<phase %r>', target),
1710 return subset.filter(condition, condrepr=('<phase %r>', target),
1708 cache=False)
1711 cache=False)
1709
1712
1710 @predicate('public()', safe=True)
1713 @predicate('public()', safe=True)
1711 def public(repo, subset, x):
1714 def public(repo, subset, x):
1712 """Changeset in public phase."""
1715 """Changeset in public phase."""
1713 # i18n: "public" is a keyword
1716 # i18n: "public" is a keyword
1714 getargs(x, 0, 0, _("public takes no arguments"))
1717 getargs(x, 0, 0, _("public takes no arguments"))
1715 phase = repo._phasecache.phase
1718 phase = repo._phasecache.phase
1716 target = phases.public
1719 target = phases.public
1717 condition = lambda r: phase(repo, r) == target
1720 condition = lambda r: phase(repo, r) == target
1718 return subset.filter(condition, condrepr=('<phase %r>', target),
1721 return subset.filter(condition, condrepr=('<phase %r>', target),
1719 cache=False)
1722 cache=False)
1720
1723
1721 @predicate('remote([id [,path]])', safe=True)
1724 @predicate('remote([id [,path]])', safe=True)
1722 def remote(repo, subset, x):
1725 def remote(repo, subset, x):
1723 """Local revision that corresponds to the given identifier in a
1726 """Local revision that corresponds to the given identifier in a
1724 remote repository, if present. Here, the '.' identifier is a
1727 remote repository, if present. Here, the '.' identifier is a
1725 synonym for the current local branch.
1728 synonym for the current local branch.
1726 """
1729 """
1727
1730
1728 from . import hg # avoid start-up nasties
1731 from . import hg # avoid start-up nasties
1729 # i18n: "remote" is a keyword
1732 # i18n: "remote" is a keyword
1730 l = getargs(x, 0, 2, _("remote takes zero, one, or two arguments"))
1733 l = getargs(x, 0, 2, _("remote takes zero, one, or two arguments"))
1731
1734
1732 q = '.'
1735 q = '.'
1733 if len(l) > 0:
1736 if len(l) > 0:
1734 # i18n: "remote" is a keyword
1737 # i18n: "remote" is a keyword
1735 q = getstring(l[0], _("remote requires a string id"))
1738 q = getstring(l[0], _("remote requires a string id"))
1736 if q == '.':
1739 if q == '.':
1737 q = repo['.'].branch()
1740 q = repo['.'].branch()
1738
1741
1739 dest = ''
1742 dest = ''
1740 if len(l) > 1:
1743 if len(l) > 1:
1741 # i18n: "remote" is a keyword
1744 # i18n: "remote" is a keyword
1742 dest = getstring(l[1], _("remote requires a repository path"))
1745 dest = getstring(l[1], _("remote requires a repository path"))
1743 dest = repo.ui.expandpath(dest or 'default')
1746 dest = repo.ui.expandpath(dest or 'default')
1744 dest, branches = hg.parseurl(dest)
1747 dest, branches = hg.parseurl(dest)
1745 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1748 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1746 if revs:
1749 if revs:
1747 revs = [repo.lookup(rev) for rev in revs]
1750 revs = [repo.lookup(rev) for rev in revs]
1748 other = hg.peer(repo, {}, dest)
1751 other = hg.peer(repo, {}, dest)
1749 n = other.lookup(q)
1752 n = other.lookup(q)
1750 if n in repo:
1753 if n in repo:
1751 r = repo[n].rev()
1754 r = repo[n].rev()
1752 if r in subset:
1755 if r in subset:
1753 return baseset([r])
1756 return baseset([r])
1754 return baseset()
1757 return baseset()
1755
1758
1756 @predicate('removes(pattern)', safe=True)
1759 @predicate('removes(pattern)', safe=True)
1757 def removes(repo, subset, x):
1760 def removes(repo, subset, x):
1758 """Changesets which remove files matching pattern.
1761 """Changesets which remove files matching pattern.
1759
1762
1760 The pattern without explicit kind like ``glob:`` is expected to be
1763 The pattern without explicit kind like ``glob:`` is expected to be
1761 relative to the current directory and match against a file or a
1764 relative to the current directory and match against a file or a
1762 directory.
1765 directory.
1763 """
1766 """
1764 # i18n: "removes" is a keyword
1767 # i18n: "removes" is a keyword
1765 pat = getstring(x, _("removes requires a pattern"))
1768 pat = getstring(x, _("removes requires a pattern"))
1766 return checkstatus(repo, subset, pat, 2)
1769 return checkstatus(repo, subset, pat, 2)
1767
1770
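# Illustrative sketch only, not part of revset.py: removes() above delegates
# to checkstatus() with field index 2; assuming the conventional
# (modified, added, removed) ordering of a status result, the index selects
# which bucket a matched file must appear in. _example_status_bucket is a
# hypothetical helper for this toy example.
def _example_status_bucket(status, field):
    # status is a toy (modified, added, removed) triple of file lists
    return status[field]
# _example_status_bucket((['a.c'], [], ['old.txt']), 2) -> ['old.txt']
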
1768 @predicate('rev(number)', safe=True)
1771 @predicate('rev(number)', safe=True)
1769 def rev(repo, subset, x):
1772 def rev(repo, subset, x):
1770 """Revision with the given numeric identifier.
1773 """Revision with the given numeric identifier.
1771 """
1774 """
1772 # i18n: "rev" is a keyword
1775 # i18n: "rev" is a keyword
1773 l = getargs(x, 1, 1, _("rev requires one argument"))
1776 l = getargs(x, 1, 1, _("rev requires one argument"))
1774 try:
1777 try:
1775 # i18n: "rev" is a keyword
1778 # i18n: "rev" is a keyword
1776 l = int(getstring(l[0], _("rev requires a number")))
1779 l = int(getstring(l[0], _("rev requires a number")))
1777 except (TypeError, ValueError):
1780 except (TypeError, ValueError):
1778 # i18n: "rev" is a keyword
1781 # i18n: "rev" is a keyword
1779 raise error.ParseError(_("rev expects a number"))
1782 raise error.ParseError(_("rev expects a number"))
1780 if l not in repo.changelog and l != node.nullrev:
1783 if l not in repo.changelog and l != node.nullrev:
1781 return baseset()
1784 return baseset()
1782 return subset & baseset([l])
1785 return subset & baseset([l])
1783
1786
1784 @predicate('matching(revision [, field])', safe=True)
1787 @predicate('matching(revision [, field])', safe=True)
1785 def matching(repo, subset, x):
1788 def matching(repo, subset, x):
1786 """Changesets in which a given set of fields match the set of fields in the
1789 """Changesets in which a given set of fields match the set of fields in the
1787 selected revision or set.
1790 selected revision or set.
1788
1791
1789 To match more than one field pass the list of fields to match separated
1792 To match more than one field pass the list of fields to match separated
1790 by spaces (e.g. ``author description``).
1793 by spaces (e.g. ``author description``).
1791
1794
1792 Valid fields are most regular revision fields and some special fields.
1795 Valid fields are most regular revision fields and some special fields.
1793
1796
1794 Regular revision fields are ``description``, ``author``, ``branch``,
1797 Regular revision fields are ``description``, ``author``, ``branch``,
1795 ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
1798 ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
1796 and ``diff``.
1799 and ``diff``.
1797 Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
1800 Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
1798 contents of the revision. Two revisions matching their ``diff`` will
1801 contents of the revision. Two revisions matching their ``diff`` will
1799 also match their ``files``.
1802 also match their ``files``.
1800
1803
1801 Special fields are ``summary`` and ``metadata``:
1804 Special fields are ``summary`` and ``metadata``:
1802 ``summary`` matches the first line of the description.
1805 ``summary`` matches the first line of the description.
1803 ``metadata`` is equivalent to matching ``description user date``
1806 ``metadata`` is equivalent to matching ``description user date``
1804 (i.e. it matches the main metadata fields).
1807 (i.e. it matches the main metadata fields).
1805
1808
1806 ``metadata`` is the default field which is used when no fields are
1809 ``metadata`` is the default field which is used when no fields are
1807 specified. You can match more than one field at a time.
1810 specified. You can match more than one field at a time.
1808 """
1811 """
1809 # i18n: "matching" is a keyword
1812 # i18n: "matching" is a keyword
1810 l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))
1813 l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))
1811
1814
1812 revs = getset(repo, fullreposet(repo), l[0])
1815 revs = getset(repo, fullreposet(repo), l[0])
1813
1816
1814 fieldlist = ['metadata']
1817 fieldlist = ['metadata']
1815 if len(l) > 1:
1818 if len(l) > 1:
1816 fieldlist = getstring(l[1],
1819 fieldlist = getstring(l[1],
1817 # i18n: "matching" is a keyword
1820 # i18n: "matching" is a keyword
1818 _("matching requires a string "
1821 _("matching requires a string "
1819 "as its second argument")).split()
1822 "as its second argument")).split()
1820
1823
1821 # Make sure that there are no repeated fields,
1824 # Make sure that there are no repeated fields,
1822 # expand the 'special' 'metadata' field type
1825 # expand the 'special' 'metadata' field type
1823 # and check the 'files' whenever we check the 'diff'
1826 # and check the 'files' whenever we check the 'diff'
1824 fields = []
1827 fields = []
1825 for field in fieldlist:
1828 for field in fieldlist:
1826 if field == 'metadata':
1829 if field == 'metadata':
1827 fields += ['user', 'description', 'date']
1830 fields += ['user', 'description', 'date']
1828 elif field == 'diff':
1831 elif field == 'diff':
1829 # a revision matching the diff must also match the files
1832 # a revision matching the diff must also match the files
1830 # since matching the diff is very costly, make sure to
1833 # since matching the diff is very costly, make sure to
1831 # also match the files first
1834 # also match the files first
1832 fields += ['files', 'diff']
1835 fields += ['files', 'diff']
1833 else:
1836 else:
1834 if field == 'author':
1837 if field == 'author':
1835 field = 'user'
1838 field = 'user'
1836 fields.append(field)
1839 fields.append(field)
1837 fields = set(fields)
1840 fields = set(fields)
1838 if 'summary' in fields and 'description' in fields:
1841 if 'summary' in fields and 'description' in fields:
1839 # If a revision matches its description it also matches its summary
1842 # If a revision matches its description it also matches its summary
1840 fields.discard('summary')
1843 fields.discard('summary')
1841
1844
1842 # We may want to match more than one field
1845 # We may want to match more than one field
1843 # Not all fields take the same amount of time to be matched
1846 # Not all fields take the same amount of time to be matched
1844 # Sort the selected fields in order of increasing matching cost
1847 # Sort the selected fields in order of increasing matching cost
1845 fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
1848 fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
1846 'files', 'description', 'substate', 'diff']
1849 'files', 'description', 'substate', 'diff']
1847 def fieldkeyfunc(f):
1850 def fieldkeyfunc(f):
1848 try:
1851 try:
1849 return fieldorder.index(f)
1852 return fieldorder.index(f)
1850 except ValueError:
1853 except ValueError:
1851 # assume an unknown field is very costly
1854 # assume an unknown field is very costly
1852 return len(fieldorder)
1855 return len(fieldorder)
1853 fields = list(fields)
1856 fields = list(fields)
1854 fields.sort(key=fieldkeyfunc)
1857 fields.sort(key=fieldkeyfunc)
1855
1858
1856 # Each field will be matched with its own "getfield" function
1859 # Each field will be matched with its own "getfield" function
1857 # which will be added to the getfieldfuncs array of functions
1860 # which will be added to the getfieldfuncs array of functions
1858 getfieldfuncs = []
1861 getfieldfuncs = []
1859 _funcs = {
1862 _funcs = {
1860 'user': lambda r: repo[r].user(),
1863 'user': lambda r: repo[r].user(),
1861 'branch': lambda r: repo[r].branch(),
1864 'branch': lambda r: repo[r].branch(),
1862 'date': lambda r: repo[r].date(),
1865 'date': lambda r: repo[r].date(),
1863 'description': lambda r: repo[r].description(),
1866 'description': lambda r: repo[r].description(),
1864 'files': lambda r: repo[r].files(),
1867 'files': lambda r: repo[r].files(),
1865 'parents': lambda r: repo[r].parents(),
1868 'parents': lambda r: repo[r].parents(),
1866 'phase': lambda r: repo[r].phase(),
1869 'phase': lambda r: repo[r].phase(),
1867 'substate': lambda r: repo[r].substate,
1870 'substate': lambda r: repo[r].substate,
1868 'summary': lambda r: repo[r].description().splitlines()[0],
1871 'summary': lambda r: repo[r].description().splitlines()[0],
1869 'diff': lambda r: list(repo[r].diff(git=True)),
1872 'diff': lambda r: list(repo[r].diff(git=True)),
1870 }
1873 }
1871 for info in fields:
1874 for info in fields:
1872 getfield = _funcs.get(info, None)
1875 getfield = _funcs.get(info, None)
1873 if getfield is None:
1876 if getfield is None:
1874 raise error.ParseError(
1877 raise error.ParseError(
1875 # i18n: "matching" is a keyword
1878 # i18n: "matching" is a keyword
1876 _("unexpected field name passed to matching: %s") % info)
1879 _("unexpected field name passed to matching: %s") % info)
1877 getfieldfuncs.append(getfield)
1880 getfieldfuncs.append(getfield)
1878 # convert the getfield array of functions into a "getinfo" function
1881 # convert the getfield array of functions into a "getinfo" function
1879 # which returns an array of field values (or a single value if there
1882 # which returns an array of field values (or a single value if there
1880 # is only one field to match)
1883 # is only one field to match)
1881 getinfo = lambda r: [f(r) for f in getfieldfuncs]
1884 getinfo = lambda r: [f(r) for f in getfieldfuncs]
1882
1885
1883 def matches(x):
1886 def matches(x):
1884 for rev in revs:
1887 for rev in revs:
1885 target = getinfo(rev)
1888 target = getinfo(rev)
1886 match = True
1889 match = True
1887 for n, f in enumerate(getfieldfuncs):
1890 for n, f in enumerate(getfieldfuncs):
1888 if target[n] != f(x):
1891 if target[n] != f(x):
1889 match = False
1892 match = False
1890 if match:
1893 if match:
1891 return True
1894 return True
1892 return False
1895 return False
1893
1896
1894 return subset.filter(matches, condrepr=('<matching%r %r>', fields, revs))
1897 return subset.filter(matches, condrepr=('<matching%r %r>', fields, revs))
1895
1898
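# A minimal sketch, not part of revset.py, of the getfieldfuncs/getinfo
# machinery in matching() above, using plain dicts instead of changectx
# objects. The helper name and the sample fields are hypothetical.
def _example_matching(candidates, targets, fields):
    getfieldfuncs = [lambda d, f=f: d[f] for f in fields]
    getinfo = lambda d: [f(d) for f in getfieldfuncs]
    wanted = [getinfo(t) for t in targets]
    return [c for c in candidates if getinfo(c) in wanted]
# _example_matching([{'user': 'bob', 'branch': 'default'}],
#                   [{'user': 'bob', 'branch': 'stable'}],
#                   ['user']) -> [{'user': 'bob', 'branch': 'default'}]
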
1896 @predicate('reverse(set)', safe=True, takeorder=True)
1899 @predicate('reverse(set)', safe=True, takeorder=True)
1897 def reverse(repo, subset, x, order):
1900 def reverse(repo, subset, x, order):
1898 """Reverse order of set.
1901 """Reverse order of set.
1899 """
1902 """
1900 l = getset(repo, subset, x)
1903 l = getset(repo, subset, x)
1901 if order == defineorder:
1904 if order == defineorder:
1902 l.reverse()
1905 l.reverse()
1903 return l
1906 return l
1904
1907
1905 @predicate('roots(set)', safe=True)
1908 @predicate('roots(set)', safe=True)
1906 def roots(repo, subset, x):
1909 def roots(repo, subset, x):
1907 """Changesets in set with no parent changeset in set.
1910 """Changesets in set with no parent changeset in set.
1908 """
1911 """
1909 s = getset(repo, fullreposet(repo), x)
1912 s = getset(repo, fullreposet(repo), x)
1910 parents = repo.changelog.parentrevs
1913 parents = repo.changelog.parentrevs
1911 def filter(r):
1914 def filter(r):
1912 for p in parents(r):
1915 for p in parents(r):
1913 if 0 <= p and p in s:
1916 if 0 <= p and p in s:
1914 return False
1917 return False
1915 return True
1918 return True
1916 return subset & s.filter(filter, condrepr='<roots>')
1919 return subset & s.filter(filter, condrepr='<roots>')
1917
1920
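# Illustrative sketch only, not part of revset.py: the roots() filter above
# applied to a toy DAG, where 'toy_parents' plays the role of
# repo.changelog.parentrevs and -1 stands in for nullrev.
def _example_roots(s, toy_parents):
    # keep revs whose parents are all outside the set (or null)
    return {r for r in s
            if not any(0 <= p and p in s for p in toy_parents[r])}
# _example_roots({2, 3, 4}, {2: (1, -1), 3: (2, -1), 4: (2, -1)}) -> {2}
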
1918 _sortkeyfuncs = {
1921 _sortkeyfuncs = {
1919 'rev': lambda c: c.rev(),
1922 'rev': lambda c: c.rev(),
1920 'branch': lambda c: c.branch(),
1923 'branch': lambda c: c.branch(),
1921 'desc': lambda c: c.description(),
1924 'desc': lambda c: c.description(),
1922 'user': lambda c: c.user(),
1925 'user': lambda c: c.user(),
1923 'author': lambda c: c.user(),
1926 'author': lambda c: c.user(),
1924 'date': lambda c: c.date()[0],
1927 'date': lambda c: c.date()[0],
1925 }
1928 }
1926
1929
1927 def _getsortargs(x):
1930 def _getsortargs(x):
1928 """Parse sort options into (set, [(key, reverse)], opts)"""
1931 """Parse sort options into (set, [(key, reverse)], opts)"""
1929 args = getargsdict(x, 'sort', 'set keys topo.firstbranch')
1932 args = getargsdict(x, 'sort', 'set keys topo.firstbranch')
1930 if 'set' not in args:
1933 if 'set' not in args:
1931 # i18n: "sort" is a keyword
1934 # i18n: "sort" is a keyword
1932 raise error.ParseError(_('sort requires one or two arguments'))
1935 raise error.ParseError(_('sort requires one or two arguments'))
1933 keys = "rev"
1936 keys = "rev"
1934 if 'keys' in args:
1937 if 'keys' in args:
1935 # i18n: "sort" is a keyword
1938 # i18n: "sort" is a keyword
1936 keys = getstring(args['keys'], _("sort spec must be a string"))
1939 keys = getstring(args['keys'], _("sort spec must be a string"))
1937
1940
1938 keyflags = []
1941 keyflags = []
1939 for k in keys.split():
1942 for k in keys.split():
1940 fk = k
1943 fk = k
1941 reverse = (k[0] == '-')
1944 reverse = (k[0] == '-')
1942 if reverse:
1945 if reverse:
1943 k = k[1:]
1946 k = k[1:]
1944 if k not in _sortkeyfuncs and k != 'topo':
1947 if k not in _sortkeyfuncs and k != 'topo':
1945 raise error.ParseError(_("unknown sort key %r") % fk)
1948 raise error.ParseError(_("unknown sort key %r") % fk)
1946 keyflags.append((k, reverse))
1949 keyflags.append((k, reverse))
1947
1950
1948 if len(keyflags) > 1 and any(k == 'topo' for k, reverse in keyflags):
1951 if len(keyflags) > 1 and any(k == 'topo' for k, reverse in keyflags):
1949 # i18n: "topo" is a keyword
1952 # i18n: "topo" is a keyword
1950 raise error.ParseError(_('topo sort order cannot be combined '
1953 raise error.ParseError(_('topo sort order cannot be combined '
1951 'with other sort keys'))
1954 'with other sort keys'))
1952
1955
1953 opts = {}
1956 opts = {}
1954 if 'topo.firstbranch' in args:
1957 if 'topo.firstbranch' in args:
1955 if any(k == 'topo' for k, reverse in keyflags):
1958 if any(k == 'topo' for k, reverse in keyflags):
1956 opts['topo.firstbranch'] = args['topo.firstbranch']
1959 opts['topo.firstbranch'] = args['topo.firstbranch']
1957 else:
1960 else:
1958 # i18n: "topo" and "topo.firstbranch" are keywords
1961 # i18n: "topo" and "topo.firstbranch" are keywords
1959 raise error.ParseError(_('topo.firstbranch can only be used '
1962 raise error.ParseError(_('topo.firstbranch can only be used '
1960 'when using the topo sort key'))
1963 'when using the topo sort key'))
1961
1964
1962 return args['set'], keyflags, opts
1965 return args['set'], keyflags, opts
1963
1966
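# A small sketch, not part of revset.py, of the key-spec parsing done by
# _getsortargs() above: a leading '-' marks a key as descending.
def _example_parsekeys(keys):
    keyflags = []
    for k in keys.split():
        reverse = k.startswith('-')
        if reverse:
            k = k[1:]
        keyflags.append((k, reverse))
    return keyflags
# _example_parsekeys("-date branch") -> [('date', True), ('branch', False)]
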
1964 @predicate('sort(set[, [-]key... [, ...]])', safe=True, takeorder=True)
1967 @predicate('sort(set[, [-]key... [, ...]])', safe=True, takeorder=True)
1965 def sort(repo, subset, x, order):
1968 def sort(repo, subset, x, order):
1966 """Sort set by keys. The default sort order is ascending, specify a key
1969 """Sort set by keys. The default sort order is ascending, specify a key
1967 as ``-key`` to sort in descending order.
1970 as ``-key`` to sort in descending order.
1968
1971
1969 The keys can be:
1972 The keys can be:
1970
1973
1971 - ``rev`` for the revision number,
1974 - ``rev`` for the revision number,
1972 - ``branch`` for the branch name,
1975 - ``branch`` for the branch name,
1973 - ``desc`` for the commit message (description),
1976 - ``desc`` for the commit message (description),
1974 - ``user`` for user name (``author`` can be used as an alias),
1977 - ``user`` for user name (``author`` can be used as an alias),
1975 - ``date`` for the commit date
1978 - ``date`` for the commit date
1976 - ``topo`` for a reverse topological sort
1979 - ``topo`` for a reverse topological sort
1977
1980
1978 The ``topo`` sort order cannot be combined with other sort keys. This sort
1981 The ``topo`` sort order cannot be combined with other sort keys. This sort
1979 takes one optional argument, ``topo.firstbranch``, which takes a revset that
1982 takes one optional argument, ``topo.firstbranch``, which takes a revset that
1982 specifies what topological branches to prioritize in the sort.
1985 specifies what topological branches to prioritize in the sort.
1981
1984
1982 """
1985 """
1983 s, keyflags, opts = _getsortargs(x)
1986 s, keyflags, opts = _getsortargs(x)
1984 revs = getset(repo, subset, s)
1987 revs = getset(repo, subset, s)
1985
1988
1986 if not keyflags or order != defineorder:
1989 if not keyflags or order != defineorder:
1987 return revs
1990 return revs
1988 if len(keyflags) == 1 and keyflags[0][0] == "rev":
1991 if len(keyflags) == 1 and keyflags[0][0] == "rev":
1989 revs.sort(reverse=keyflags[0][1])
1992 revs.sort(reverse=keyflags[0][1])
1990 return revs
1993 return revs
1991 elif keyflags[0][0] == "topo":
1994 elif keyflags[0][0] == "topo":
1992 firstbranch = ()
1995 firstbranch = ()
1993 if 'topo.firstbranch' in opts:
1996 if 'topo.firstbranch' in opts:
1994 firstbranch = getset(repo, subset, opts['topo.firstbranch'])
1997 firstbranch = getset(repo, subset, opts['topo.firstbranch'])
1995 revs = baseset(_toposort(revs, repo.changelog.parentrevs, firstbranch),
1998 revs = baseset(_toposort(revs, repo.changelog.parentrevs, firstbranch),
1996 istopo=True)
1999 istopo=True)
1997 if keyflags[0][1]:
2000 if keyflags[0][1]:
1998 revs.reverse()
2001 revs.reverse()
1999 return revs
2002 return revs
2000
2003
2001 # sort() is guaranteed to be stable
2004 # sort() is guaranteed to be stable
2002 ctxs = [repo[r] for r in revs]
2005 ctxs = [repo[r] for r in revs]
2003 for k, reverse in reversed(keyflags):
2006 for k, reverse in reversed(keyflags):
2004 ctxs.sort(key=_sortkeyfuncs[k], reverse=reverse)
2007 ctxs.sort(key=_sortkeyfuncs[k], reverse=reverse)
2005 return baseset([c.rev() for c in ctxs])
2008 return baseset([c.rev() for c in ctxs])
2006
2009
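# Illustrative sketch only, not part of revset.py: the multi-key sort above
# relies on Python's sort being stable, so sorting by the least significant
# key first and the most significant key last yields the combined ordering.
# Toy dicts stand in for changectx objects.
def _example_multikey_sort(items, keyflags):
    # keyflags: list of (key name, reverse?) pairs, most significant first
    items = list(items)
    for k, reverse in reversed(keyflags):
        items.sort(key=lambda d: d[k], reverse=reverse)
    return items
# _example_multikey_sort(
#     [{'user': 'b', 'rev': 2}, {'user': 'a', 'rev': 1}, {'user': 'b', 'rev': 1}],
#     [('user', False), ('rev', True)])
# -> [{'user': 'a', 'rev': 1}, {'user': 'b', 'rev': 2}, {'user': 'b', 'rev': 1}]
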
2007 def _toposort(revs, parentsfunc, firstbranch=()):
2010 def _toposort(revs, parentsfunc, firstbranch=()):
2008 """Yield revisions from heads to roots one (topo) branch at a time.
2011 """Yield revisions from heads to roots one (topo) branch at a time.
2009
2012
2010 This function aims to be used by a graph generator that wishes to minimize
2013 This function aims to be used by a graph generator that wishes to minimize
2011 the number of parallel branches and their interleaving.
2014 the number of parallel branches and their interleaving.
2012
2015
2013 Example iteration order (numbers show the "true" order in a changelog):
2016 Example iteration order (numbers show the "true" order in a changelog):
2014
2017
2015 o 4
2018 o 4
2016 |
2019 |
2017 o 1
2020 o 1
2018 |
2021 |
2019 | o 3
2022 | o 3
2020 | |
2023 | |
2021 | o 2
2024 | o 2
2022 |/
2025 |/
2023 o 0
2026 o 0
2024
2027
2025 Note that the ancestors of merges are understood by the current
2028 Note that the ancestors of merges are understood by the current
2026 algorithm to be on the same branch. This means no reordering will
2029 algorithm to be on the same branch. This means no reordering will
2027 occur behind a merge.
2030 occur behind a merge.
2028 """
2031 """
2029
2032
2030 ### Quick summary of the algorithm
2033 ### Quick summary of the algorithm
2031 #
2034 #
2032 # This function is based around a "retention" principle. We keep revisions
2035 # This function is based around a "retention" principle. We keep revisions
2033 # in memory until we are ready to emit a whole branch that immediately
2036 # in memory until we are ready to emit a whole branch that immediately
2034 # "merges" into an existing one. This reduces the number of parallel
2037 # "merges" into an existing one. This reduces the number of parallel
2035 # branches with interleaved revisions.
2038 # branches with interleaved revisions.
2036 #
2039 #
2037 # During iteration revs are split into two groups:
2040 # During iteration revs are split into two groups:
2038 # A) revision already emitted
2041 # A) revision already emitted
2039 # B) revision in "retention". They are stored as different subgroups.
2042 # B) revision in "retention". They are stored as different subgroups.
2040 #
2043 #
2041 # for each REV, we do the following logic:
2044 # for each REV, we do the following logic:
2042 #
2045 #
2043 # 1) if REV is a parent of (A), we will emit it. If there is a
2046 # 1) if REV is a parent of (A), we will emit it. If there is a
2044 # retention group ((B) above) that is blocked on REV being
2047 # retention group ((B) above) that is blocked on REV being
2045 # available, we emit all the revisions out of that retention
2048 # available, we emit all the revisions out of that retention
2046 # group first.
2049 # group first.
2047 #
2050 #
2048 # 2) else, we search for a subgroup in (B) waiting for REV to become
2051 # 2) else, we search for a subgroup in (B) waiting for REV to become
2049 # available; if such a subgroup exists, we add REV to it and the subgroup
2052 # available; if such a subgroup exists, we add REV to it and the subgroup
2050 # is now waiting for REV.parents() to become available.
2053 # is now waiting for REV.parents() to become available.
2051 #
2054 #
2052 # 3) finally, if no such group exists in (B), we create a new subgroup.
2055 # 3) finally, if no such group exists in (B), we create a new subgroup.
2053 #
2056 #
2054 #
2057 #
2055 # To bootstrap the algorithm, we emit the tipmost revision (which
2058 # To bootstrap the algorithm, we emit the tipmost revision (which
2056 # puts it in group (A) from above).
2059 # puts it in group (A) from above).
2057
2060
2058 revs.sort(reverse=True)
2061 revs.sort(reverse=True)
2059
2062
2060 # Set of parents of revisions that have been emitted. They can be considered
2063 # Set of parents of revisions that have been emitted. They can be considered
2061 # unblocked as the graph generator is already aware of them so there is no
2064 # unblocked as the graph generator is already aware of them so there is no
2062 # need to delay the revisions that reference them.
2065 # need to delay the revisions that reference them.
2063 #
2066 #
2064 # If someone wants to prioritize a branch over the others, pre-filling this
2067 # If someone wants to prioritize a branch over the others, pre-filling this
2065 # set will force all other branches to wait until this branch is ready to be
2068 # set will force all other branches to wait until this branch is ready to be
2066 # emitted.
2069 # emitted.
2067 unblocked = set(firstbranch)
2070 unblocked = set(firstbranch)
2068
2071
2069 # list of groups waiting to be displayed, each group is defined by:
2072 # list of groups waiting to be displayed, each group is defined by:
2070 #
2073 #
2071 # (revs: list of revs waiting to be displayed,
2074 # (revs: list of revs waiting to be displayed,
2072 # blocked: set of revs that cannot be displayed before those in 'revs')
2075 # blocked: set of revs that cannot be displayed before those in 'revs')
2073 #
2076 #
2074 # The second value ('blocked') corresponds to parents of any revision in the
2077 # The second value ('blocked') corresponds to parents of any revision in the
2075 # group ('revs') that is not itself contained in the group. The main idea
2078 # group ('revs') that is not itself contained in the group. The main idea
2076 # of this algorithm is to delay as much as possible the emission of any
2079 # of this algorithm is to delay as much as possible the emission of any
2077 # revision. This means waiting for the moment we are about to display
2080 # revision. This means waiting for the moment we are about to display
2078 # these parents to display the revs in a group.
2081 # these parents to display the revs in a group.
2079 #
2082 #
2080 # This first implementation is smart until it encounters a merge: it will
2083 # This first implementation is smart until it encounters a merge: it will
2081 # emit revs as soon as any parent is about to be emitted and can grow an
2084 # emit revs as soon as any parent is about to be emitted and can grow an
2082 # arbitrary number of revs in 'blocked'. In practice this means we properly
2085 # arbitrary number of revs in 'blocked'. In practice this means we properly
2083 # retain new branches but give up on any special ordering for ancestors
2086 # retain new branches but give up on any special ordering for ancestors
2084 # of merges. The implementation can be improved to handle this better.
2087 # of merges. The implementation can be improved to handle this better.
2085 #
2088 #
2086 # The first subgroup is special. It corresponds to all the revisions that
2089 # The first subgroup is special. It corresponds to all the revisions that
2087 # were already emitted. The 'revs' list is expected to be empty and the
2090 # were already emitted. The 'revs' list is expected to be empty and the
2088 # 'blocked' set contains the parents revisions of already emitted revision.
2091 # 'blocked' set contains the parents revisions of already emitted revision.
2089 #
2092 #
2090 # You could pre-seed the <parents> set of groups[0] with specific
2093 # You could pre-seed the <parents> set of groups[0] with specific
2091 # changesets to select what the first emitted branch should be.
2094 # changesets to select what the first emitted branch should be.
2092 groups = [([], unblocked)]
2095 groups = [([], unblocked)]
2093 pendingheap = []
2096 pendingheap = []
2094 pendingset = set()
2097 pendingset = set()
2095
2098
2096 heapq.heapify(pendingheap)
2099 heapq.heapify(pendingheap)
2097 heappop = heapq.heappop
2100 heappop = heapq.heappop
2098 heappush = heapq.heappush
2101 heappush = heapq.heappush
2099 for currentrev in revs:
2102 for currentrev in revs:
2100 # heapq pops the smallest element; we want the highest rev first, so we invert
2103 # heapq pops the smallest element; we want the highest rev first, so we invert
2101 if currentrev not in pendingset:
2104 if currentrev not in pendingset:
2102 heappush(pendingheap, -currentrev)
2105 heappush(pendingheap, -currentrev)
2103 pendingset.add(currentrev)
2106 pendingset.add(currentrev)
2104 # iterate over pending revs until the current rev has been
2107 # iterate over pending revs until the current rev has been
2105 # processed.
2108 # processed.
2106 rev = None
2109 rev = None
2107 while rev != currentrev:
2110 while rev != currentrev:
2108 rev = -heappop(pendingheap)
2111 rev = -heappop(pendingheap)
2109 pendingset.remove(rev)
2112 pendingset.remove(rev)
2110
2113
2111 # Look for a blocked subgroup waiting for the current revision.
2114 # Look for a blocked subgroup waiting for the current revision.
2112 matching = [i for i, g in enumerate(groups) if rev in g[1]]
2115 matching = [i for i, g in enumerate(groups) if rev in g[1]]
2113
2116
2114 if matching:
2117 if matching:
2115 # The main idea is to gather together all sets that are blocked
2118 # The main idea is to gather together all sets that are blocked
2116 # on the same revision.
2119 # on the same revision.
2117 #
2120 #
2118 # Groups are merged when a common blocking ancestor is
2121 # Groups are merged when a common blocking ancestor is
2119 # observed. For example, given two groups:
2122 # observed. For example, given two groups:
2120 #
2123 #
2121 # revs [5, 4] waiting for 1
2124 # revs [5, 4] waiting for 1
2122 # revs [3, 2] waiting for 1
2125 # revs [3, 2] waiting for 1
2123 #
2126 #
2124 # These two groups will be merged when we process
2127 # These two groups will be merged when we process
2125 # 1. In theory, we could have merged the groups when
2128 # 1. In theory, we could have merged the groups when
2126 # we added 2 to the group it is now in (we could have
2129 # we added 2 to the group it is now in (we could have
2127 # noticed the groups were both blocked on 1 then), but
2130 # noticed the groups were both blocked on 1 then), but
2128 # the way it works now makes the algorithm simpler.
2131 # the way it works now makes the algorithm simpler.
2129 #
2132 #
2130 # We also always keep the oldest subgroup first. We can
2133 # We also always keep the oldest subgroup first. We can
2131 # probably improve the behavior by having the longest set
2134 # probably improve the behavior by having the longest set
2132 # first. That way, graph algorithms could minimise the length
2135 # first. That way, graph algorithms could minimise the length
2133 # of parallel lines in their drawing. This is currently not done.
2136 # of parallel lines in their drawing. This is currently not done.
2134 targetidx = matching.pop(0)
2137 targetidx = matching.pop(0)
2135 trevs, tparents = groups[targetidx]
2138 trevs, tparents = groups[targetidx]
2136 for i in matching:
2139 for i in matching:
2137 gr = groups[i]
2140 gr = groups[i]
2138 trevs.extend(gr[0])
2141 trevs.extend(gr[0])
2139 tparents |= gr[1]
2142 tparents |= gr[1]
2140 # delete all merged subgroups (except the one we kept)
2143 # delete all merged subgroups (except the one we kept)
2141 # (starting from the last subgroup for performance and
2144 # (starting from the last subgroup for performance and
2142 # sanity reasons)
2145 # sanity reasons)
2143 for i in reversed(matching):
2146 for i in reversed(matching):
2144 del groups[i]
2147 del groups[i]
2145 else:
2148 else:
2146 # This is a new head. We create a new subgroup for it.
2149 # This is a new head. We create a new subgroup for it.
2147 targetidx = len(groups)
2150 targetidx = len(groups)
2148 groups.append(([], set([rev])))
2151 groups.append(([], set([rev])))
2149
2152
2150 gr = groups[targetidx]
2153 gr = groups[targetidx]
2151
2154
2152 # We now add the current node to this subgroup. This is done
2155 # We now add the current node to this subgroup. This is done
2153 # after the subgroup merging because all elements from a subgroup
2156 # after the subgroup merging because all elements from a subgroup
2154 # that relied on this rev must precede it.
2157 # that relied on this rev must precede it.
2155 #
2158 #
2156 # we also update the <parents> set to include the parents of the
2159 # we also update the <parents> set to include the parents of the
2157 # new nodes.
2160 # new nodes.
2158 if rev == currentrev: # only display stuff in rev
2161 if rev == currentrev: # only display stuff in rev
2159 gr[0].append(rev)
2162 gr[0].append(rev)
2160 gr[1].remove(rev)
2163 gr[1].remove(rev)
2161 parents = [p for p in parentsfunc(rev) if p > node.nullrev]
2164 parents = [p for p in parentsfunc(rev) if p > node.nullrev]
2162 gr[1].update(parents)
2165 gr[1].update(parents)
2163 for p in parents:
2166 for p in parents:
2164 if p not in pendingset:
2167 if p not in pendingset:
2165 pendingset.add(p)
2168 pendingset.add(p)
2166 heappush(pendingheap, -p)
2169 heappush(pendingheap, -p)
2167
2170
2168 # Look for a subgroup to display
2171 # Look for a subgroup to display
2169 #
2172 #
2170 # When unblocked is empty (if clause), we were not waiting for any
2173 # When unblocked is empty (if clause), we were not waiting for any
2171 # revisions during the first iteration (if no priority was given) or
2174 # revisions during the first iteration (if no priority was given) or
2172 # if we emitted a whole disconnected set of the graph (reached a
2175 # if we emitted a whole disconnected set of the graph (reached a
2173 # root). In that case we arbitrarily take the oldest known
2176 # root). In that case we arbitrarily take the oldest known
2174 # subgroup. The heuristic could probably be better.
2177 # subgroup. The heuristic could probably be better.
2175 #
2178 #
2176 # Otherwise (elif clause) if the subgroup is blocked on
2179 # Otherwise (elif clause) if the subgroup is blocked on
2177 # a revision we just emitted, we can safely emit it as
2180 # a revision we just emitted, we can safely emit it as
2178 # well.
2181 # well.
2179 if not unblocked:
2182 if not unblocked:
2180 if len(groups) > 1: # display other subset
2183 if len(groups) > 1: # display other subset
2181 targetidx = 1
2184 targetidx = 1
2182 gr = groups[1]
2185 gr = groups[1]
2183 elif not gr[1] & unblocked:
2186 elif not gr[1] & unblocked:
2184 gr = None
2187 gr = None
2185
2188
2186 if gr is not None:
2189 if gr is not None:
2187 # update the set of awaited revisions with the one from the
2190 # update the set of awaited revisions with the one from the
2188 # subgroup
2191 # subgroup
2189 unblocked |= gr[1]
2192 unblocked |= gr[1]
2190 # output all revisions in the subgroup
2193 # output all revisions in the subgroup
2191 for r in gr[0]:
2194 for r in gr[0]:
2192 yield r
2195 yield r
2193 # delete the subgroup that you just output
2196 # delete the subgroup that you just output
2194 # unless it is groups[0] in which case you just empty it.
2197 # unless it is groups[0] in which case you just empty it.
2195 if targetidx:
2198 if targetidx:
2196 del groups[targetidx]
2199 del groups[targetidx]
2197 else:
2200 else:
2198 gr[0][:] = []
2201 gr[0][:] = []
2199 # Check if we have some subgroup waiting for revisions we are not going to
2202 # Check if we have some subgroup waiting for revisions we are not going to
2200 # iterate over
2203 # iterate over
2201 for g in groups:
2204 for g in groups:
2202 for r in g[0]:
2205 for r in g[0]:
2203 yield r
2206 yield r
2204
2207
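# A worked example, not part of revset.py, exercising _toposort() on the toy
# DAG from its docstring (branch 4-1 and branch 3-2, both rooted at 0). The
# parents mapping below is hypothetical test data.
def _example_toposort():
    parents = {0: (-1, -1), 1: (0, -1), 2: (0, -1), 3: (2, -1), 4: (1, -1)}
    return list(_toposort([0, 1, 2, 3, 4], lambda r: parents[r]))
    # expected to yield [4, 1, 3, 2, 0]: rev 1 follows 4 immediately because
    # it is a parent of an already-emitted revision, while the 3-2 branch is
    # retained until it is about to merge into the emitted part of the graph.
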
2205 @predicate('subrepo([pattern])')
2208 @predicate('subrepo([pattern])')
2206 def subrepo(repo, subset, x):
2209 def subrepo(repo, subset, x):
2207 """Changesets that add, modify or remove the given subrepo. If no subrepo
2210 """Changesets that add, modify or remove the given subrepo. If no subrepo
2208 pattern is named, any subrepo changes are returned.
2211 pattern is named, any subrepo changes are returned.
2209 """
2212 """
2210 # i18n: "subrepo" is a keyword
2213 # i18n: "subrepo" is a keyword
2211 args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
2214 args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
2212 pat = None
2215 pat = None
2213 if len(args) != 0:
2216 if len(args) != 0:
2214 pat = getstring(args[0], _("subrepo requires a pattern"))
2217 pat = getstring(args[0], _("subrepo requires a pattern"))
2215
2218
2216 m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])
2219 m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])
2217
2220
2218 def submatches(names):
2221 def submatches(names):
2219 k, p, m = util.stringmatcher(pat)
2222 k, p, m = util.stringmatcher(pat)
2220 for name in names:
2223 for name in names:
2221 if m(name):
2224 if m(name):
2222 yield name
2225 yield name
2223
2226
2224 def matches(x):
2227 def matches(x):
2225 c = repo[x]
2228 c = repo[x]
2226 s = repo.status(c.p1().node(), c.node(), match=m)
2229 s = repo.status(c.p1().node(), c.node(), match=m)
2227
2230
2228 if pat is None:
2231 if pat is None:
2229 return s.added or s.modified or s.removed
2232 return s.added or s.modified or s.removed
2230
2233
2231 if s.added:
2234 if s.added:
2232 return any(submatches(c.substate.keys()))
2235 return any(submatches(c.substate.keys()))
2233
2236
2234 if s.modified:
2237 if s.modified:
2235 subs = set(c.p1().substate.keys())
2238 subs = set(c.p1().substate.keys())
2236 subs.update(c.substate.keys())
2239 subs.update(c.substate.keys())
2237
2240
2238 for path in submatches(subs):
2241 for path in submatches(subs):
2239 if c.p1().substate.get(path) != c.substate.get(path):
2242 if c.p1().substate.get(path) != c.substate.get(path):
2240 return True
2243 return True
2241
2244
2242 if s.removed:
2245 if s.removed:
2243 return any(submatches(c.p1().substate.keys()))
2246 return any(submatches(c.p1().substate.keys()))
2244
2247
2245 return False
2248 return False
2246
2249
2247 return subset.filter(matches, condrepr=('<subrepo %r>', pat))
2250 return subset.filter(matches, condrepr=('<subrepo %r>', pat))
2248
2251
2249 def _substringmatcher(pattern):
2252 def _substringmatcher(pattern):
2250 kind, pattern, matcher = util.stringmatcher(pattern)
2253 kind, pattern, matcher = util.stringmatcher(pattern)
2251 if kind == 'literal':
2254 if kind == 'literal':
2252 matcher = lambda s: pattern in s
2255 matcher = lambda s: pattern in s
2253 return kind, pattern, matcher
2256 return kind, pattern, matcher
2254
2257
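# An illustrative, self-contained variant, not revset.py's util.stringmatcher,
# of the convention used by _substringmatcher() above: a 're:' prefix switches
# to regular-expression search, anything else is a plain case-sensitive
# substring match. Uses the 're' module imported at the top of this file.
def _example_substringmatcher(pattern):
    if pattern.startswith('re:'):
        regex = re.compile(pattern[3:])
        return lambda s: regex.search(s) is not None
    return lambda s: pattern in s
# _example_substringmatcher('re:^Bob')('Bob <bob@example.com>') -> True
# _example_substringmatcher('alice')('Bob <bob@example.com>')   -> False
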
2255 @predicate('tag([name])', safe=True)
2258 @predicate('tag([name])', safe=True)
2256 def tag(repo, subset, x):
2259 def tag(repo, subset, x):
2257 """The specified tag by name, or all tagged revisions if no name is given.
2260 """The specified tag by name, or all tagged revisions if no name is given.
2258
2261
2259 If `name` starts with `re:`, the remainder of the name is treated as
2262 If `name` starts with `re:`, the remainder of the name is treated as
2260 a regular expression. To match a tag that actually starts with `re:`,
2263 a regular expression. To match a tag that actually starts with `re:`,
2261 use the prefix `literal:`.
2264 use the prefix `literal:`.
2262 """
2265 """
2263 # i18n: "tag" is a keyword
2266 # i18n: "tag" is a keyword
2264 args = getargs(x, 0, 1, _("tag takes one or no arguments"))
2267 args = getargs(x, 0, 1, _("tag takes one or no arguments"))
2265 cl = repo.changelog
2268 cl = repo.changelog
2266 if args:
2269 if args:
2267 pattern = getstring(args[0],
2270 pattern = getstring(args[0],
2268 # i18n: "tag" is a keyword
2271 # i18n: "tag" is a keyword
2269 _('the argument to tag must be a string'))
2272 _('the argument to tag must be a string'))
2270 kind, pattern, matcher = util.stringmatcher(pattern)
2273 kind, pattern, matcher = util.stringmatcher(pattern)
2271 if kind == 'literal':
2274 if kind == 'literal':
2272 # avoid resolving all tags
2275 # avoid resolving all tags
2273 tn = repo._tagscache.tags.get(pattern, None)
2276 tn = repo._tagscache.tags.get(pattern, None)
2274 if tn is None:
2277 if tn is None:
2275 raise error.RepoLookupError(_("tag '%s' does not exist")
2278 raise error.RepoLookupError(_("tag '%s' does not exist")
2276 % pattern)
2279 % pattern)
2277 s = set([repo[tn].rev()])
2280 s = set([repo[tn].rev()])
2278 else:
2281 else:
2279 s = set([cl.rev(n) for t, n in repo.tagslist() if matcher(t)])
2282 s = set([cl.rev(n) for t, n in repo.tagslist() if matcher(t)])
2280 else:
2283 else:
2281 s = set([cl.rev(n) for t, n in repo.tagslist() if t != 'tip'])
2284 s = set([cl.rev(n) for t, n in repo.tagslist() if t != 'tip'])
2282 return subset & s
2285 return subset & s
2283
2286
2284 @predicate('tagged', safe=True)
2287 @predicate('tagged', safe=True)
2285 def tagged(repo, subset, x):
2288 def tagged(repo, subset, x):
2286 return tag(repo, subset, x)
2289 return tag(repo, subset, x)
2287
2290
2288 @predicate('unstable()', safe=True)
2291 @predicate('unstable()', safe=True)
2289 def unstable(repo, subset, x):
2292 def unstable(repo, subset, x):
2290 """Non-obsolete changesets with obsolete ancestors.
2293 """Non-obsolete changesets with obsolete ancestors.
2291 """
2294 """
2292 # i18n: "unstable" is a keyword
2295 # i18n: "unstable" is a keyword
2293 getargs(x, 0, 0, _("unstable takes no arguments"))
2296 getargs(x, 0, 0, _("unstable takes no arguments"))
2294 unstables = obsmod.getrevs(repo, 'unstable')
2297 unstables = obsmod.getrevs(repo, 'unstable')
2295 return subset & unstables
2298 return subset & unstables
2296
2299
2297
2300
2298 @predicate('user(string)', safe=True)
2301 @predicate('user(string)', safe=True)
2299 def user(repo, subset, x):
2302 def user(repo, subset, x):
2300 """User name contains string. The match is case-insensitive.
2303 """User name contains string. The match is case-insensitive.
2301
2304
2302 If `string` starts with `re:`, the remainder of the string is treated as
2305 If `string` starts with `re:`, the remainder of the string is treated as
2303 a regular expression. To match a user that actually contains `re:`, use
2306 a regular expression. To match a user that actually contains `re:`, use
2304 the prefix `literal:`.
2307 the prefix `literal:`.
2305 """
2308 """
2306 return author(repo, subset, x)
2309 return author(repo, subset, x)
2307
2310
2308 @predicate('wdir', safe=True)
2311 @predicate('wdir', safe=True)
2309 def wdir(repo, subset, x):
2312 def wdir(repo, subset, x):
2310 """Working directory. (EXPERIMENTAL)"""
2313 """Working directory. (EXPERIMENTAL)"""
2311 # i18n: "wdir" is a keyword
2314 # i18n: "wdir" is a keyword
2312 getargs(x, 0, 0, _("wdir takes no arguments"))
2315 getargs(x, 0, 0, _("wdir takes no arguments"))
2313 if node.wdirrev in subset or isinstance(subset, fullreposet):
2316 if node.wdirrev in subset or isinstance(subset, fullreposet):
2314 return baseset([node.wdirrev])
2317 return baseset([node.wdirrev])
2315 return baseset()
2318 return baseset()
2316
2319
2317 def _orderedlist(repo, subset, x):
2320 def _orderedlist(repo, subset, x):
2318 s = getstring(x, "internal error")
2321 s = getstring(x, "internal error")
2319 if not s:
2322 if not s:
2320 return baseset()
2323 return baseset()
2321 # remove duplicates here. It's difficult for the caller to deduplicate sets
2324 # remove duplicates here. It's difficult for the caller to deduplicate sets
2322 # because different symbols can point to the same rev.
2325 # because different symbols can point to the same rev.
2323 cl = repo.changelog
2326 cl = repo.changelog
2324 ls = []
2327 ls = []
2325 seen = set()
2328 seen = set()
2326 for t in s.split('\0'):
2329 for t in s.split('\0'):
2327 try:
2330 try:
2328 # fast path for integer revision
2331 # fast path for integer revision
2329 r = int(t)
2332 r = int(t)
2330 if str(r) != t or r not in cl:
2333 if str(r) != t or r not in cl:
2331 raise ValueError
2334 raise ValueError
2332 revs = [r]
2335 revs = [r]
2333 except ValueError:
2336 except ValueError:
2334 revs = stringset(repo, subset, t)
2337 revs = stringset(repo, subset, t)
2335
2338
2336 for r in revs:
2339 for r in revs:
2337 if r in seen:
2340 if r in seen:
2338 continue
2341 continue
2339 if (r in subset
2342 if (r in subset
2340 or r == node.nullrev and isinstance(subset, fullreposet)):
2343 or r == node.nullrev and isinstance(subset, fullreposet)):
2341 ls.append(r)
2344 ls.append(r)
2342 seen.add(r)
2345 seen.add(r)
2343 return baseset(ls)
2346 return baseset(ls)
2344
2347
2345 # for internal use
2348 # for internal use
2346 @predicate('_list', safe=True, takeorder=True)
2349 @predicate('_list', safe=True, takeorder=True)
2347 def _list(repo, subset, x, order):
2350 def _list(repo, subset, x, order):
2348 if order == followorder:
2351 if order == followorder:
2349 # slow path to take the subset order
2352 # slow path to take the subset order
2350 return subset & _orderedlist(repo, fullreposet(repo), x)
2353 return subset & _orderedlist(repo, fullreposet(repo), x)
2351 else:
2354 else:
2352 return _orderedlist(repo, subset, x)
2355 return _orderedlist(repo, subset, x)
2353
2356
2354 def _orderedintlist(repo, subset, x):
2357 def _orderedintlist(repo, subset, x):
2355 s = getstring(x, "internal error")
2358 s = getstring(x, "internal error")
2356 if not s:
2359 if not s:
2357 return baseset()
2360 return baseset()
2358 ls = [int(r) for r in s.split('\0')]
2361 ls = [int(r) for r in s.split('\0')]
2359 s = subset
2362 s = subset
2360 return baseset([r for r in ls if r in s])
2363 return baseset([r for r in ls if r in s])
2361
2364
2362 # for internal use
2365 # for internal use
2363 @predicate('_intlist', safe=True, takeorder=True)
2366 @predicate('_intlist', safe=True, takeorder=True)
2364 def _intlist(repo, subset, x, order):
2367 def _intlist(repo, subset, x, order):
2365 if order == followorder:
2368 if order == followorder:
2366 # slow path to take the subset order
2369 # slow path to take the subset order
2367 return subset & _orderedintlist(repo, fullreposet(repo), x)
2370 return subset & _orderedintlist(repo, fullreposet(repo), x)
2368 else:
2371 else:
2369 return _orderedintlist(repo, subset, x)
2372 return _orderedintlist(repo, subset, x)
2370
2373
2371 def _orderedhexlist(repo, subset, x):
2374 def _orderedhexlist(repo, subset, x):
2372 s = getstring(x, "internal error")
2375 s = getstring(x, "internal error")
2373 if not s:
2376 if not s:
2374 return baseset()
2377 return baseset()
2375 cl = repo.changelog
2378 cl = repo.changelog
2376 ls = [cl.rev(node.bin(r)) for r in s.split('\0')]
2379 ls = [cl.rev(node.bin(r)) for r in s.split('\0')]
2377 s = subset
2380 s = subset
2378 return baseset([r for r in ls if r in s])
2381 return baseset([r for r in ls if r in s])
2379
2382
2380 # for internal use
2383 # for internal use
2381 @predicate('_hexlist', safe=True, takeorder=True)
2384 @predicate('_hexlist', safe=True, takeorder=True)
2382 def _hexlist(repo, subset, x, order):
2385 def _hexlist(repo, subset, x, order):
2383 if order == followorder:
2386 if order == followorder:
2384 # slow path to take the subset order
2387 # slow path to take the subset order
2385 return subset & _orderedhexlist(repo, fullreposet(repo), x)
2388 return subset & _orderedhexlist(repo, fullreposet(repo), x)
2386 else:
2389 else:
2387 return _orderedhexlist(repo, subset, x)
2390 return _orderedhexlist(repo, subset, x)
2388
2391
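# A toy sketch, not part of revset.py, of the '\0'-separated payload format
# shared by _list/_intlist/_hexlist above: the caller joins the items with
# NUL characters and the predicate splits, converts and filters them against
# the current subset while preserving the given order.
def _example_intlist(payload, subset):
    if not payload:
        return []
    wanted = set(subset)
    return [r for r in (int(t) for t in payload.split('\0')) if r in wanted]
# _example_intlist('\0'.join(['4', '2', '99']), [0, 1, 2, 3, 4]) -> [4, 2]
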
2389 methods = {
2392 methods = {
2390 "range": rangeset,
2393 "range": rangeset,
2391 "rangepre": rangepre,
2394 "rangepre": rangepre,
2392 "dagrange": dagrange,
2395 "dagrange": dagrange,
2393 "string": stringset,
2396 "string": stringset,
2394 "symbol": stringset,
2397 "symbol": stringset,
2395 "and": andset,
2398 "and": andset,
2396 "or": orset,
2399 "or": orset,
2397 "not": notset,
2400 "not": notset,
2398 "difference": differenceset,
2401 "difference": differenceset,
2399 "list": listset,
2402 "list": listset,
2400 "keyvalue": keyvaluepair,
2403 "keyvalue": keyvaluepair,
2401 "func": func,
2404 "func": func,
2402 "ancestor": ancestorspec,
2405 "ancestor": ancestorspec,
2403 "parent": parentspec,
2406 "parent": parentspec,
2404 "parentpost": parentpost,
2407 "parentpost": parentpost,
2405 }
2408 }
2406
2409
2407 # Constants for ordering requirement, used in _analyze():
2410 # Constants for ordering requirement, used in _analyze():
2408 #
2411 #
2409 # If 'define', any nested functions and operations can change the ordering of
2412 # If 'define', any nested functions and operations can change the ordering of
2410 # the entries in the set. If 'follow', any nested functions and operations
2413 # the entries in the set. If 'follow', any nested functions and operations
2411 # should take the ordering specified by the first operand to the '&' operator.
2414 # should take the ordering specified by the first operand to the '&' operator.
2412 #
2415 #
2413 # For instance,
2416 # For instance,
2414 #
2417 #
2415 # X & (Y | Z)
2418 # X & (Y | Z)
2416 # ^ ^^^^^^^
2419 # ^ ^^^^^^^
2417 # | follow
2420 # | follow
2418 # define
2421 # define
2419 #
2422 #
2420 # will be evaluated as 'or(y(x()), z(x()))', where 'x()' can change the order
2423 # will be evaluated as 'or(y(x()), z(x()))', where 'x()' can change the order
2421 # of the entries in the set, but 'y()', 'z()' and 'or()' shouldn't.
2424 # of the entries in the set, but 'y()', 'z()' and 'or()' shouldn't.
2422 #
2425 #
2423 # 'any' means the order doesn't matter. For instance,
2426 # 'any' means the order doesn't matter. For instance,
2424 #
2427 #
2425 # X & !Y
2428 # X & !Y
2426 # ^
2429 # ^
2427 # any
2430 # any
2428 #
2431 #
2429 # 'y()' can either enforce its ordering requirement or take the ordering
2432 # 'y()' can either enforce its ordering requirement or take the ordering
2430 # specified by 'x()' because 'not()' doesn't care about the order.
2433 # specified by 'x()' because 'not()' doesn't care about the order.
2431 #
2434 #
2432 # Transition of ordering requirement:
2435 # Transition of ordering requirement:
2433 #
2436 #
2434 # 1. starts with 'define'
2437 # 1. starts with 'define'
2435 # 2. shifts to 'follow' by 'x & y'
2438 # 2. shifts to 'follow' by 'x & y'
2436 # 3. changes back to 'define' on function call 'f(x)' or function-like
2439 # 3. changes back to 'define' on function call 'f(x)' or function-like
2437 # operation 'x (f) y' because 'f' may have its own ordering requirement
2440 # operation 'x (f) y' because 'f' may have its own ordering requirement
2438 # for 'x' and 'y' (e.g. 'first(x)')
2441 # for 'x' and 'y' (e.g. 'first(x)')
2439 #
2442 #
2440 anyorder = 'any' # don't care about the order
2443 anyorder = 'any' # don't care about the order
2441 defineorder = 'define' # should define the order
2444 defineorder = 'define' # should define the order
2442 followorder = 'follow' # must follow the current order
2445 followorder = 'follow' # must follow the current order
2443
2446
2444 # transition table for 'x & y', from the current expression 'x' to 'y'
2447 # transition table for 'x & y', from the current expression 'x' to 'y'
2445 _tofolloworder = {
2448 _tofolloworder = {
2446 anyorder: anyorder,
2449 anyorder: anyorder,
2447 defineorder: followorder,
2450 defineorder: followorder,
2448 followorder: followorder,
2451 followorder: followorder,
2449 }
2452 }
2450
2453
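# A worked example of the transition table above: analyzing 'X & Y' keeps the
# current requirement for X and applies _tofolloworder[...] to Y, so with the
# initial 'define' requirement the right-hand side of '&' ends up as 'follow':
#
#   _tofolloworder[defineorder] == followorder
#   _tofolloworder[followorder] == followorder
#   _tofolloworder[anyorder]    == anyorder
#
# This is what the 'and' branch of _analyze() below applies to x[2].
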
2451 def _matchonly(revs, bases):
2454 def _matchonly(revs, bases):
2452 """
2455 """
2453 >>> f = lambda *args: _matchonly(*map(parse, args))
2456 >>> f = lambda *args: _matchonly(*map(parse, args))
2454 >>> f('ancestors(A)', 'not ancestors(B)')
2457 >>> f('ancestors(A)', 'not ancestors(B)')
2455 ('list', ('symbol', 'A'), ('symbol', 'B'))
2458 ('list', ('symbol', 'A'), ('symbol', 'B'))
2456 """
2459 """
2457 if (revs is not None
2460 if (revs is not None
2458 and revs[0] == 'func'
2461 and revs[0] == 'func'
2459 and getsymbol(revs[1]) == 'ancestors'
2462 and getsymbol(revs[1]) == 'ancestors'
2460 and bases is not None
2463 and bases is not None
2461 and bases[0] == 'not'
2464 and bases[0] == 'not'
2462 and bases[1][0] == 'func'
2465 and bases[1][0] == 'func'
2463 and getsymbol(bases[1][1]) == 'ancestors'):
2466 and getsymbol(bases[1][1]) == 'ancestors'):
2464 return ('list', revs[2], bases[1][2])
2467 return ('list', revs[2], bases[1][2])
2465
2468
2466 def _fixops(x):
2469 def _fixops(x):
2467 """Rewrite raw parsed tree to resolve ambiguous syntax which cannot be
2470 """Rewrite raw parsed tree to resolve ambiguous syntax which cannot be
2468 handled well by our simple top-down parser"""
2471 handled well by our simple top-down parser"""
2469 if not isinstance(x, tuple):
2472 if not isinstance(x, tuple):
2470 return x
2473 return x
2471
2474
2472 op = x[0]
2475 op = x[0]
2473 if op == 'parent':
2476 if op == 'parent':
2474 # x^:y means (x^) : y, not x ^ (:y)
2477 # x^:y means (x^) : y, not x ^ (:y)
2475 # x^: means (x^) :, not x ^ (:)
2478 # x^: means (x^) :, not x ^ (:)
2476 post = ('parentpost', x[1])
2479 post = ('parentpost', x[1])
2477 if x[2][0] == 'dagrangepre':
2480 if x[2][0] == 'dagrangepre':
2478 return _fixops(('dagrange', post, x[2][1]))
2481 return _fixops(('dagrange', post, x[2][1]))
2479 elif x[2][0] == 'rangepre':
2482 elif x[2][0] == 'rangepre':
2480 return _fixops(('range', post, x[2][1]))
2483 return _fixops(('range', post, x[2][1]))
2481 elif x[2][0] == 'rangeall':
2484 elif x[2][0] == 'rangeall':
2482 return _fixops(('rangepost', post))
2485 return _fixops(('rangepost', post))
2483 elif op == 'or':
2486 elif op == 'or':
2484 # make number of arguments deterministic:
2487 # make number of arguments deterministic:
2485 # x + y + z -> (or x y z) -> (or (list x y z))
2488 # x + y + z -> (or x y z) -> (or (list x y z))
2486 return (op, _fixops(('list',) + x[1:]))
2489 return (op, _fixops(('list',) + x[1:]))
2487
2490
2488 return (op,) + tuple(_fixops(y) for y in x[1:])
2491 return (op,) + tuple(_fixops(y) for y in x[1:])
2489
2492
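# A worked example of the 'or' rewrite in _fixops() above: a raw parse of
# 'a + b + c' arrives as a variadic ('or', x, y, z) node, and _fixops
# normalizes it so later passes can rely on x[1] being the operand list:
#
#   ('or', ('symbol', 'a'), ('symbol', 'b'), ('symbol', 'c'))
#     -> ('or', ('list', ('symbol', 'a'), ('symbol', 'b'), ('symbol', 'c')))
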
2490 def _analyze(x, order):
2493 def _analyze(x, order):
2491 if x is None:
2494 if x is None:
2492 return x
2495 return x
2493
2496
2494 op = x[0]
2497 op = x[0]
2495 if op == 'minus':
2498 if op == 'minus':
2496 return _analyze(('and', x[1], ('not', x[2])), order)
2499 return _analyze(('and', x[1], ('not', x[2])), order)
2497 elif op == 'only':
2500 elif op == 'only':
2498 t = ('func', ('symbol', 'only'), ('list', x[1], x[2]))
2501 t = ('func', ('symbol', 'only'), ('list', x[1], x[2]))
2499 return _analyze(t, order)
2502 return _analyze(t, order)
2500 elif op == 'onlypost':
2503 elif op == 'onlypost':
2501 return _analyze(('func', ('symbol', 'only'), x[1]), order)
2504 return _analyze(('func', ('symbol', 'only'), x[1]), order)
2502 elif op == 'dagrangepre':
2505 elif op == 'dagrangepre':
2503 return _analyze(('func', ('symbol', 'ancestors'), x[1]), order)
2506 return _analyze(('func', ('symbol', 'ancestors'), x[1]), order)
2504 elif op == 'dagrangepost':
2507 elif op == 'dagrangepost':
2505 return _analyze(('func', ('symbol', 'descendants'), x[1]), order)
2508 return _analyze(('func', ('symbol', 'descendants'), x[1]), order)
2506 elif op == 'rangeall':
2509 elif op == 'rangeall':
2507 return _analyze(('rangepre', ('string', 'tip')), order)
2510 return _analyze(('rangepre', ('string', 'tip')), order)
2508 elif op == 'rangepost':
2511 elif op == 'rangepost':
2509 return _analyze(('range', x[1], ('string', 'tip')), order)
2512 return _analyze(('range', x[1], ('string', 'tip')), order)
2510 elif op == 'negate':
2513 elif op == 'negate':
2511 s = getstring(x[1], _("can't negate that"))
2514 s = getstring(x[1], _("can't negate that"))
2512 return _analyze(('string', '-' + s), order)
2515 return _analyze(('string', '-' + s), order)
2513 elif op in ('string', 'symbol'):
2516 elif op in ('string', 'symbol'):
2514 return x
2517 return x
2515 elif op == 'and':
2518 elif op == 'and':
2516 ta = _analyze(x[1], order)
2519 ta = _analyze(x[1], order)
2517 tb = _analyze(x[2], _tofolloworder[order])
2520 tb = _analyze(x[2], _tofolloworder[order])
2518 return (op, ta, tb, order)
2521 return (op, ta, tb, order)
2519 elif op == 'or':
2522 elif op == 'or':
2520 return (op, _analyze(x[1], order), order)
2523 return (op, _analyze(x[1], order), order)
2521 elif op == 'not':
2524 elif op == 'not':
2522 return (op, _analyze(x[1], anyorder), order)
2525 return (op, _analyze(x[1], anyorder), order)
2523 elif op in ('rangepre', 'parentpost'):
2526 elif op in ('rangepre', 'parentpost'):
2524 return (op, _analyze(x[1], defineorder), order)
2527 return (op, _analyze(x[1], defineorder), order)
2525 elif op == 'group':
2528 elif op == 'group':
2526 return _analyze(x[1], order)
2529 return _analyze(x[1], order)
2527 elif op in ('dagrange', 'range', 'parent', 'ancestor'):
2530 elif op in ('dagrange', 'range', 'parent', 'ancestor'):
2528 ta = _analyze(x[1], defineorder)
2531 ta = _analyze(x[1], defineorder)
2529 tb = _analyze(x[2], defineorder)
2532 tb = _analyze(x[2], defineorder)
2530 return (op, ta, tb, order)
2533 return (op, ta, tb, order)
2531 elif op == 'list':
2534 elif op == 'list':
2532 return (op,) + tuple(_analyze(y, order) for y in x[1:])
2535 return (op,) + tuple(_analyze(y, order) for y in x[1:])
2533 elif op == 'keyvalue':
2536 elif op == 'keyvalue':
2534 return (op, x[1], _analyze(x[2], order))
2537 return (op, x[1], _analyze(x[2], order))
2535 elif op == 'func':
2538 elif op == 'func':
2536 f = getsymbol(x[1])
2539 f = getsymbol(x[1])
2537 d = defineorder
2540 d = defineorder
2538 if f == 'present':
2541 if f == 'present':
2539 # 'present(set)' is known to return the argument set with no
2542 # 'present(set)' is known to return the argument set with no
2540 # modification, so forward the current order to its argument
2543 # modification, so forward the current order to its argument
2541 d = order
2544 d = order
2542 return (op, x[1], _analyze(x[2], d), order)
2545 return (op, x[1], _analyze(x[2], d), order)
2543 raise ValueError('invalid operator %r' % op)
2546 raise ValueError('invalid operator %r' % op)
2544
2547
2545 def analyze(x, order=defineorder):
2548 def analyze(x, order=defineorder):
2546 """Transform raw parsed tree to evaluatable tree which can be fed to
2549 """Transform raw parsed tree to evaluatable tree which can be fed to
2547 optimize() or getset()
2550 optimize() or getset()
2548
2551
2549 All pseudo operations should be mapped to real operations or functions
2552 All pseudo operations should be mapped to real operations or functions
2550 defined in methods or symbols table respectively.
2553 defined in methods or symbols table respectively.
2551
2554
2552 'order' specifies how the current expression 'x' is ordered (see the
2555 'order' specifies how the current expression 'x' is ordered (see the
2553 constants defined above.)
2556 constants defined above.)
2554 """
2557 """
2555 return _analyze(x, order)
2558 return _analyze(x, order)
2556
2559
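# A worked example of the rewriting done by _analyze()/analyze() above:
# pseudo operations are lowered to real ones and structural nodes gain a
# trailing ordering annotation. For instance, the raw tree for 'a - b',
# analyzed with the default 'define' requirement, should come out as
#
#   ('minus', ('symbol', 'a'), ('symbol', 'b'))
#     -> ('and', ('symbol', 'a'),
#                ('not', ('symbol', 'b'), 'follow'),
#         'define')
#
# with the right-hand side demoted to 'follow' via _tofolloworder.
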
2557 def _optimize(x, small):
2560 def _optimize(x, small):
2558 if x is None:
2561 if x is None:
2559 return 0, x
2562 return 0, x
2560
2563
2561 smallbonus = 1
2564 smallbonus = 1
2562 if small:
2565 if small:
2563 smallbonus = .5
2566 smallbonus = .5
2564
2567
2565 op = x[0]
2568 op = x[0]
2566 if op in ('string', 'symbol'):
2569 if op in ('string', 'symbol'):
2567 return smallbonus, x # single revisions are small
2570 return smallbonus, x # single revisions are small
2568 elif op == 'and':
2571 elif op == 'and':
2569 wa, ta = _optimize(x[1], True)
2572 wa, ta = _optimize(x[1], True)
2570 wb, tb = _optimize(x[2], True)
2573 wb, tb = _optimize(x[2], True)
2571 order = x[3]
2574 order = x[3]
2572 w = min(wa, wb)
2575 w = min(wa, wb)
2573
2576
2574 # (::x and not ::y)/(not ::y and ::x) have a fast path
2577 # (::x and not ::y)/(not ::y and ::x) have a fast path
2575 tm = _matchonly(ta, tb) or _matchonly(tb, ta)
2578 tm = _matchonly(ta, tb) or _matchonly(tb, ta)
2576 if tm:
2579 if tm:
2577 return w, ('func', ('symbol', 'only'), tm, order)
2580 return w, ('func', ('symbol', 'only'), tm, order)
2578
2581
2579 if tb is not None and tb[0] == 'not':
2582 if tb is not None and tb[0] == 'not':
2580 return wa, ('difference', ta, tb[1], order)
2583 return wa, ('difference', ta, tb[1], order)
2581
2584
2582 if wa > wb:
2585 if wa > wb:
2583 return w, (op, tb, ta, order)
2586 return w, (op, tb, ta, order)
2584 return w, (op, ta, tb, order)
2587 return w, (op, ta, tb, order)
2585 elif op == 'or':
2588 elif op == 'or':
2586 # fast path for machine-generated expressions, which are likely to have
2589 # fast path for machine-generated expressions, which are likely to have
2587 # lots of trivial revisions: 'a + b + c()' to '_list(a b) + c()'
2590 # lots of trivial revisions: 'a + b + c()' to '_list(a b) + c()'
2588 order = x[2]
2591 order = x[2]
2589 ws, ts, ss = [], [], []
2592 ws, ts, ss = [], [], []
2590 def flushss():
2593 def flushss():
2591 if not ss:
2594 if not ss:
2592 return
2595 return
2593 if len(ss) == 1:
2596 if len(ss) == 1:
2594 w, t = ss[0]
2597 w, t = ss[0]
2595 else:
2598 else:
2596 s = '\0'.join(t[1] for w, t in ss)
2599 s = '\0'.join(t[1] for w, t in ss)
2597 y = ('func', ('symbol', '_list'), ('string', s), order)
2600 y = ('func', ('symbol', '_list'), ('string', s), order)
2598 w, t = _optimize(y, False)
2601 w, t = _optimize(y, False)
2599 ws.append(w)
2602 ws.append(w)
2600 ts.append(t)
2603 ts.append(t)
2601 del ss[:]
2604 del ss[:]
2602 for y in getlist(x[1]):
2605 for y in getlist(x[1]):
2603 w, t = _optimize(y, False)
2606 w, t = _optimize(y, False)
2604 if t is not None and (t[0] == 'string' or t[0] == 'symbol'):
2607 if t is not None and (t[0] == 'string' or t[0] == 'symbol'):
2605 ss.append((w, t))
2608 ss.append((w, t))
2606 continue
2609 continue
2607 flushss()
2610 flushss()
2608 ws.append(w)
2611 ws.append(w)
2609 ts.append(t)
2612 ts.append(t)
2610 flushss()
2613 flushss()
2611 if len(ts) == 1:
2614 if len(ts) == 1:
2612 return ws[0], ts[0] # 'or' operation is fully optimized out
2615 return ws[0], ts[0] # 'or' operation is fully optimized out
2613 # we can't reorder trees by weight because it would change the order.
2616 # we can't reorder trees by weight because it would change the order.
2614 # ("sort(a + b)" == "sort(b + a)", but "a + b" != "b + a")
2617 # ("sort(a + b)" == "sort(b + a)", but "a + b" != "b + a")
2615 # ts = tuple(t for w, t in sorted(zip(ws, ts), key=lambda wt: wt[0]))
2618 # ts = tuple(t for w, t in sorted(zip(ws, ts), key=lambda wt: wt[0]))
2616 return max(ws), (op, ('list',) + tuple(ts), order)
2619 return max(ws), (op, ('list',) + tuple(ts), order)
2617 elif op == 'not':
2620 elif op == 'not':
2618 # Optimize not public() to _notpublic() because we have a fast version
2621 # Optimize not public() to _notpublic() because we have a fast version
2619 if x[1][:3] == ('func', ('symbol', 'public'), None):
2622 if x[1][:3] == ('func', ('symbol', 'public'), None):
2620 order = x[1][3]
2623 order = x[1][3]
2621 newsym = ('func', ('symbol', '_notpublic'), None, order)
2624 newsym = ('func', ('symbol', '_notpublic'), None, order)
2622 o = _optimize(newsym, not small)
2625 o = _optimize(newsym, not small)
2623 return o[0], o[1]
2626 return o[0], o[1]
2624 else:
2627 else:
2625 o = _optimize(x[1], not small)
2628 o = _optimize(x[1], not small)
2626 order = x[2]
2629 order = x[2]
2627 return o[0], (op, o[1], order)
2630 return o[0], (op, o[1], order)
2628 elif op in ('rangepre', 'parentpost'):
2631 elif op in ('rangepre', 'parentpost'):
2629 o = _optimize(x[1], small)
2632 o = _optimize(x[1], small)
2630 order = x[2]
2633 order = x[2]
2631 return o[0], (op, o[1], order)
2634 return o[0], (op, o[1], order)
2632 elif op in ('dagrange', 'range', 'parent', 'ancestor'):
2635 elif op in ('dagrange', 'range', 'parent', 'ancestor'):
2633 wa, ta = _optimize(x[1], small)
2636 wa, ta = _optimize(x[1], small)
2634 wb, tb = _optimize(x[2], small)
2637 wb, tb = _optimize(x[2], small)
2635 order = x[3]
2638 order = x[3]
2636 return wa + wb, (op, ta, tb, order)
2639 return wa + wb, (op, ta, tb, order)
2637 elif op == 'list':
2640 elif op == 'list':
2638 ws, ts = zip(*(_optimize(y, small) for y in x[1:]))
2641 ws, ts = zip(*(_optimize(y, small) for y in x[1:]))
2639 return sum(ws), (op,) + ts
2642 return sum(ws), (op,) + ts
2640 elif op == 'keyvalue':
2643 elif op == 'keyvalue':
2641 w, t = _optimize(x[2], small)
2644 w, t = _optimize(x[2], small)
2642 return w, (op, x[1], t)
2645 return w, (op, x[1], t)
2643 elif op == 'func':
2646 elif op == 'func':
2644 f = getsymbol(x[1])
2647 f = getsymbol(x[1])
2645 wa, ta = _optimize(x[2], small)
2648 wa, ta = _optimize(x[2], small)
2646 if f in ('author', 'branch', 'closed', 'date', 'desc', 'file', 'grep',
2649 if f in ('author', 'branch', 'closed', 'date', 'desc', 'file', 'grep',
2647 'keyword', 'outgoing', 'user', 'destination'):
2650 'keyword', 'outgoing', 'user', 'destination'):
2648 w = 10 # slow
2651 w = 10 # slow
2649 elif f in ('modifies', 'adds', 'removes'):
2652 elif f in ('modifies', 'adds', 'removes'):
2650 w = 30 # slower
2653 w = 30 # slower
2651 elif f == "contains":
2654 elif f == "contains":
2652 w = 100 # very slow
2655 w = 100 # very slow
2653 elif f == "ancestor":
2656 elif f == "ancestor":
2654 w = 1 * smallbonus
2657 w = 1 * smallbonus
2655 elif f in ('reverse', 'limit', 'first', 'wdir', '_intlist'):
2658 elif f in ('reverse', 'limit', 'first', 'wdir', '_intlist'):
2656 w = 0
2659 w = 0
2657 elif f == "sort":
2660 elif f == "sort":
2658 w = 10 # assume most sorts look at changelog
2661 w = 10 # assume most sorts look at changelog
2659 else:
2662 else:
2660 w = 1
2663 w = 1
2661 order = x[3]
2664 order = x[3]
2662 return w + wa, (op, x[1], ta, order)
2665 return w + wa, (op, x[1], ta, order)
2663 raise ValueError('invalid operator %r' % op)
2666 raise ValueError('invalid operator %r' % op)
2664
2667
2665 def optimize(tree):
2668 def optimize(tree):
2666 """Optimize evaluatable tree
2669 """Optimize evaluatable tree
2667
2670
2668 All pseudo operations should be transformed beforehand.
2671 All pseudo operations should be transformed beforehand.
2669 """
2672 """
2670 _weight, newtree = _optimize(tree, small=True)
2673 _weight, newtree = _optimize(tree, small=True)
2671 return newtree
2674 return newtree
2672
2675
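# Illustrative sketch, not part of the original module: the usual pipeline for
# turning a raw revset string into an evaluatable tree. The revset text and the
# helper name are examples only; parse(), analyze() and optimize() are the
# module-level functions defined in this file.
def _example_preparetree(spec='head() and not merge()'):
    tree = parse(spec)     # raw parse tree, may contain pseudo operations
    tree = analyze(tree)   # map pseudo ops and annotate ordering requirements
    return optimize(tree)  # reorder 'and' operands by estimated cost, etc.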
2673 # the set of valid characters for the initial letter of symbols in
2676 # the set of valid characters for the initial letter of symbols in
2674 # alias declarations and definitions
2677 # alias declarations and definitions
2675 _aliassyminitletters = _syminitletters | set(pycompat.sysstr('$'))
2678 _aliassyminitletters = _syminitletters | set(pycompat.sysstr('$'))
2676
2679
2677 def _parsewith(spec, lookup=None, syminitletters=None):
2680 def _parsewith(spec, lookup=None, syminitletters=None):
2678 """Generate a parse tree of given spec with given tokenizing options
2681 """Generate a parse tree of given spec with given tokenizing options
2679
2682
2680 >>> _parsewith('foo($1)', syminitletters=_aliassyminitletters)
2683 >>> _parsewith('foo($1)', syminitletters=_aliassyminitletters)
2681 ('func', ('symbol', 'foo'), ('symbol', '$1'))
2684 ('func', ('symbol', 'foo'), ('symbol', '$1'))
2682 >>> _parsewith('$1')
2685 >>> _parsewith('$1')
2683 Traceback (most recent call last):
2686 Traceback (most recent call last):
2684 ...
2687 ...
2685 ParseError: ("syntax error in revset '$1'", 0)
2688 ParseError: ("syntax error in revset '$1'", 0)
2686 >>> _parsewith('foo bar')
2689 >>> _parsewith('foo bar')
2687 Traceback (most recent call last):
2690 Traceback (most recent call last):
2688 ...
2691 ...
2689 ParseError: ('invalid token', 4)
2692 ParseError: ('invalid token', 4)
2690 """
2693 """
2691 p = parser.parser(elements)
2694 p = parser.parser(elements)
2692 tree, pos = p.parse(tokenize(spec, lookup=lookup,
2695 tree, pos = p.parse(tokenize(spec, lookup=lookup,
2693 syminitletters=syminitletters))
2696 syminitletters=syminitletters))
2694 if pos != len(spec):
2697 if pos != len(spec):
2695 raise error.ParseError(_('invalid token'), pos)
2698 raise error.ParseError(_('invalid token'), pos)
2696 return _fixops(parser.simplifyinfixops(tree, ('list', 'or')))
2699 return _fixops(parser.simplifyinfixops(tree, ('list', 'or')))
2697
2700
2698 class _aliasrules(parser.basealiasrules):
2701 class _aliasrules(parser.basealiasrules):
2699 """Parsing and expansion rule set of revset aliases"""
2702 """Parsing and expansion rule set of revset aliases"""
2700 _section = _('revset alias')
2703 _section = _('revset alias')
2701
2704
2702 @staticmethod
2705 @staticmethod
2703 def _parse(spec):
2706 def _parse(spec):
2704 """Parse alias declaration/definition ``spec``
2707 """Parse alias declaration/definition ``spec``
2705
2708
2706 This also allows symbol names to use ``$`` as an initial letter (for
2709 This also allows symbol names to use ``$`` as an initial letter (for
2707 backward compatibility), and callers of this function should examine
2710 backward compatibility), and callers of this function should examine
2708 whether ``$`` is also used for unexpected symbols.
2711 whether ``$`` is also used for unexpected symbols.
2709 """
2712 """
2710 return _parsewith(spec, syminitletters=_aliassyminitletters)
2713 return _parsewith(spec, syminitletters=_aliassyminitletters)
2711
2714
2712 @staticmethod
2715 @staticmethod
2713 def _trygetfunc(tree):
2716 def _trygetfunc(tree):
2714 if tree[0] == 'func' and tree[1][0] == 'symbol':
2717 if tree[0] == 'func' and tree[1][0] == 'symbol':
2715 return tree[1][1], getlist(tree[2])
2718 return tree[1][1], getlist(tree[2])
2716
2719
2717 def expandaliases(ui, tree):
2720 def expandaliases(ui, tree):
2718 aliases = _aliasrules.buildmap(ui.configitems('revsetalias'))
2721 aliases = _aliasrules.buildmap(ui.configitems('revsetalias'))
2719 tree = _aliasrules.expand(aliases, tree)
2722 tree = _aliasrules.expand(aliases, tree)
2720 # warn about problematic (but not referenced) aliases
2723 # warn about problematic (but not referenced) aliases
2721 for name, alias in sorted(aliases.iteritems()):
2724 for name, alias in sorted(aliases.iteritems()):
2722 if alias.error and not alias.warned:
2725 if alias.error and not alias.warned:
2723 ui.warn(_('warning: %s\n') % (alias.error))
2726 ui.warn(_('warning: %s\n') % (alias.error))
2724 alias.warned = True
2727 alias.warned = True
2725 return tree
2728 return tree
2726
2729
2727 def foldconcat(tree):
2730 def foldconcat(tree):
2728 """Fold elements to be concatenated by `##`
2731 """Fold elements to be concatenated by `##`
2729 """
2732 """
2730 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2733 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2731 return tree
2734 return tree
2732 if tree[0] == '_concat':
2735 if tree[0] == '_concat':
2733 pending = [tree]
2736 pending = [tree]
2734 l = []
2737 l = []
2735 while pending:
2738 while pending:
2736 e = pending.pop()
2739 e = pending.pop()
2737 if e[0] == '_concat':
2740 if e[0] == '_concat':
2738 pending.extend(reversed(e[1:]))
2741 pending.extend(reversed(e[1:]))
2739 elif e[0] in ('string', 'symbol'):
2742 elif e[0] in ('string', 'symbol'):
2740 l.append(e[1])
2743 l.append(e[1])
2741 else:
2744 else:
2742 msg = _("\"##\" can't concatenate \"%s\" element") % (e[0])
2745 msg = _("\"##\" can't concatenate \"%s\" element") % (e[0])
2743 raise error.ParseError(msg)
2746 raise error.ParseError(msg)
2744 return ('string', ''.join(l))
2747 return ('string', ''.join(l))
2745 else:
2748 else:
2746 return tuple(foldconcat(t) for t in tree)
2749 return tuple(foldconcat(t) for t in tree)
2747
2750
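# Illustrative sketch, not part of the original module: folding a '##'
# concatenation. The tree below is hand-built in the '_concat' form that
# foldconcat() accepts; the strings are examples only.
def _example_foldconcat():
    tree = ('_concat', ('string', 'foo'), ('string', 'bar'))
    return foldconcat(tree)  # -> ('string', 'foobar')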
2748 def parse(spec, lookup=None):
2751 def parse(spec, lookup=None):
2749 return _parsewith(spec, lookup=lookup)
2752 return _parsewith(spec, lookup=lookup)
2750
2753
2751 def posttreebuilthook(tree, repo):
2754 def posttreebuilthook(tree, repo):
2752 # hook for extensions to execute code on the optimized tree
2755 # hook for extensions to execute code on the optimized tree
2753 pass
2756 pass
2754
2757
2755 def match(ui, spec, repo=None, order=defineorder):
2758 def match(ui, spec, repo=None, order=defineorder):
2756 """Create a matcher for a single revision spec
2759 """Create a matcher for a single revision spec
2757
2760
2758 If order=followorder, a matcher takes the ordering specified by the input
2761 If order=followorder, a matcher takes the ordering specified by the input
2759 set.
2762 set.
2760 """
2763 """
2761 return matchany(ui, [spec], repo=repo, order=order)
2764 return matchany(ui, [spec], repo=repo, order=order)
2762
2765
2763 def matchany(ui, specs, repo=None, order=defineorder):
2766 def matchany(ui, specs, repo=None, order=defineorder):
2764 """Create a matcher that will include any revisions matching one of the
2767 """Create a matcher that will include any revisions matching one of the
2765 given specs
2768 given specs
2766
2769
2767 If order=followorder, a matcher takes the ordering specified by the input
2770 If order=followorder, a matcher takes the ordering specified by the input
2768 set.
2771 set.
2769 """
2772 """
2770 if not specs:
2773 if not specs:
2771 def mfunc(repo, subset=None):
2774 def mfunc(repo, subset=None):
2772 return baseset()
2775 return baseset()
2773 return mfunc
2776 return mfunc
2774 if not all(specs):
2777 if not all(specs):
2775 raise error.ParseError(_("empty query"))
2778 raise error.ParseError(_("empty query"))
2776 lookup = None
2779 lookup = None
2777 if repo:
2780 if repo:
2778 lookup = repo.__contains__
2781 lookup = repo.__contains__
2779 if len(specs) == 1:
2782 if len(specs) == 1:
2780 tree = parse(specs[0], lookup)
2783 tree = parse(specs[0], lookup)
2781 else:
2784 else:
2782 tree = ('or', ('list',) + tuple(parse(s, lookup) for s in specs))
2785 tree = ('or', ('list',) + tuple(parse(s, lookup) for s in specs))
2783
2786
2784 if ui:
2787 if ui:
2785 tree = expandaliases(ui, tree)
2788 tree = expandaliases(ui, tree)
2786 tree = foldconcat(tree)
2789 tree = foldconcat(tree)
2787 tree = analyze(tree, order)
2790 tree = analyze(tree, order)
2788 tree = optimize(tree)
2791 tree = optimize(tree)
2789 posttreebuilthook(tree, repo)
2792 posttreebuilthook(tree, repo)
2790 return makematcher(tree)
2793 return makematcher(tree)
2791
2794
2792 def makematcher(tree):
2795 def makematcher(tree):
2793 """Create a matcher from an evaluatable tree"""
2796 """Create a matcher from an evaluatable tree"""
2794 def mfunc(repo, subset=None):
2797 def mfunc(repo, subset=None):
2795 if subset is None:
2798 if subset is None:
2796 subset = fullreposet(repo)
2799 subset = fullreposet(repo)
2797 if util.safehasattr(subset, 'isascending'):
2800 if util.safehasattr(subset, 'isascending'):
2798 result = getset(repo, subset, tree)
2801 result = getset(repo, subset, tree)
2799 else:
2802 else:
2800 result = getset(repo, baseset(subset), tree)
2803 result = getset(repo, baseset(subset), tree)
2801 return result
2804 return result
2802 return mfunc
2805 return mfunc
2803
2806
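# Illustrative sketch, not part of the original module: how callers typically
# use match() and matchany(). 'ui' and 'repo' are assumed to come from an
# already loaded repository; the revset strings are examples only.
def _example_match_usage(ui, repo):
    m = match(ui, 'head() and not closed()', repo=repo)
    any_of = matchany(ui, ['tip', 'draft()'], repo=repo)
    # a matcher is called with the repo (and optionally a subset) and returns
    # a smartset of revision numbers
    return list(m(repo)), list(any_of(repo))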
2804 def formatspec(expr, *args):
2807 def formatspec(expr, *args):
2805 '''
2808 '''
2806 This is a convenience function for using revsets internally, and
2809 This is a convenience function for using revsets internally, and
2807 escapes arguments appropriately. Aliases are intentionally ignored
2810 escapes arguments appropriately. Aliases are intentionally ignored
2808 so that intended expression behavior isn't accidentally subverted.
2811 so that intended expression behavior isn't accidentally subverted.
2809
2812
2810 Supported arguments:
2813 Supported arguments:
2811
2814
2812 %r = revset expression, parenthesized
2815 %r = revset expression, parenthesized
2813 %d = int(arg), no quoting
2816 %d = int(arg), no quoting
2814 %s = string(arg), escaped and single-quoted
2817 %s = string(arg), escaped and single-quoted
2815 %b = arg.branch(), escaped and single-quoted
2818 %b = arg.branch(), escaped and single-quoted
2816 %n = hex(arg), single-quoted
2819 %n = hex(arg), single-quoted
2817 %% = a literal '%'
2820 %% = a literal '%'
2818
2821
2819 Prefixing the type with 'l' specifies a parenthesized list of that type.
2822 Prefixing the type with 'l' specifies a parenthesized list of that type.
2820
2823
2821 >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
2824 >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
2822 '(10 or 11):: and ((this()) or (that()))'
2825 '(10 or 11):: and ((this()) or (that()))'
2823 >>> formatspec('%d:: and not %d::', 10, 20)
2826 >>> formatspec('%d:: and not %d::', 10, 20)
2824 '10:: and not 20::'
2827 '10:: and not 20::'
2825 >>> formatspec('%ld or %ld', [], [1])
2828 >>> formatspec('%ld or %ld', [], [1])
2826 "_list('') or 1"
2829 "_list('') or 1"
2827 >>> formatspec('keyword(%s)', 'foo\\xe9')
2830 >>> formatspec('keyword(%s)', 'foo\\xe9')
2828 "keyword('foo\\\\xe9')"
2831 "keyword('foo\\\\xe9')"
2829 >>> b = lambda: 'default'
2832 >>> b = lambda: 'default'
2830 >>> b.branch = b
2833 >>> b.branch = b
2831 >>> formatspec('branch(%b)', b)
2834 >>> formatspec('branch(%b)', b)
2832 "branch('default')"
2835 "branch('default')"
2833 >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
2836 >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
2834 "root(_list('a\\x00b\\x00c\\x00d'))"
2837 "root(_list('a\\x00b\\x00c\\x00d'))"
2835 '''
2838 '''
2836
2839
2837 def quote(s):
2840 def quote(s):
2838 return repr(str(s))
2841 return repr(str(s))
2839
2842
2840 def argtype(c, arg):
2843 def argtype(c, arg):
2841 if c == 'd':
2844 if c == 'd':
2842 return str(int(arg))
2845 return str(int(arg))
2843 elif c == 's':
2846 elif c == 's':
2844 return quote(arg)
2847 return quote(arg)
2845 elif c == 'r':
2848 elif c == 'r':
2846 parse(arg) # make sure syntax errors are confined
2849 parse(arg) # make sure syntax errors are confined
2847 return '(%s)' % arg
2850 return '(%s)' % arg
2848 elif c == 'n':
2851 elif c == 'n':
2849 return quote(node.hex(arg))
2852 return quote(node.hex(arg))
2850 elif c == 'b':
2853 elif c == 'b':
2851 return quote(arg.branch())
2854 return quote(arg.branch())
2852
2855
2853 def listexp(s, t):
2856 def listexp(s, t):
2854 l = len(s)
2857 l = len(s)
2855 if l == 0:
2858 if l == 0:
2856 return "_list('')"
2859 return "_list('')"
2857 elif l == 1:
2860 elif l == 1:
2858 return argtype(t, s[0])
2861 return argtype(t, s[0])
2859 elif t == 'd':
2862 elif t == 'd':
2860 return "_intlist('%s')" % "\0".join(str(int(a)) for a in s)
2863 return "_intlist('%s')" % "\0".join(str(int(a)) for a in s)
2861 elif t == 's':
2864 elif t == 's':
2862 return "_list('%s')" % "\0".join(s)
2865 return "_list('%s')" % "\0".join(s)
2863 elif t == 'n':
2866 elif t == 'n':
2864 return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
2867 return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
2865 elif t == 'b':
2868 elif t == 'b':
2866 return "_list('%s')" % "\0".join(a.branch() for a in s)
2869 return "_list('%s')" % "\0".join(a.branch() for a in s)
2867
2870
2868 m = l // 2
2871 m = l // 2
2869 return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))
2872 return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))
2870
2873
2871 ret = ''
2874 ret = ''
2872 pos = 0
2875 pos = 0
2873 arg = 0
2876 arg = 0
2874 while pos < len(expr):
2877 while pos < len(expr):
2875 c = expr[pos]
2878 c = expr[pos]
2876 if c == '%':
2879 if c == '%':
2877 pos += 1
2880 pos += 1
2878 d = expr[pos]
2881 d = expr[pos]
2879 if d == '%':
2882 if d == '%':
2880 ret += d
2883 ret += d
2881 elif d in 'dsnbr':
2884 elif d in 'dsnbr':
2882 ret += argtype(d, args[arg])
2885 ret += argtype(d, args[arg])
2883 arg += 1
2886 arg += 1
2884 elif d == 'l':
2887 elif d == 'l':
2885 # a list of some type
2888 # a list of some type
2886 pos += 1
2889 pos += 1
2887 d = expr[pos]
2890 d = expr[pos]
2888 ret += listexp(list(args[arg]), d)
2891 ret += listexp(list(args[arg]), d)
2889 arg += 1
2892 arg += 1
2890 else:
2893 else:
2891 raise error.Abort(_('unexpected revspec format character %s')
2894 raise error.Abort(_('unexpected revspec format character %s')
2892 % d)
2895 % d)
2893 else:
2896 else:
2894 ret += c
2897 ret += c
2895 pos += 1
2898 pos += 1
2896
2899
2897 return ret
2900 return ret
2898
2901
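# Illustrative sketch, not part of the original module: building a revset
# string safely with formatspec() instead of concatenating user input by hand.
# The argument values are examples only.
def _example_formatspec():
    revs = [10, 11, 12]
    word = 'foo bar'
    # %ld expands to _intlist(...); %s is escaped and single-quoted
    return formatspec('%ld and keyword(%s)', revs, word)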
2899 def prettyformat(tree):
2902 def prettyformat(tree):
2900 return parser.prettyformat(tree, ('string', 'symbol'))
2903 return parser.prettyformat(tree, ('string', 'symbol'))
2901
2904
2902 def depth(tree):
2905 def depth(tree):
2903 if isinstance(tree, tuple):
2906 if isinstance(tree, tuple):
2904 return max(map(depth, tree)) + 1
2907 return max(map(depth, tree)) + 1
2905 else:
2908 else:
2906 return 0
2909 return 0
2907
2910
2908 def funcsused(tree):
2911 def funcsused(tree):
2909 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2912 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2910 return set()
2913 return set()
2911 else:
2914 else:
2912 funcs = set()
2915 funcs = set()
2913 for s in tree[1:]:
2916 for s in tree[1:]:
2914 funcs |= funcsused(s)
2917 funcs |= funcsused(s)
2915 if tree[0] == 'func':
2918 if tree[0] == 'func':
2916 funcs.add(tree[1][1])
2919 funcs.add(tree[1][1])
2917 return funcs
2920 return funcs
2918
2921
2919 def _formatsetrepr(r):
2922 def _formatsetrepr(r):
2920 """Format an optional printable representation of a set
2923 """Format an optional printable representation of a set
2921
2924
2922 ======== =================================
2925 ======== =================================
2923 type(r) example
2926 type(r) example
2924 ======== =================================
2927 ======== =================================
2925 tuple ('<not %r>', other)
2928 tuple ('<not %r>', other)
2926 str '<branch closed>'
2929 str '<branch closed>'
2927 callable lambda: '<branch %r>' % sorted(b)
2930 callable lambda: '<branch %r>' % sorted(b)
2928 object other
2931 object other
2929 ======== =================================
2932 ======== =================================
2930 """
2933 """
2931 if r is None:
2934 if r is None:
2932 return ''
2935 return ''
2933 elif isinstance(r, tuple):
2936 elif isinstance(r, tuple):
2934 return r[0] % r[1:]
2937 return r[0] % r[1:]
2935 elif isinstance(r, str):
2938 elif isinstance(r, str):
2936 return r
2939 return r
2937 elif callable(r):
2940 elif callable(r):
2938 return r()
2941 return r()
2939 else:
2942 else:
2940 return repr(r)
2943 return repr(r)
2941
2944
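# Illustrative sketch, not part of the original module: the four kinds of
# values _formatsetrepr() accepts, mirroring the table in its docstring.
def _example_formatsetrepr():
    return [
        _formatsetrepr(None),                           # -> ''
        _formatsetrepr(('<not %r>', [1, 2])),           # tuple: format + args
        _formatsetrepr('<branch closed>'),              # plain string
        _formatsetrepr(lambda: '<branch %r>' % 'foo'),  # callable
    ]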
2942 class abstractsmartset(object):
2945 class abstractsmartset(object):
2943
2946
2944 def __nonzero__(self):
2947 def __nonzero__(self):
2945 """True if the smartset is not empty"""
2948 """True if the smartset is not empty"""
2946 raise NotImplementedError()
2949 raise NotImplementedError()
2947
2950
2948 def __contains__(self, rev):
2951 def __contains__(self, rev):
2949 """provide fast membership testing"""
2952 """provide fast membership testing"""
2950 raise NotImplementedError()
2953 raise NotImplementedError()
2951
2954
2952 def __iter__(self):
2955 def __iter__(self):
2953 """iterate the set in the order it is supposed to be iterated"""
2956 """iterate the set in the order it is supposed to be iterated"""
2954 raise NotImplementedError()
2957 raise NotImplementedError()
2955
2958
2956 # Attributes containing a function to perform a fast iteration in a given
2959 # Attributes containing a function to perform a fast iteration in a given
2957 # direction. A smartset can have none, one, or both defined.
2960 # direction. A smartset can have none, one, or both defined.
2958 #
2961 #
2959 # Default value is None instead of a function returning None to avoid
2962 # Default value is None instead of a function returning None to avoid
2960 # initializing an iterator just for testing if a fast method exists.
2963 # initializing an iterator just for testing if a fast method exists.
2961 fastasc = None
2964 fastasc = None
2962 fastdesc = None
2965 fastdesc = None
2963
2966
2964 def isascending(self):
2967 def isascending(self):
2965 """True if the set will iterate in ascending order"""
2968 """True if the set will iterate in ascending order"""
2966 raise NotImplementedError()
2969 raise NotImplementedError()
2967
2970
2968 def isdescending(self):
2971 def isdescending(self):
2969 """True if the set will iterate in descending order"""
2972 """True if the set will iterate in descending order"""
2970 raise NotImplementedError()
2973 raise NotImplementedError()
2971
2974
2972 def istopo(self):
2975 def istopo(self):
2973 """True if the set will iterate in topographical order"""
2976 """True if the set will iterate in topographical order"""
2974 raise NotImplementedError()
2977 raise NotImplementedError()
2975
2978
2976 def min(self):
2979 def min(self):
2977 """return the minimum element in the set"""
2980 """return the minimum element in the set"""
2978 if self.fastasc is None:
2981 if self.fastasc is None:
2979 v = min(self)
2982 v = min(self)
2980 else:
2983 else:
2981 for v in self.fastasc():
2984 for v in self.fastasc():
2982 break
2985 break
2983 else:
2986 else:
2984 raise ValueError('arg is an empty sequence')
2987 raise ValueError('arg is an empty sequence')
2985 self.min = lambda: v
2988 self.min = lambda: v
2986 return v
2989 return v
2987
2990
2988 def max(self):
2991 def max(self):
2989 """return the maximum element in the set"""
2992 """return the maximum element in the set"""
2990 if self.fastdesc is None:
2993 if self.fastdesc is None:
2991 return max(self)
2994 return max(self)
2992 else:
2995 else:
2993 for v in self.fastdesc():
2996 for v in self.fastdesc():
2994 break
2997 break
2995 else:
2998 else:
2996 raise ValueError('arg is an empty sequence')
2999 raise ValueError('arg is an empty sequence')
2997 self.max = lambda: v
3000 self.max = lambda: v
2998 return v
3001 return v
2999
3002
3000 def first(self):
3003 def first(self):
3001 """return the first element in the set (user iteration perspective)
3004 """return the first element in the set (user iteration perspective)
3002
3005
3003 Return None if the set is empty"""
3006 Return None if the set is empty"""
3004 raise NotImplementedError()
3007 raise NotImplementedError()
3005
3008
3006 def last(self):
3009 def last(self):
3007 """return the last element in the set (user iteration perspective)
3010 """return the last element in the set (user iteration perspective)
3008
3011
3009 Return None if the set is empty"""
3012 Return None if the set is empty"""
3010 raise NotImplementedError()
3013 raise NotImplementedError()
3011
3014
3012 def __len__(self):
3015 def __len__(self):
3013 """return the length of the smartsets
3016 """return the length of the smartsets
3014
3017
3015 This can be expensive on smartset that could be lazy otherwise."""
3018 This can be expensive on smartset that could be lazy otherwise."""
3016 raise NotImplementedError()
3019 raise NotImplementedError()
3017
3020
3018 def reverse(self):
3021 def reverse(self):
3019 """reverse the expected iteration order"""
3022 """reverse the expected iteration order"""
3020 raise NotImplementedError()
3023 raise NotImplementedError()
3021
3024
3022 def sort(self, reverse=True):
3025 def sort(self, reverse=True):
3023 """get the set to iterate in an ascending or descending order"""
3026 """get the set to iterate in an ascending or descending order"""
3024 raise NotImplementedError()
3027 raise NotImplementedError()
3025
3028
3026 def __and__(self, other):
3029 def __and__(self, other):
3027 """Returns a new object with the intersection of the two collections.
3030 """Returns a new object with the intersection of the two collections.
3028
3031
3029 This is part of the mandatory API for smartset."""
3032 This is part of the mandatory API for smartset."""
3030 if isinstance(other, fullreposet):
3033 if isinstance(other, fullreposet):
3031 return self
3034 return self
3032 return self.filter(other.__contains__, condrepr=other, cache=False)
3035 return self.filter(other.__contains__, condrepr=other, cache=False)
3033
3036
3034 def __add__(self, other):
3037 def __add__(self, other):
3035 """Returns a new object with the union of the two collections.
3038 """Returns a new object with the union of the two collections.
3036
3039
3037 This is part of the mandatory API for smartset."""
3040 This is part of the mandatory API for smartset."""
3038 return addset(self, other)
3041 return addset(self, other)
3039
3042
3040 def __sub__(self, other):
3043 def __sub__(self, other):
3041 """Returns a new object with the substraction of the two collections.
3044 """Returns a new object with the substraction of the two collections.
3042
3045
3043 This is part of the mandatory API for smartset."""
3046 This is part of the mandatory API for smartset."""
3044 c = other.__contains__
3047 c = other.__contains__
3045 return self.filter(lambda r: not c(r), condrepr=('<not %r>', other),
3048 return self.filter(lambda r: not c(r), condrepr=('<not %r>', other),
3046 cache=False)
3049 cache=False)
3047
3050
3048 def filter(self, condition, condrepr=None, cache=True):
3051 def filter(self, condition, condrepr=None, cache=True):
3049 """Returns this smartset filtered by condition as a new smartset.
3052 """Returns this smartset filtered by condition as a new smartset.
3050
3053
3051 `condition` is a callable which takes a revision number and returns a
3054 `condition` is a callable which takes a revision number and returns a
3052 boolean. Optional `condrepr` provides a printable representation of
3055 boolean. Optional `condrepr` provides a printable representation of
3053 the given `condition`.
3056 the given `condition`.
3054
3057
3055 This is part of the mandatory API for smartset."""
3058 This is part of the mandatory API for smartset."""
3056 # builtin functions cannot be cached, but they do not need to be
3059 # builtin functions cannot be cached, but they do not need to be
3057 if cache and util.safehasattr(condition, 'func_code'):
3060 if cache and util.safehasattr(condition, 'func_code'):
3058 condition = util.cachefunc(condition)
3061 condition = util.cachefunc(condition)
3059 return filteredset(self, condition, condrepr)
3062 return filteredset(self, condition, condrepr)
3060
3063
3061 class baseset(abstractsmartset):
3064 class baseset(abstractsmartset):
3062 """Basic data structure that represents a revset and contains the basic
3065 """Basic data structure that represents a revset and contains the basic
3063 operation that it should be able to perform.
3066 operation that it should be able to perform.
3064
3067
3065 Every method in this class should be implemented by any smartset class.
3068 Every method in this class should be implemented by any smartset class.
3066 """
3069 """
3067 def __init__(self, data=(), datarepr=None, istopo=False):
3070 def __init__(self, data=(), datarepr=None, istopo=False):
3068 """
3071 """
3069 datarepr: a tuple of (format, obj, ...), a function or an object that
3072 datarepr: a tuple of (format, obj, ...), a function or an object that
3070 provides a printable representation of the given data.
3073 provides a printable representation of the given data.
3071 """
3074 """
3072 self._ascending = None
3075 self._ascending = None
3073 self._istopo = istopo
3076 self._istopo = istopo
3074 if not isinstance(data, list):
3077 if not isinstance(data, list):
3075 if isinstance(data, set):
3078 if isinstance(data, set):
3076 self._set = data
3079 self._set = data
3077 # a set has no order; we pick one for stability purposes
3080 # a set has no order; we pick one for stability purposes
3078 self._ascending = True
3081 self._ascending = True
3079 data = list(data)
3082 data = list(data)
3080 self._list = data
3083 self._list = data
3081 self._datarepr = datarepr
3084 self._datarepr = datarepr
3082
3085
3083 @util.propertycache
3086 @util.propertycache
3084 def _set(self):
3087 def _set(self):
3085 return set(self._list)
3088 return set(self._list)
3086
3089
3087 @util.propertycache
3090 @util.propertycache
3088 def _asclist(self):
3091 def _asclist(self):
3089 asclist = self._list[:]
3092 asclist = self._list[:]
3090 asclist.sort()
3093 asclist.sort()
3091 return asclist
3094 return asclist
3092
3095
3093 def __iter__(self):
3096 def __iter__(self):
3094 if self._ascending is None:
3097 if self._ascending is None:
3095 return iter(self._list)
3098 return iter(self._list)
3096 elif self._ascending:
3099 elif self._ascending:
3097 return iter(self._asclist)
3100 return iter(self._asclist)
3098 else:
3101 else:
3099 return reversed(self._asclist)
3102 return reversed(self._asclist)
3100
3103
3101 def fastasc(self):
3104 def fastasc(self):
3102 return iter(self._asclist)
3105 return iter(self._asclist)
3103
3106
3104 def fastdesc(self):
3107 def fastdesc(self):
3105 return reversed(self._asclist)
3108 return reversed(self._asclist)
3106
3109
3107 @util.propertycache
3110 @util.propertycache
3108 def __contains__(self):
3111 def __contains__(self):
3109 return self._set.__contains__
3112 return self._set.__contains__
3110
3113
3111 def __nonzero__(self):
3114 def __nonzero__(self):
3112 return bool(self._list)
3115 return bool(self._list)
3113
3116
3114 def sort(self, reverse=False):
3117 def sort(self, reverse=False):
3115 self._ascending = not bool(reverse)
3118 self._ascending = not bool(reverse)
3116 self._istopo = False
3119 self._istopo = False
3117
3120
3118 def reverse(self):
3121 def reverse(self):
3119 if self._ascending is None:
3122 if self._ascending is None:
3120 self._list.reverse()
3123 self._list.reverse()
3121 else:
3124 else:
3122 self._ascending = not self._ascending
3125 self._ascending = not self._ascending
3123 self._istopo = False
3126 self._istopo = False
3124
3127
3125 def __len__(self):
3128 def __len__(self):
3126 return len(self._list)
3129 return len(self._list)
3127
3130
3128 def isascending(self):
3131 def isascending(self):
3129 """Returns True if the collection is ascending order, False if not.
3132 """Returns True if the collection is ascending order, False if not.
3130
3133
3131 This is part of the mandatory API for smartset."""
3134 This is part of the mandatory API for smartset."""
3132 if len(self) <= 1:
3135 if len(self) <= 1:
3133 return True
3136 return True
3134 return self._ascending is not None and self._ascending
3137 return self._ascending is not None and self._ascending
3135
3138
3136 def isdescending(self):
3139 def isdescending(self):
3137 """Returns True if the collection is descending order, False if not.
3140 """Returns True if the collection is descending order, False if not.
3138
3141
3139 This is part of the mandatory API for smartset."""
3142 This is part of the mandatory API for smartset."""
3140 if len(self) <= 1:
3143 if len(self) <= 1:
3141 return True
3144 return True
3142 return self._ascending is not None and not self._ascending
3145 return self._ascending is not None and not self._ascending
3143
3146
3144 def istopo(self):
3147 def istopo(self):
3145 """Is the collection is in topographical order or not.
3148 """Is the collection is in topographical order or not.
3146
3149
3147 This is part of the mandatory API for smartset."""
3150 This is part of the mandatory API for smartset."""
3148 if len(self) <= 1:
3151 if len(self) <= 1:
3149 return True
3152 return True
3150 return self._istopo
3153 return self._istopo
3151
3154
3152 def first(self):
3155 def first(self):
3153 if self:
3156 if self:
3154 if self._ascending is None:
3157 if self._ascending is None:
3155 return self._list[0]
3158 return self._list[0]
3156 elif self._ascending:
3159 elif self._ascending:
3157 return self._asclist[0]
3160 return self._asclist[0]
3158 else:
3161 else:
3159 return self._asclist[-1]
3162 return self._asclist[-1]
3160 return None
3163 return None
3161
3164
3162 def last(self):
3165 def last(self):
3163 if self:
3166 if self:
3164 if self._ascending is None:
3167 if self._ascending is None:
3165 return self._list[-1]
3168 return self._list[-1]
3166 elif self._ascending:
3169 elif self._ascending:
3167 return self._asclist[-1]
3170 return self._asclist[-1]
3168 else:
3171 else:
3169 return self._asclist[0]
3172 return self._asclist[0]
3170 return None
3173 return None
3171
3174
3172 def __repr__(self):
3175 def __repr__(self):
3173 d = {None: '', False: '-', True: '+'}[self._ascending]
3176 d = {None: '', False: '-', True: '+'}[self._ascending]
3174 s = _formatsetrepr(self._datarepr)
3177 s = _formatsetrepr(self._datarepr)
3175 if not s:
3178 if not s:
3176 l = self._list
3179 l = self._list
3177 # if _list has been built from a set, it might have a different
3180 # if _list has been built from a set, it might have a different
3178 # order from one python implementation to another.
3181 # order from one python implementation to another.
3179 # We fall back to the sorted version for a stable output.
3182 # We fall back to the sorted version for a stable output.
3180 if self._ascending is not None:
3183 if self._ascending is not None:
3181 l = self._asclist
3184 l = self._asclist
3182 s = repr(l)
3185 s = repr(l)
3183 return '<%s%s %s>' % (type(self).__name__, d, s)
3186 return '<%s%s %s>' % (type(self).__name__, d, s)
3184
3187
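# Illustrative sketch, not part of the original module: basic smartset algebra
# with baseset. The revision numbers are arbitrary examples; the '&', '+' and
# '-' operators come from abstractsmartset above and build filteredset/addset
# instances defined elsewhere in this file.
def _example_baseset_ops():
    xs = baseset([3, 1, 2])
    ys = baseset([2, 4])
    xs.sort()          # iterate ascending from now on
    both = xs & ys     # intersection, lazily filtered
    either = xs + ys   # union, an addset
    only_xs = xs - ys  # difference, lazily filtered
    return list(both), list(either), list(only_xs)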
3185 class filteredset(abstractsmartset):
3188 class filteredset(abstractsmartset):
3186 """Duck type for baseset class which iterates lazily over the revisions in
3189 """Duck type for baseset class which iterates lazily over the revisions in
3187 the subset and contains a function which tests for membership in the
3190 the subset and contains a function which tests for membership in the
3188 revset
3191 revset
3189 """
3192 """
3190 def __init__(self, subset, condition=lambda x: True, condrepr=None):
3193 def __init__(self, subset, condition=lambda x: True, condrepr=None):
3191 """
3194 """
3192 condition: a function that decides whether a revision in the subset
3195 condition: a function that decides whether a revision in the subset
3193 belongs to the revset or not.
3196 belongs to the revset or not.
3194 condrepr: a tuple of (format, obj, ...), a function or an object that
3197 condrepr: a tuple of (format, obj, ...), a function or an object that
3195 provides a printable representation of the given condition.
3198 provides a printable representation of the given condition.
3196 """
3199 """
3197 self._subset = subset
3200 self._subset = subset
3198 self._condition = condition
3201 self._condition = condition
3199 self._condrepr = condrepr
3202 self._condrepr = condrepr
3200
3203
3201 def __contains__(self, x):
3204 def __contains__(self, x):
3202 return x in self._subset and self._condition(x)
3205 return x in self._subset and self._condition(x)
3203
3206
3204 def __iter__(self):
3207 def __iter__(self):
3205 return self._iterfilter(self._subset)
3208 return self._iterfilter(self._subset)
3206
3209
3207 def _iterfilter(self, it):
3210 def _iterfilter(self, it):
3208 cond = self._condition
3211 cond = self._condition
3209 for x in it:
3212 for x in it:
3210 if cond(x):
3213 if cond(x):
3211 yield x
3214 yield x
3212
3215
3213 @property
3216 @property
3214 def fastasc(self):
3217 def fastasc(self):
3215 it = self._subset.fastasc
3218 it = self._subset.fastasc
3216 if it is None:
3219 if it is None:
3217 return None
3220 return None
3218 return lambda: self._iterfilter(it())
3221 return lambda: self._iterfilter(it())
3219
3222
3220 @property
3223 @property
3221 def fastdesc(self):
3224 def fastdesc(self):
3222 it = self._subset.fastdesc
3225 it = self._subset.fastdesc
3223 if it is None:
3226 if it is None:
3224 return None
3227 return None
3225 return lambda: self._iterfilter(it())
3228 return lambda: self._iterfilter(it())
3226
3229
3227 def __nonzero__(self):
3230 def __nonzero__(self):
3228 fast = None
3231 fast = None
3229 candidates = [self.fastasc if self.isascending() else None,
3232 candidates = [self.fastasc if self.isascending() else None,
3230 self.fastdesc if self.isdescending() else None,
3233 self.fastdesc if self.isdescending() else None,
3231 self.fastasc,
3234 self.fastasc,
3232 self.fastdesc]
3235 self.fastdesc]
3233 for candidate in candidates:
3236 for candidate in candidates:
3234 if candidate is not None:
3237 if candidate is not None:
3235 fast = candidate
3238 fast = candidate
3236 break
3239 break
3237
3240
3238 if fast is not None:
3241 if fast is not None:
3239 it = fast()
3242 it = fast()
3240 else:
3243 else:
3241 it = self
3244 it = self
3242
3245
3243 for r in it:
3246 for r in it:
3244 return True
3247 return True
3245 return False
3248 return False
3246
3249
3247 def __len__(self):
3250 def __len__(self):
3248 # Basic implementation to be changed in future patches.
3251 # Basic implementation to be changed in future patches.
3249 # until this gets improved, we use a generator expression
3252 # until this gets improved, we use a generator expression
3250 # here, since list comprehensions are free to call __len__ again,
3253 # here, since list comprehensions are free to call __len__ again,
3251 # causing infinite recursion
3254 # causing infinite recursion
3252 l = baseset(r for r in self)
3255 l = baseset(r for r in self)
3253 return len(l)
3256 return len(l)
3254
3257
3255 def sort(self, reverse=False):
3258 def sort(self, reverse=False):
3256 self._subset.sort(reverse=reverse)
3259 self._subset.sort(reverse=reverse)
3257
3260
3258 def reverse(self):
3261 def reverse(self):
3259 self._subset.reverse()
3262 self._subset.reverse()
3260
3263
3261 def isascending(self):
3264 def isascending(self):
3262 return self._subset.isascending()
3265 return self._subset.isascending()
3263
3266
3264 def isdescending(self):
3267 def isdescending(self):
3265 return self._subset.isdescending()
3268 return self._subset.isdescending()
3266
3269
3267 def istopo(self):
3270 def istopo(self):
3268 return self._subset.istopo()
3271 return self._subset.istopo()
3269
3272
3270 def first(self):
3273 def first(self):
3271 for x in self:
3274 for x in self:
3272 return x
3275 return x
3273 return None
3276 return None
3274
3277
3275 def last(self):
3278 def last(self):
3276 it = None
3279 it = None
3277 if self.isascending():
3280 if self.isascending():
3278 it = self.fastdesc
3281 it = self.fastdesc
3279 elif self.isdescending():
3282 elif self.isdescending():
3280 it = self.fastasc
3283 it = self.fastasc
3281 if it is not None:
3284 if it is not None:
3282 for x in it():
3285 for x in it():
3283 return x
3286 return x
3284 return None #empty case
3287 return None #empty case
3285 else:
3288 else:
3286 x = None
3289 x = None
3287 for x in self:
3290 for x in self:
3288 pass
3291 pass
3289 return x
3292 return x
3290
3293
3291 def __repr__(self):
3294 def __repr__(self):
3292 xs = [repr(self._subset)]
3295 xs = [repr(self._subset)]
3293 s = _formatsetrepr(self._condrepr)
3296 s = _formatsetrepr(self._condrepr)
3294 if s:
3297 if s:
3295 xs.append(s)
3298 xs.append(s)
3296 return '<%s %s>' % (type(self).__name__, ', '.join(xs))
3299 return '<%s %s>' % (type(self).__name__, ', '.join(xs))
3297
3300
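# Illustrative sketch, not part of the original module: wrapping a baseset in
# a filteredset by hand; abstractsmartset.filter() above builds these the same
# way. The condition and revisions are examples only.
def _example_filteredset():
    evens = filteredset(baseset([1, 2, 3, 4]),
                        condition=lambda r: r % 2 == 0,
                        condrepr='<even revisions>')
    return list(evens)  # -> [2, 4]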
3298 def _iterordered(ascending, iter1, iter2):
3301 def _iterordered(ascending, iter1, iter2):
3299 """produce an ordered iteration from two iterators with the same order
3302 """produce an ordered iteration from two iterators with the same order
3300
3303
3301 The 'ascending' parameter is used to indicate the iteration direction.
3304 The 'ascending' parameter is used to indicate the iteration direction.
3302 """
3305 """
3303 choice = max
3306 choice = max
3304 if ascending:
3307 if ascending:
3305 choice = min
3308 choice = min
3306
3309
3307 val1 = None
3310 val1 = None
3308 val2 = None
3311 val2 = None
3309 try:
3312 try:
3310 # Consume both iterators in an ordered way until one is empty
3313 # Consume both iterators in an ordered way until one is empty
3311 while True:
3314 while True:
3312 if val1 is None:
3315 if val1 is None:
3313 val1 = next(iter1)
3316 val1 = next(iter1)
3314 if val2 is None:
3317 if val2 is None:
3315 val2 = next(iter2)
3318 val2 = next(iter2)
3316 n = choice(val1, val2)
3319 n = choice(val1, val2)
3317 yield n
3320 yield n
3318 if val1 == n:
3321 if val1 == n:
3319 val1 = None
3322 val1 = None
3320 if val2 == n:
3323 if val2 == n:
3321 val2 = None
3324 val2 = None
3322 except StopIteration:
3325 except StopIteration:
3323 # Flush any remaining values and consume the other one
3326 # Flush any remaining values and consume the other one
3324 it = iter2
3327 it = iter2
3325 if val1 is not None:
3328 if val1 is not None:
3326 yield val1
3329 yield val1
3327 it = iter1
3330 it = iter1
3328 elif val2 is not None:
3331 elif val2 is not None:
3329 # might have been equality and both are empty
3332 # might have been equality and both are empty
3330 yield val2
3333 yield val2
3331 for val in it:
3334 for val in it:
3332 yield val
3335 yield val
3333
3336
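# Illustrative sketch, not part of the original module: merging two already
# ascending iterators with _iterordered(), which also drops values present in
# both. The numbers are examples only.
def _example_iterordered():
    merged = _iterordered(True, iter([1, 3, 5]), iter([2, 3, 6]))
    return list(merged)  # -> [1, 2, 3, 5, 6]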
3334 class addset(abstractsmartset):
3337 class addset(abstractsmartset):
3335 """Represent the addition of two sets
3338 """Represent the addition of two sets
3336
3339
3337 Wrapper structure for lazily adding two structures without losing much
3340 Wrapper structure for lazily adding two structures without losing much
3338 performance on the __contains__ method
3341 performance on the __contains__ method
3339
3342
3340 If the ascending attribute is set, that means the two structures are
3343 If the ascending attribute is set, that means the two structures are
3341 ordered in either an ascending or descending way. Therefore, we can add
3344 ordered in either an ascending or descending way. Therefore, we can add
3342 them maintaining the order by iterating over both at the same time
3345 them maintaining the order by iterating over both at the same time
3343
3346
3344 >>> xs = baseset([0, 3, 2])
3347 >>> xs = baseset([0, 3, 2])
3345 >>> ys = baseset([5, 2, 4])
3348 >>> ys = baseset([5, 2, 4])
3346
3349
3347 >>> rs = addset(xs, ys)
3350 >>> rs = addset(xs, ys)
3348 >>> bool(rs), 0 in rs, 1 in rs, 5 in rs, rs.first(), rs.last()
3351 >>> bool(rs), 0 in rs, 1 in rs, 5 in rs, rs.first(), rs.last()
3349 (True, True, False, True, 0, 4)
3352 (True, True, False, True, 0, 4)
3350 >>> rs = addset(xs, baseset([]))
3353 >>> rs = addset(xs, baseset([]))
3351 >>> bool(rs), 0 in rs, 1 in rs, rs.first(), rs.last()
3354 >>> bool(rs), 0 in rs, 1 in rs, rs.first(), rs.last()
3352 (True, True, False, 0, 2)
3355 (True, True, False, 0, 2)
3353 >>> rs = addset(baseset([]), baseset([]))
3356 >>> rs = addset(baseset([]), baseset([]))
3354 >>> bool(rs), 0 in rs, rs.first(), rs.last()
3357 >>> bool(rs), 0 in rs, rs.first(), rs.last()
3355 (False, False, None, None)
3358 (False, False, None, None)
3356
3359
3357 iterate unsorted:
3360 iterate unsorted:
3358 >>> rs = addset(xs, ys)
3361 >>> rs = addset(xs, ys)
3359 >>> # (use generator because pypy could call len())
3362 >>> # (use generator because pypy could call len())
3360 >>> list(x for x in rs) # without _genlist
3363 >>> list(x for x in rs) # without _genlist
3361 [0, 3, 2, 5, 4]
3364 [0, 3, 2, 5, 4]
3362 >>> assert not rs._genlist
3365 >>> assert not rs._genlist
3363 >>> len(rs)
3366 >>> len(rs)
3364 5
3367 5
3365 >>> [x for x in rs] # with _genlist
3368 >>> [x for x in rs] # with _genlist
3366 [0, 3, 2, 5, 4]
3369 [0, 3, 2, 5, 4]
3367 >>> assert rs._genlist
3370 >>> assert rs._genlist
3368
3371
3369 iterate ascending:
3372 iterate ascending:
3370 >>> rs = addset(xs, ys, ascending=True)
3373 >>> rs = addset(xs, ys, ascending=True)
3371 >>> # (use generator because pypy could call len())
3374 >>> # (use generator because pypy could call len())
3372 >>> list(x for x in rs), list(x for x in rs.fastasc()) # without _asclist
3375 >>> list(x for x in rs), list(x for x in rs.fastasc()) # without _asclist
3373 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
3376 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
3374 >>> assert not rs._asclist
3377 >>> assert not rs._asclist
3375 >>> len(rs)
3378 >>> len(rs)
3376 5
3379 5
3377 >>> [x for x in rs], [x for x in rs.fastasc()]
3380 >>> [x for x in rs], [x for x in rs.fastasc()]
3378 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
3381 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
3379 >>> assert rs._asclist
3382 >>> assert rs._asclist
3380
3383
3381 iterate descending:
3384 iterate descending:
3382 >>> rs = addset(xs, ys, ascending=False)
3385 >>> rs = addset(xs, ys, ascending=False)
3383 >>> # (use generator because pypy could call len())
3386 >>> # (use generator because pypy could call len())
3384 >>> list(x for x in rs), list(x for x in rs.fastdesc()) # without _asclist
3387 >>> list(x for x in rs), list(x for x in rs.fastdesc()) # without _asclist
3385 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
3388 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
3386 >>> assert not rs._asclist
3389 >>> assert not rs._asclist
3387 >>> len(rs)
3390 >>> len(rs)
3388 5
3391 5
3389 >>> [x for x in rs], [x for x in rs.fastdesc()]
3392 >>> [x for x in rs], [x for x in rs.fastdesc()]
3390 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
3393 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
3391 >>> assert rs._asclist
3394 >>> assert rs._asclist
3392
3395
3393 iterate ascending without fastasc:
3396 iterate ascending without fastasc:
3394 >>> rs = addset(xs, generatorset(ys), ascending=True)
3397 >>> rs = addset(xs, generatorset(ys), ascending=True)
3395 >>> assert rs.fastasc is None
3398 >>> assert rs.fastasc is None
3396 >>> [x for x in rs]
3399 >>> [x for x in rs]
3397 [0, 2, 3, 4, 5]
3400 [0, 2, 3, 4, 5]
3398
3401
3399 iterate descending without fastdesc:
3402 iterate descending without fastdesc:
3400 >>> rs = addset(generatorset(xs), ys, ascending=False)
3403 >>> rs = addset(generatorset(xs), ys, ascending=False)
3401 >>> assert rs.fastdesc is None
3404 >>> assert rs.fastdesc is None
3402 >>> [x for x in rs]
3405 >>> [x for x in rs]
3403 [5, 4, 3, 2, 0]
3406 [5, 4, 3, 2, 0]
3404 """
3407 """
3405 def __init__(self, revs1, revs2, ascending=None):
3408 def __init__(self, revs1, revs2, ascending=None):
3406 self._r1 = revs1
3409 self._r1 = revs1
3407 self._r2 = revs2
3410 self._r2 = revs2
3408 self._iter = None
3411 self._iter = None
3409 self._ascending = ascending
3412 self._ascending = ascending
3410 self._genlist = None
3413 self._genlist = None
3411 self._asclist = None
3414 self._asclist = None
3412
3415
3413 def __len__(self):
3416 def __len__(self):
3414 return len(self._list)
3417 return len(self._list)
3415
3418
3416 def __nonzero__(self):
3419 def __nonzero__(self):
3417 return bool(self._r1) or bool(self._r2)
3420 return bool(self._r1) or bool(self._r2)
3418
3421
3419 @util.propertycache
3422 @util.propertycache
3420 def _list(self):
3423 def _list(self):
3421 if not self._genlist:
3424 if not self._genlist:
3422 self._genlist = baseset(iter(self))
3425 self._genlist = baseset(iter(self))
3423 return self._genlist
3426 return self._genlist
3424
3427
3425 def __iter__(self):
3428 def __iter__(self):
3426 """Iterate over both collections without repeating elements
3429 """Iterate over both collections without repeating elements
3427
3430
3428 If the ascending attribute is not set, iterate over the first one and
3431 If the ascending attribute is not set, iterate over the first one and
3429 then over the second one, checking for membership on the first one so we
3432 then over the second one, checking for membership on the first one so we
3430 don't yield any duplicates.
3433 don't yield any duplicates.
3431
3434
3432 If the ascending attribute is set, iterate over both collections at the
3435 If the ascending attribute is set, iterate over both collections at the
3433 same time, yielding only one value at a time in the given order.
3436 same time, yielding only one value at a time in the given order.
3434 """
3437 """
3435 if self._ascending is None:
3438 if self._ascending is None:
3436 if self._genlist:
3439 if self._genlist:
3437 return iter(self._genlist)
3440 return iter(self._genlist)
3438 def arbitraryordergen():
3441 def arbitraryordergen():
3439 for r in self._r1:
3442 for r in self._r1:
3440 yield r
3443 yield r
3441 inr1 = self._r1.__contains__
3444 inr1 = self._r1.__contains__
3442 for r in self._r2:
3445 for r in self._r2:
3443 if not inr1(r):
3446 if not inr1(r):
3444 yield r
3447 yield r
3445 return arbitraryordergen()
3448 return arbitraryordergen()
3446 # try to use our own fast iterator if it exists
3449 # try to use our own fast iterator if it exists
3447 self._trysetasclist()
3450 self._trysetasclist()
3448 if self._ascending:
3451 if self._ascending:
3449 attr = 'fastasc'
3452 attr = 'fastasc'
3450 else:
3453 else:
3451 attr = 'fastdesc'
3454 attr = 'fastdesc'
3452 it = getattr(self, attr)
3455 it = getattr(self, attr)
3453 if it is not None:
3456 if it is not None:
3454 return it()
3457 return it()
3455 # maybe half of the component supports fast
3458 # maybe half of the component supports fast
3456 # get iterator for _r1
3459 # get iterator for _r1
3457 iter1 = getattr(self._r1, attr)
        iter1 = getattr(self._r1, attr)
        if iter1 is None:
            # let's avoid side effect (not sure it matters)
            iter1 = iter(sorted(self._r1, reverse=not self._ascending))
        else:
            iter1 = iter1()
        # get iterator for _r2
        iter2 = getattr(self._r2, attr)
        if iter2 is None:
            # let's avoid side effect (not sure it matters)
            iter2 = iter(sorted(self._r2, reverse=not self._ascending))
        else:
            iter2 = iter2()
        return _iterordered(self._ascending, iter1, iter2)

    def _trysetasclist(self):
        """populate the _asclist attribute if possible and necessary"""
        if self._genlist is not None and self._asclist is None:
            self._asclist = sorted(self._genlist)

    @property
    def fastasc(self):
        self._trysetasclist()
        if self._asclist is not None:
            return self._asclist.__iter__
        iter1 = self._r1.fastasc
        iter2 = self._r2.fastasc
        if None in (iter1, iter2):
            return None
        return lambda: _iterordered(True, iter1(), iter2())

    @property
    def fastdesc(self):
        self._trysetasclist()
        if self._asclist is not None:
            return self._asclist.__reversed__
        iter1 = self._r1.fastdesc
        iter2 = self._r2.fastdesc
        if None in (iter1, iter2):
            return None
        return lambda: _iterordered(False, iter1(), iter2())

    def __contains__(self, x):
        return x in self._r1 or x in self._r2

    def sort(self, reverse=False):
        """Sort the added set

        For this we use the cached list with all the generated values and if we
        know they are ascending or descending we can sort them in a smart way.
        """
        self._ascending = not reverse

    def isascending(self):
        return self._ascending is not None and self._ascending

    def isdescending(self):
        return self._ascending is not None and not self._ascending

    def istopo(self):
        # not worth the trouble asserting if the two sets combined are still
        # in topological order. Use the sort() predicate to explicitly sort
        # again instead.
        return False

    def reverse(self):
        if self._ascending is None:
            self._list.reverse()
        else:
            self._ascending = not self._ascending

    def first(self):
        for x in self:
            return x
        return None

    def last(self):
        self.reverse()
        val = self.first()
        self.reverse()
        return val

    def __repr__(self):
        d = {None: '', False: '-', True: '+'}[self._ascending]
        return '<%s%s %r, %r>' % (type(self).__name__, d, self._r1, self._r2)

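# The following is a minimal sketch (illustration only, not part of
# revset.py) of the lazy merge that addset.fastasc/fastdesc delegate to:
# two already-sorted iterators are interleaved on demand, so the union is
# never materialized up front. _merge_ascending is a hypothetical stand-in
# for the module's _iterordered(True, ...) helper.
def _merge_ascending(iter1, iter2):
    nulltop = object()
    top1 = next(iter1, nulltop)
    top2 = next(iter2, nulltop)
    while top1 is not nulltop or top2 is not nulltop:
        if top2 is nulltop or (top1 is not nulltop and top1 < top2):
            # second side exhausted, or first head is smaller
            yield top1
            top1 = next(iter1, nulltop)
        elif top1 is nulltop or top2 < top1:
            # first side exhausted, or second head is smaller
            yield top2
            top2 = next(iter2, nulltop)
        else:
            # equal heads: emit the value once and advance both sides
            yield top1
            top1 = next(iter1, nulltop)
            top2 = next(iter2, nulltop)
# e.g. list(_merge_ascending(iter([1, 3, 5]), iter([2, 3, 6])))
# gives [1, 2, 3, 5, 6], emitting the shared value 3 only once.
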
class generatorset(abstractsmartset):
    """Wrap a generator for lazy iteration

    Wrapper structure for generators that provides lazy membership and can
    be iterated more than once.
    When asked for membership it generates values until either it finds the
    requested one or has gone through all the elements in the generator
    """
    def __init__(self, gen, iterasc=None):
        """
        gen: a generator producing the values for the generatorset.
        """
        self._gen = gen
        self._asclist = None
        self._cache = {}
        self._genlist = []
        self._finished = False
        self._ascending = True
        if iterasc is not None:
            if iterasc:
                self.fastasc = self._iterator
                self.__contains__ = self._asccontains
            else:
                self.fastdesc = self._iterator
                self.__contains__ = self._desccontains

    def __nonzero__(self):
        # Do not use 'for r in self' because it will enforce the iteration
        # order (default ascending), possibly unrolling a whole descending
        # iterator.
        if self._genlist:
            return True
        for r in self._consumegen():
            return True
        return False

    def __contains__(self, x):
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True

        self._cache[x] = False
        return False

    def _asccontains(self, x):
        """version of contains optimised for ascending generator"""
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True
            if l > x:
                break

        self._cache[x] = False
        return False

    def _desccontains(self, x):
        """version of contains optimised for descending generator"""
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True
            if l < x:
                break

        self._cache[x] = False
        return False

    def __iter__(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        if it is not None:
            return it()
        # we need to consume the iterator
        for x in self._consumegen():
            pass
        # recall the same code
        return iter(self)

    def _iterator(self):
        if self._finished:
            return iter(self._genlist)

        # We have to use this complex iteration strategy to allow multiple
        # iterations at the same time. We need to be able to catch revision
        # removed from _consumegen and added to genlist in another instance.
        #
        # Getting rid of it would provide an about 15% speed up on this
        # iteration.
        genlist = self._genlist
        nextrev = self._consumegen().next
        _len = len # cache global lookup
        def gen():
            i = 0
            while True:
                if i < _len(genlist):
                    yield genlist[i]
                else:
                    yield nextrev()
                i += 1
        return gen()

    def _consumegen(self):
        cache = self._cache
        genlist = self._genlist.append
        for item in self._gen:
            cache[item] = True
            genlist(item)
            yield item
        if not self._finished:
            self._finished = True
            asc = self._genlist[:]
            asc.sort()
            self._asclist = asc
            self.fastasc = asc.__iter__
            self.fastdesc = asc.__reversed__

    def __len__(self):
        for x in self._consumegen():
            pass
        return len(self._genlist)

    def sort(self, reverse=False):
        self._ascending = not reverse

    def reverse(self):
        self._ascending = not self._ascending

    def isascending(self):
        return self._ascending

    def isdescending(self):
        return not self._ascending

    def istopo(self):
        # not worth the trouble asserting if the two sets combined are still
        # in topological order. Use the sort() predicate to explicitly sort
        # again instead.
        return False

    def first(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        if it is None:
            # we need to consume all and try again
            for x in self._consumegen():
                pass
            return self.first()
        return next(it(), None)

    def last(self):
        if self._ascending:
            it = self.fastdesc
        else:
            it = self.fastasc
        if it is None:
            # we need to consume all and try again
            for x in self._consumegen():
                pass
            return self.last()
        return next(it(), None)

    def __repr__(self):
        d = {False: '-', True: '+'}[self._ascending]
        return '<%s%s>' % (type(self).__name__, d)

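# A minimal usage sketch (illustration only, not part of revset.py;
# _generatorset_example is a made-up name): the same wrapped generator can
# be probed for membership and iterated several times, and values are only
# generated as far as they are needed.
def _generatorset_example():
    gs = generatorset((r for r in (0, 2, 5, 9)), iterasc=True)
    assert 5 in gs                   # generates values no further than 5
    assert list(gs) == [0, 2, 5, 9]  # cached values replayed, the rest generated
    assert list(gs) == [0, 2, 5, 9]  # safe to iterate more than once
    assert gs.first() == 0 and gs.last() == 9
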
class spanset(abstractsmartset):
    """Duck type for baseset class which represents a range of revisions and
    can work lazily and without having all the range in memory

    Note that spanset(x, y) behaves almost like xrange(x, y) except for two
    notable points:
    - when x > y the range is automatically descending,
    - revisions filtered by this repoview will be skipped.

    """
    def __init__(self, repo, start=0, end=None):
        """
        start: first revision included in the set
               (defaults to 0)
        end:   first revision excluded (last+1)
               (defaults to len(repo))

        Spanset will be descending if `end` < `start`.
        """
        if end is None:
            end = len(repo)
        self._ascending = start <= end
        if not self._ascending:
            start, end = end + 1, start + 1
        self._start = start
        self._end = end
        self._hiddenrevs = repo.changelog.filteredrevs

    def sort(self, reverse=False):
        self._ascending = not reverse

    def reverse(self):
        self._ascending = not self._ascending

    def istopo(self):
        # not worth the trouble asserting if the two sets combined are still
        # in topological order. Use the sort() predicate to explicitly sort
        # again instead.
        return False

    def _iterfilter(self, iterrange):
        s = self._hiddenrevs
        for r in iterrange:
            if r not in s:
                yield r

    def __iter__(self):
        if self._ascending:
            return self.fastasc()
        else:
            return self.fastdesc()

    def fastasc(self):
        iterrange = xrange(self._start, self._end)
        if self._hiddenrevs:
            return self._iterfilter(iterrange)
        return iter(iterrange)

    def fastdesc(self):
        iterrange = xrange(self._end - 1, self._start - 1, -1)
        if self._hiddenrevs:
            return self._iterfilter(iterrange)
        return iter(iterrange)

    def __contains__(self, rev):
        hidden = self._hiddenrevs
        return ((self._start <= rev < self._end)
                and not (hidden and rev in hidden))

    def __nonzero__(self):
        for r in self:
            return True
        return False

    def __len__(self):
        if not self._hiddenrevs:
            return abs(self._end - self._start)
        else:
            count = 0
            start = self._start
            end = self._end
            for rev in self._hiddenrevs:
                if (end < rev <= start) or (start <= rev < end):
                    count += 1
            return abs(self._end - self._start) - count

    def isascending(self):
        return self._ascending

    def isdescending(self):
        return not self._ascending

    def first(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        for x in it():
            return x
        return None

    def last(self):
        if self._ascending:
            it = self.fastdesc
        else:
            it = self.fastasc
        for x in it():
            return x
        return None

    def __repr__(self):
        d = {False: '-', True: '+'}[self._ascending]
        return '<%s%s %d:%d>' % (type(self).__name__, d,
                                 self._start, self._end - 1)

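# A rough usage sketch (illustration only, not part of revset.py): spanset
# only needs an object exposing __len__ and changelog.filteredrevs, so the
# _stubrepo class below is a hypothetical stand-in, not a real repository.
def _spanset_example():
    class _stubchangelog(object):
        filteredrevs = frozenset()
    class _stubrepo(object):
        changelog = _stubchangelog()
        def __len__(self):
            return 10
    repo = _stubrepo()
    assert list(spanset(repo, 0, 5)) == [0, 1, 2, 3, 4]
    assert list(spanset(repo, 5, 0)) == [5, 4, 3, 2, 1]  # descending when start > end
    assert 3 in spanset(repo, 0, 5) and 7 not in spanset(repo, 0, 5)
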
class fullreposet(spanset):
    """a set containing all revisions in the repo

    This class exists to host special optimization and magic to handle virtual
    revisions such as "null".
    """

    def __init__(self, repo):
        super(fullreposet, self).__init__(repo)

    def __and__(self, other):
        """As self contains the whole repo, all of the other set should also be
        in self. Therefore `self & other = other`.

        This boldly assumes the other contains valid revs only.
        """
        # other is not a smartset, make it so
        if not util.safehasattr(other, 'isascending'):
            # filter out hidden revisions
            # (this boldly assumes all smartsets are pure)
            #
            # `other` was used with "&", let's assume this is a set-like
            # object.
            other = baseset(other - self._hiddenrevs)

        other.sort(reverse=self.isdescending())
        return other

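# A rough usage sketch (illustration only, not part of revset.py), using
# the same kind of hypothetical repository stand-in as the spanset sketch
# above: the `&` shortcut hands back the other operand, wrapped in a
# baseset and re-sorted to match this set's direction, rather than
# computing a real intersection.
def _fullreposet_example():
    class _stubchangelog(object):
        filteredrevs = frozenset()
    class _stubrepo(object):
        changelog = _stubchangelog()
        def __len__(self):
            return 10
    frs = fullreposet(_stubrepo())
    result = frs & set([9, 2, 5])     # a plain set is wrapped in a baseset
    assert list(result) == [2, 5, 9]  # and sorted ascending, like frs itself
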
def prettyformatset(revs):
    lines = []
    rs = repr(revs)
    p = 0
    while p < len(rs):
        q = rs.find('<', p + 1)
        if q < 0:
            q = len(rs)
        l = rs.count('<', 0, p) - rs.count('>', 0, p)
        assert l >= 0
        lines.append((l, rs[p:q].rstrip()))
        p = q
    return '\n'.join(' ' * l + s for l, s in lines)

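# A small illustration (not part of revset.py): prettyformatset() splits a
# smartset repr at every '<' and indents each fragment by its nesting
# depth, which helps when reading deeply nested sets while debugging.
# _stubnested is a hypothetical object standing in for a real composed
# smartset; its repr merely mimics the nested '<...>' shape shown above.
def _prettyformatset_example():
    class _stubnested(object):
        def __repr__(self):
            return '<filteredset <spanset+ 0:9>, <not 5>>'
    # prints the repr one fragment per line, indented by nesting depth
    print(prettyformatset(_stubnested()))
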
def loadpredicate(ui, extname, registrarobj):
    """Load revset predicates from specified registrarobj
    """
    for name, func in registrarobj._table.iteritems():
        symbols[name] = func
        if func._safe:
            safesymbols.add(name)

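# A hypothetical extension-side sketch (illustration only, not part of
# revset.py; 'mostlyempty' is a made-up predicate name): an extension
# declares its own registrar.revsetpredicate() table, decorates predicate
# functions with it, and that table is then passed to loadpredicate()
# above when the extension is loaded.
def _loadpredicate_example(ui):
    examplepredicate = registrar.revsetpredicate()

    @examplepredicate('mostlyempty()', safe=True)
    def mostlyempty(repo, subset, x):
        """Changesets that touch no files."""
        return subset.filter(lambda r: not repo[r].files())

    loadpredicate(ui, 'example', examplepredicate)
    # the new symbol is now visible to the revset parser, and safe=True
    # also adds it to safesymbols
    assert 'mostlyempty' in symbols and 'mostlyempty' in safesymbols
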
# load built-in predicates explicitly to set up safesymbols
loadpredicate(None, None, predicate)

# tell hggettext to extract docstrings from these functions:
i18nfunctions = symbols.values()