revset: add stub to handle parentpost operation...
Yuya Nishihara
r29931:d2d1be30 default
@@ -1,3785 +1,3788 @@
# revset.py - revision set queries for mercurial
#
# Copyright 2010 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import heapq
import re

from .i18n import _
from . import (
    destutil,
    encoding,
    error,
    hbisect,
    match as matchmod,
    node,
    obsolete as obsmod,
    parser,
    pathutil,
    phases,
    registrar,
    repoview,
    util,
)

def _revancestors(repo, revs, followfirst):
    """Like revlog.ancestors(), but supports followfirst."""
    if followfirst:
        cut = 1
    else:
        cut = None
    cl = repo.changelog

    def iterate():
        revs.sort(reverse=True)
        irevs = iter(revs)
        h = []

        inputrev = next(irevs, None)
        if inputrev is not None:
            heapq.heappush(h, -inputrev)

        seen = set()
        while h:
            current = -heapq.heappop(h)
            if current == inputrev:
                inputrev = next(irevs, None)
                if inputrev is not None:
                    heapq.heappush(h, -inputrev)
            if current not in seen:
                seen.add(current)
                yield current
                for parent in cl.parentrevs(current)[:cut]:
                    if parent != node.nullrev:
                        heapq.heappush(h, -parent)

    return generatorset(iterate(), iterasc=False)

def _revdescendants(repo, revs, followfirst):
    """Like revlog.descendants() but supports followfirst."""
    if followfirst:
        cut = 1
    else:
        cut = None

    def iterate():
        cl = repo.changelog
        # XXX this should be 'parentset.min()' assuming 'parentset' is a
        # smartset (and if it is not, it should.)
        first = min(revs)
        nullrev = node.nullrev
        if first == nullrev:
            # Are there nodes with a null first parent and a non-null
            # second one? Maybe. Do we care? Probably not.
            for i in cl:
                yield i
        else:
            seen = set(revs)
            for i in cl.revs(first + 1):
                for x in cl.parentrevs(i)[:cut]:
                    if x != nullrev and x in seen:
                        seen.add(i)
                        yield i
                        break

    return generatorset(iterate(), iterasc=True)

def _reachablerootspure(repo, minroot, roots, heads, includepath):
    """return (heads(::<roots> and ::<heads>))

    If includepath is True, return (<roots>::<heads>)."""
    if not roots:
        return []
    parentrevs = repo.changelog.parentrevs
    roots = set(roots)
    visit = list(heads)
    reachable = set()
    seen = {}
    # prefetch all the things! (because python is slow)
    reached = reachable.add
    dovisit = visit.append
    nextvisit = visit.pop
    # open-code the post-order traversal due to the tiny size of
    # sys.getrecursionlimit()
    while visit:
        rev = nextvisit()
        if rev in roots:
            reached(rev)
            if not includepath:
                continue
        parents = parentrevs(rev)
        seen[rev] = parents
        for parent in parents:
            if parent >= minroot and parent not in seen:
                dovisit(parent)
    if not reachable:
        return baseset()
    if not includepath:
        return reachable
    for rev in sorted(seen):
        for parent in seen[rev]:
            if parent in reachable:
                reached(rev)
    return reachable

def reachableroots(repo, roots, heads, includepath=False):
    """return (heads(::<roots> and ::<heads>))

    If includepath is True, return (<roots>::<heads>)."""
    if not roots:
        return baseset()
    minroot = roots.min()
    roots = list(roots)
    heads = list(heads)
    try:
        revs = repo.changelog.reachableroots(minroot, heads, roots, includepath)
    except AttributeError:
        revs = _reachablerootspure(repo, minroot, roots, heads, includepath)
    revs = baseset(revs)
    revs.sort()
    return revs

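# A quick illustration of the semantics above (a sketch only, assuming a
# simple linear history 0 -> 1 -> 2 -> 3 where each revision's only parent is
# the previous one; roots and heads are shown as plain sets for brevity):
#
#   reachableroots(repo, roots={1}, heads={3})                   -> {1}
#       heads(::1 and ::3) is revision 1 itself
#   reachableroots(repo, roots={1}, heads={3}, includepath=True) -> {1, 2, 3}
#       1::3 keeps every revision on the path from 1 to 3
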
elements = {
    # token-type: binding-strength, primary, prefix, infix, suffix
    "(": (21, None, ("group", 1, ")"), ("func", 1, ")"), None),
    "##": (20, None, None, ("_concat", 20), None),
    "~": (18, None, None, ("ancestor", 18), None),
    "^": (18, None, None, ("parent", 18), "parentpost"),
    "-": (5, None, ("negate", 19), ("minus", 5), None),
    "::": (17, None, ("dagrangepre", 17), ("dagrange", 17), "dagrangepost"),
    "..": (17, None, ("dagrangepre", 17), ("dagrange", 17), "dagrangepost"),
    ":": (15, "rangeall", ("rangepre", 15), ("range", 15), "rangepost"),
    "not": (10, None, ("not", 10), None, None),
    "!": (10, None, ("not", 10), None, None),
    "and": (5, None, None, ("and", 5), None),
    "&": (5, None, None, ("and", 5), None),
    "%": (5, None, None, ("only", 5), "onlypost"),
    "or": (4, None, None, ("or", 4), None),
    "|": (4, None, None, ("or", 4), None),
    "+": (4, None, None, ("or", 4), None),
    "=": (3, None, None, ("keyvalue", 3), None),
    ",": (2, None, None, ("list", 2), None),
    ")": (0, None, None, None, None),
    "symbol": (0, "symbol", None, None, None),
    "string": (0, "string", None, None, None),
    "end": (0, None, None, None, None),
}

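# A sketch of how the table above drives parsing (assuming the generic parser
# in mercurial.parser and the parse() helper defined later in this module):
#
#   "1::2"  -> ('dagrange', ('symbol', '1'), ('symbol', '2'))   # infix "::"
#   "::2"   -> ('dagrangepre', ('symbol', '2'))                 # prefix "::"
#   "tip^"  -> ('parentpost', ('symbol', 'tip'))                # suffix "^"
#
# The suffix name in this table becomes the node type of the parsed tree, so
# the last form is what the 'parentpost' stub added by this changeset handles.
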
keywords = set(['and', 'or', 'not'])

# default set of valid characters for the initial letter of symbols
_syminitletters = set(c for c in [chr(i) for i in xrange(256)]
                      if c.isalnum() or c in '._@' or ord(c) > 127)

# default set of valid characters for non-initial letters of symbols
_symletters = set(c for c in [chr(i) for i in xrange(256)]
                  if c.isalnum() or c in '-._/@' or ord(c) > 127)

def tokenize(program, lookup=None, syminitletters=None, symletters=None):
    '''
    Parse a revset statement into a stream of tokens

    ``syminitletters`` is the set of valid characters for the initial
    letter of symbols.

    By default, character ``c`` is recognized as valid for initial
    letter of symbols, if ``c.isalnum() or c in '._@' or ord(c) > 127``.

    ``symletters`` is the set of valid characters for non-initial
    letters of symbols.

    By default, character ``c`` is recognized as valid for non-initial
    letters of symbols, if ``c.isalnum() or c in '-._/@' or ord(c) > 127``.

    Check that @ is a valid unquoted token character (issue3686):
    >>> list(tokenize("@::"))
    [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]

    '''
    if syminitletters is None:
        syminitletters = _syminitletters
    if symletters is None:
        symletters = _symletters

    if program and lookup:
        # attempt to parse old-style ranges first to deal with
        # things like old-tag which contain query metacharacters
        parts = program.split(':', 1)
        if all(lookup(sym) for sym in parts if sym):
            if parts[0]:
                yield ('symbol', parts[0], 0)
            if len(parts) > 1:
                s = len(parts[0])
                yield (':', None, s)
                if parts[1]:
                    yield ('symbol', parts[1], s + 1)
            yield ('end', None, len(program))
            return

    pos, l = 0, len(program)
    while pos < l:
        c = program[pos]
        if c.isspace(): # skip inter-token whitespace
            pass
        elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
            yield ('::', None, pos)
            pos += 1 # skip ahead
        elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
            yield ('..', None, pos)
            pos += 1 # skip ahead
        elif c == '#' and program[pos:pos + 2] == '##': # look ahead carefully
            yield ('##', None, pos)
            pos += 1 # skip ahead
        elif c in "():=,-|&+!~^%": # handle simple operators
            yield (c, None, pos)
        elif (c in '"\'' or c == 'r' and
              program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
            if c == 'r':
                pos += 1
                c = program[pos]
                decode = lambda x: x
            else:
                decode = parser.unescapestr
            pos += 1
            s = pos
            while pos < l: # find closing quote
                d = program[pos]
                if d == '\\': # skip over escaped characters
                    pos += 2
                    continue
                if d == c:
                    yield ('string', decode(program[s:pos]), s)
                    break
                pos += 1
            else:
                raise error.ParseError(_("unterminated string"), s)
        # gather up a symbol/keyword
        elif c in syminitletters:
            s = pos
            pos += 1
            while pos < l: # find end of symbol
                d = program[pos]
                if d not in symletters:
                    break
                if d == '.' and program[pos - 1] == '.': # special case for ..
                    pos -= 1
                    break
                pos += 1
            sym = program[s:pos]
            if sym in keywords: # operator keywords
                yield (sym, None, s)
            elif '-' in sym:
                # some jerk gave us foo-bar-baz, try to check if it's a symbol
                if lookup and lookup(sym):
                    # looks like a real symbol
                    yield ('symbol', sym, s)
                else:
                    # looks like an expression
                    parts = sym.split('-')
                    for p in parts[:-1]:
                        if p: # possible consecutive -
                            yield ('symbol', p, s)
                        s += len(p)
                        yield ('-', None, pos)
                        s += 1
                    if parts[-1]: # possible trailing -
                        yield ('symbol', parts[-1], s)
            else:
                yield ('symbol', sym, s)
            pos -= 1
        else:
            raise error.ParseError(_("syntax error in revset '%s'") %
                                   program, pos)
        pos += 1
    yield ('end', None, pos)

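# A small sketch of the token stream produced above (no lookup callable is
# passed, so the old-style range shortcut is skipped):
#
#   >>> list(tokenize("1:2 and user(bob)"))
#   [('symbol', '1', 0), (':', None, 1), ('symbol', '2', 2),
#    ('and', None, 4), ('symbol', 'user', 8), ('(', None, 12),
#    ('symbol', 'bob', 13), (')', None, 16), ('end', None, 17)]
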
# helpers

def getsymbol(x):
    if x and x[0] == 'symbol':
        return x[1]
    raise error.ParseError(_('not a symbol'))

def getstring(x, err):
    if x and (x[0] == 'string' or x[0] == 'symbol'):
        return x[1]
    raise error.ParseError(err)

def getlist(x):
    if not x:
        return []
    if x[0] == 'list':
        return list(x[1:])
    return [x]

def getargs(x, min, max, err):
    l = getlist(x)
    if len(l) < min or (max >= 0 and len(l) > max):
        raise error.ParseError(err)
    return l

def getargsdict(x, funcname, keys):
    return parser.buildargsdict(getlist(x), funcname, keys.split(),
                                keyvaluenode='keyvalue', keynode='symbol')

def getset(repo, subset, x):
    if not x:
        raise error.ParseError(_("missing argument"))
    s = methods[x[0]](repo, subset, *x[1:])
    if util.safehasattr(s, 'isascending'):
        return s
    # else case should not happen, because all non-func are internal,
    # ignoring for now.
    if x[0] == 'func' and x[1][0] == 'symbol' and x[1][1] in symbols:
        repo.ui.deprecwarn('revset "%s" uses list instead of smartset'
                           % x[1][1],
                           '3.9')
    return baseset(s)

def _getrevsource(repo, r):
    extra = repo[r].extra()
    for label in ('source', 'transplant_source', 'rebase_source'):
        if label in extra:
            try:
                return repo[extra[label]].rev()
            except error.RepoLookupError:
                pass
    return None

# operator methods

def stringset(repo, subset, x):
    x = repo[x].rev()
    if (x in subset
        or x == node.nullrev and isinstance(subset, fullreposet)):
        return baseset([x])
    return baseset()

def rangeset(repo, subset, x, y):
    m = getset(repo, fullreposet(repo), x)
    n = getset(repo, fullreposet(repo), y)

    if not m or not n:
        return baseset()
    m, n = m.first(), n.last()

    if m == n:
        r = baseset([m])
    elif n == node.wdirrev:
        r = spanset(repo, m, len(repo)) + baseset([n])
    elif m == node.wdirrev:
        r = baseset([m]) + spanset(repo, len(repo) - 1, n - 1)
    elif m < n:
        r = spanset(repo, m, n + 1)
    else:
        r = spanset(repo, m, n - 1)
    # XXX We should combine with subset first: 'subset & baseset(...)'. This is
    # necessary to ensure we preserve the order in subset.
    #
    # This has performance implication, carrying the sorting over when possible
    # would be more efficient.
    return r & subset

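# Sketch of the behaviour implemented above: the span keeps the direction of
# the endpoints, so with plain integer revisions "1:3" yields 1, 2, 3 while
# "3:1" yields 3, 2, 1, and the result is then intersected with subset (whose
# own order, as the XXX note says, is not yet preserved by this code).
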
def dagrange(repo, subset, x, y):
    r = fullreposet(repo)
    xs = reachableroots(repo, getset(repo, r, x), getset(repo, r, y),
                        includepath=True)
    return subset & xs

def andset(repo, subset, x, y):
    return getset(repo, getset(repo, subset, x), y)

def differenceset(repo, subset, x, y):
    return getset(repo, subset, x) - getset(repo, subset, y)

def _orsetlist(repo, subset, xs):
    assert xs
    if len(xs) == 1:
        return getset(repo, subset, xs[0])
    p = len(xs) // 2
    a = _orsetlist(repo, subset, xs[:p])
    b = _orsetlist(repo, subset, xs[p:])
    return a + b

def orset(repo, subset, x):
    return _orsetlist(repo, subset, getlist(x))

def notset(repo, subset, x):
    return subset - getset(repo, subset, x)

def listset(repo, subset, *xs):
    raise error.ParseError(_("can't use a list in this context"),
                           hint=_('see hg help "revsets.x or y"'))

def keyvaluepair(repo, subset, k, v):
    raise error.ParseError(_("can't use a key-value pair in this context"))

def func(repo, subset, a, b):
    f = getsymbol(a)
    if f in symbols:
        return symbols[f](repo, subset, b)

    keep = lambda fn: getattr(fn, '__doc__', None) is not None

    syms = [s for (s, fn) in symbols.items() if keep(fn)]
    raise error.UnknownIdentifier(f, syms)

# functions

# symbols are callables like:
#   fn(repo, subset, x)
# with:
#   repo - current repository instance
#   subset - of revisions to be examined
#   x - argument in tree form
symbols = {}

# symbols which can't be used for a DoS attack for any given input
# (e.g. those which accept regexes as plain strings shouldn't be included)
# functions that just return a lot of changesets (like all) don't count here
safesymbols = set()

predicate = registrar.revsetpredicate()

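# Rough outline of how the registrar above is used; 'mypredicate' is a
# made-up example, the real registrations follow below, and extensions use
# registrar.revsetpredicate() the same way:
#
#   @predicate('mypredicate(string)', safe=True)
#   def mypredicate(repo, subset, x):
#       """docstring shown by 'hg help revsets'"""
#       arg = getstring(x, _("mypredicate requires a string"))
#       return subset.filter(lambda r: arg in repo[r].description())
#
# The decorator records the function in the registrar's table, which a loader
# later in this module merges into 'symbols'; safe=True additionally marks the
# name as eligible for 'safesymbols'.
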
@predicate('_destupdate')
def _destupdate(repo, subset, x):
    # experimental revset for update destination
    args = getargsdict(x, 'limit', 'clean check')
    return subset & baseset([destutil.destupdate(repo, **args)[0]])

@predicate('_destmerge')
def _destmerge(repo, subset, x):
    # experimental revset for merge destination
    sourceset = None
    if x is not None:
        sourceset = getset(repo, fullreposet(repo), x)
    return subset & baseset([destutil.destmerge(repo, sourceset=sourceset)])

@predicate('adds(pattern)', safe=True)
def adds(repo, subset, x):
    """Changesets that add a file matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "adds" is a keyword
    pat = getstring(x, _("adds requires a pattern"))
    return checkstatus(repo, subset, pat, 1)

@predicate('ancestor(*changeset)', safe=True)
def ancestor(repo, subset, x):
    """A greatest common ancestor of the changesets.

    Accepts 0 or more changesets.
    Will return empty list when passed no args.
    Greatest common ancestor of a single changeset is that changeset.
    """
    # i18n: "ancestor" is a keyword
    l = getlist(x)
    rl = fullreposet(repo)
    anc = None

    # (getset(repo, rl, i) for i in l) generates a list of lists
    for revs in (getset(repo, rl, i) for i in l):
        for r in revs:
            if anc is None:
                anc = repo[r]
            else:
                anc = anc.ancestor(repo[r])

    if anc is not None and anc.rev() in subset:
        return baseset([anc.rev()])
    return baseset()

def _ancestors(repo, subset, x, followfirst=False):
    heads = getset(repo, fullreposet(repo), x)
    if not heads:
        return baseset()
    s = _revancestors(repo, heads, followfirst)
    return subset & s

@predicate('ancestors(set)', safe=True)
def ancestors(repo, subset, x):
    """Changesets that are ancestors of a changeset in set.
    """
    return _ancestors(repo, subset, x)

@predicate('_firstancestors', safe=True)
def _firstancestors(repo, subset, x):
    # ``_firstancestors(set)``
    # Like ``ancestors(set)`` but follows only the first parents.
    return _ancestors(repo, subset, x, followfirst=True)

def ancestorspec(repo, subset, x, n):
    """``set~n``
    Changesets that are the Nth ancestor (first parents only) of a changeset
    in set.
    """
    try:
        n = int(n[1])
    except (TypeError, ValueError):
        raise error.ParseError(_("~ expects a number"))
    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        for i in range(n):
            r = cl.parentrevs(r)[0]
        ps.add(r)
    return subset & ps

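# Sketch of the operator handled above: "foo~2" resolves to the second
# ancestor of foo following only first parents, i.e. the same revision as
# "foo^^", and "foo~0" is foo itself.
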
536 @predicate('author(string)', safe=True)
536 @predicate('author(string)', safe=True)
537 def author(repo, subset, x):
537 def author(repo, subset, x):
538 """Alias for ``user(string)``.
538 """Alias for ``user(string)``.
539 """
539 """
540 # i18n: "author" is a keyword
540 # i18n: "author" is a keyword
541 n = encoding.lower(getstring(x, _("author requires a string")))
541 n = encoding.lower(getstring(x, _("author requires a string")))
542 kind, pattern, matcher = _substringmatcher(n)
542 kind, pattern, matcher = _substringmatcher(n)
543 return subset.filter(lambda x: matcher(encoding.lower(repo[x].user())),
543 return subset.filter(lambda x: matcher(encoding.lower(repo[x].user())),
544 condrepr=('<user %r>', n))
544 condrepr=('<user %r>', n))
545
545
546 @predicate('bisect(string)', safe=True)
546 @predicate('bisect(string)', safe=True)
547 def bisect(repo, subset, x):
547 def bisect(repo, subset, x):
548 """Changesets marked in the specified bisect status:
548 """Changesets marked in the specified bisect status:
549
549
550 - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
550 - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
551 - ``goods``, ``bads`` : csets topologically good/bad
551 - ``goods``, ``bads`` : csets topologically good/bad
552 - ``range`` : csets taking part in the bisection
552 - ``range`` : csets taking part in the bisection
553 - ``pruned`` : csets that are goods, bads or skipped
553 - ``pruned`` : csets that are goods, bads or skipped
554 - ``untested`` : csets whose fate is yet unknown
554 - ``untested`` : csets whose fate is yet unknown
555 - ``ignored`` : csets ignored due to DAG topology
555 - ``ignored`` : csets ignored due to DAG topology
556 - ``current`` : the cset currently being bisected
556 - ``current`` : the cset currently being bisected
557 """
557 """
558 # i18n: "bisect" is a keyword
558 # i18n: "bisect" is a keyword
559 status = getstring(x, _("bisect requires a string")).lower()
559 status = getstring(x, _("bisect requires a string")).lower()
560 state = set(hbisect.get(repo, status))
560 state = set(hbisect.get(repo, status))
561 return subset & state
561 return subset & state
562
562
563 # Backward-compatibility
563 # Backward-compatibility
564 # - no help entry so that we do not advertise it any more
564 # - no help entry so that we do not advertise it any more
565 @predicate('bisected', safe=True)
565 @predicate('bisected', safe=True)
566 def bisected(repo, subset, x):
566 def bisected(repo, subset, x):
567 return bisect(repo, subset, x)
567 return bisect(repo, subset, x)
568
568
569 @predicate('bookmark([name])', safe=True)
569 @predicate('bookmark([name])', safe=True)
570 def bookmark(repo, subset, x):
570 def bookmark(repo, subset, x):
571 """The named bookmark or all bookmarks.
571 """The named bookmark or all bookmarks.
572
572
573 If `name` starts with `re:`, the remainder of the name is treated as
573 If `name` starts with `re:`, the remainder of the name is treated as
574 a regular expression. To match a bookmark that actually starts with `re:`,
574 a regular expression. To match a bookmark that actually starts with `re:`,
575 use the prefix `literal:`.
575 use the prefix `literal:`.
576 """
576 """
577 # i18n: "bookmark" is a keyword
577 # i18n: "bookmark" is a keyword
578 args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
578 args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
579 if args:
579 if args:
580 bm = getstring(args[0],
580 bm = getstring(args[0],
581 # i18n: "bookmark" is a keyword
581 # i18n: "bookmark" is a keyword
582 _('the argument to bookmark must be a string'))
582 _('the argument to bookmark must be a string'))
583 kind, pattern, matcher = util.stringmatcher(bm)
583 kind, pattern, matcher = util.stringmatcher(bm)
584 bms = set()
584 bms = set()
585 if kind == 'literal':
585 if kind == 'literal':
586 bmrev = repo._bookmarks.get(pattern, None)
586 bmrev = repo._bookmarks.get(pattern, None)
587 if not bmrev:
587 if not bmrev:
588 raise error.RepoLookupError(_("bookmark '%s' does not exist")
588 raise error.RepoLookupError(_("bookmark '%s' does not exist")
589 % pattern)
589 % pattern)
590 bms.add(repo[bmrev].rev())
590 bms.add(repo[bmrev].rev())
591 else:
591 else:
592 matchrevs = set()
592 matchrevs = set()
593 for name, bmrev in repo._bookmarks.iteritems():
593 for name, bmrev in repo._bookmarks.iteritems():
594 if matcher(name):
594 if matcher(name):
595 matchrevs.add(bmrev)
595 matchrevs.add(bmrev)
596 if not matchrevs:
596 if not matchrevs:
597 raise error.RepoLookupError(_("no bookmarks exist"
597 raise error.RepoLookupError(_("no bookmarks exist"
598 " that match '%s'") % pattern)
598 " that match '%s'") % pattern)
599 for bmrev in matchrevs:
599 for bmrev in matchrevs:
600 bms.add(repo[bmrev].rev())
600 bms.add(repo[bmrev].rev())
601 else:
601 else:
602 bms = set([repo[r].rev()
602 bms = set([repo[r].rev()
603 for r in repo._bookmarks.values()])
603 for r in repo._bookmarks.values()])
604 bms -= set([node.nullrev])
604 bms -= set([node.nullrev])
605 return subset & bms
605 return subset & bms
606
606
607 @predicate('branch(string or set)', safe=True)
607 @predicate('branch(string or set)', safe=True)
608 def branch(repo, subset, x):
608 def branch(repo, subset, x):
609 """
609 """
610 All changesets belonging to the given branch or the branches of the given
610 All changesets belonging to the given branch or the branches of the given
611 changesets.
611 changesets.
612
612
613 If `string` starts with `re:`, the remainder of the name is treated as
613 If `string` starts with `re:`, the remainder of the name is treated as
614 a regular expression. To match a branch that actually starts with `re:`,
614 a regular expression. To match a branch that actually starts with `re:`,
615 use the prefix `literal:`.
615 use the prefix `literal:`.
616 """
616 """
617 getbi = repo.revbranchcache().branchinfo
617 getbi = repo.revbranchcache().branchinfo
618
618
619 try:
619 try:
620 b = getstring(x, '')
620 b = getstring(x, '')
621 except error.ParseError:
621 except error.ParseError:
622 # not a string, but another revspec, e.g. tip()
622 # not a string, but another revspec, e.g. tip()
623 pass
623 pass
624 else:
624 else:
625 kind, pattern, matcher = util.stringmatcher(b)
625 kind, pattern, matcher = util.stringmatcher(b)
626 if kind == 'literal':
626 if kind == 'literal':
627 # note: falls through to the revspec case if no branch with
627 # note: falls through to the revspec case if no branch with
628 # this name exists and pattern kind is not specified explicitly
628 # this name exists and pattern kind is not specified explicitly
629 if pattern in repo.branchmap():
629 if pattern in repo.branchmap():
630 return subset.filter(lambda r: matcher(getbi(r)[0]),
630 return subset.filter(lambda r: matcher(getbi(r)[0]),
631 condrepr=('<branch %r>', b))
631 condrepr=('<branch %r>', b))
632 if b.startswith('literal:'):
632 if b.startswith('literal:'):
633 raise error.RepoLookupError(_("branch '%s' does not exist")
633 raise error.RepoLookupError(_("branch '%s' does not exist")
634 % pattern)
634 % pattern)
635 else:
635 else:
636 return subset.filter(lambda r: matcher(getbi(r)[0]),
636 return subset.filter(lambda r: matcher(getbi(r)[0]),
637 condrepr=('<branch %r>', b))
637 condrepr=('<branch %r>', b))
638
638
639 s = getset(repo, fullreposet(repo), x)
639 s = getset(repo, fullreposet(repo), x)
640 b = set()
640 b = set()
641 for r in s:
641 for r in s:
642 b.add(getbi(r)[0])
642 b.add(getbi(r)[0])
643 c = s.__contains__
643 c = s.__contains__
644 return subset.filter(lambda r: c(r) or getbi(r)[0] in b,
644 return subset.filter(lambda r: c(r) or getbi(r)[0] in b,
645 condrepr=lambda: '<branch %r>' % sorted(b))
645 condrepr=lambda: '<branch %r>' % sorted(b))
646
646
647 @predicate('bumped()', safe=True)
647 @predicate('bumped()', safe=True)
648 def bumped(repo, subset, x):
648 def bumped(repo, subset, x):
649 """Mutable changesets marked as successors of public changesets.
649 """Mutable changesets marked as successors of public changesets.
650
650
651 Only non-public and non-obsolete changesets can be `bumped`.
651 Only non-public and non-obsolete changesets can be `bumped`.
652 """
652 """
653 # i18n: "bumped" is a keyword
653 # i18n: "bumped" is a keyword
654 getargs(x, 0, 0, _("bumped takes no arguments"))
654 getargs(x, 0, 0, _("bumped takes no arguments"))
655 bumped = obsmod.getrevs(repo, 'bumped')
655 bumped = obsmod.getrevs(repo, 'bumped')
656 return subset & bumped
656 return subset & bumped
657
657
658 @predicate('bundle()', safe=True)
658 @predicate('bundle()', safe=True)
659 def bundle(repo, subset, x):
659 def bundle(repo, subset, x):
660 """Changesets in the bundle.
660 """Changesets in the bundle.
661
661
662 Bundle must be specified by the -R option."""
662 Bundle must be specified by the -R option."""
663
663
664 try:
664 try:
665 bundlerevs = repo.changelog.bundlerevs
665 bundlerevs = repo.changelog.bundlerevs
666 except AttributeError:
666 except AttributeError:
667 raise error.Abort(_("no bundle provided - specify with -R"))
667 raise error.Abort(_("no bundle provided - specify with -R"))
668 return subset & bundlerevs
668 return subset & bundlerevs
669
669
670 def checkstatus(repo, subset, pat, field):
670 def checkstatus(repo, subset, pat, field):
671 hasset = matchmod.patkind(pat) == 'set'
671 hasset = matchmod.patkind(pat) == 'set'
672
672
673 mcache = [None]
673 mcache = [None]
674 def matches(x):
674 def matches(x):
675 c = repo[x]
675 c = repo[x]
676 if not mcache[0] or hasset:
676 if not mcache[0] or hasset:
677 mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
677 mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
678 m = mcache[0]
678 m = mcache[0]
679 fname = None
679 fname = None
680 if not m.anypats() and len(m.files()) == 1:
680 if not m.anypats() and len(m.files()) == 1:
681 fname = m.files()[0]
681 fname = m.files()[0]
682 if fname is not None:
682 if fname is not None:
683 if fname not in c.files():
683 if fname not in c.files():
684 return False
684 return False
685 else:
685 else:
686 for f in c.files():
686 for f in c.files():
687 if m(f):
687 if m(f):
688 break
688 break
689 else:
689 else:
690 return False
690 return False
691 files = repo.status(c.p1().node(), c.node())[field]
691 files = repo.status(c.p1().node(), c.node())[field]
692 if fname is not None:
692 if fname is not None:
693 if fname in files:
693 if fname in files:
694 return True
694 return True
695 else:
695 else:
696 for f in files:
696 for f in files:
697 if m(f):
697 if m(f):
698 return True
698 return True
699
699
700 return subset.filter(matches, condrepr=('<status[%r] %r>', field, pat))
700 return subset.filter(matches, condrepr=('<status[%r] %r>', field, pat))
701
701
702 def _children(repo, subset, parentset):
702 def _children(repo, subset, parentset):
703 if not parentset:
703 if not parentset:
704 return baseset()
704 return baseset()
705 cs = set()
705 cs = set()
706 pr = repo.changelog.parentrevs
706 pr = repo.changelog.parentrevs
707 minrev = parentset.min()
707 minrev = parentset.min()
708 for r in subset:
708 for r in subset:
709 if r <= minrev:
709 if r <= minrev:
710 continue
710 continue
711 for p in pr(r):
711 for p in pr(r):
712 if p in parentset:
712 if p in parentset:
713 cs.add(r)
713 cs.add(r)
714 return baseset(cs)
714 return baseset(cs)
715
715
716 @predicate('children(set)', safe=True)
716 @predicate('children(set)', safe=True)
717 def children(repo, subset, x):
717 def children(repo, subset, x):
718 """Child changesets of changesets in set.
718 """Child changesets of changesets in set.
719 """
719 """
720 s = getset(repo, fullreposet(repo), x)
720 s = getset(repo, fullreposet(repo), x)
721 cs = _children(repo, subset, s)
721 cs = _children(repo, subset, s)
722 return subset & cs
722 return subset & cs
723
723
724 @predicate('closed()', safe=True)
724 @predicate('closed()', safe=True)
725 def closed(repo, subset, x):
725 def closed(repo, subset, x):
726 """Changeset is closed.
726 """Changeset is closed.
727 """
727 """
728 # i18n: "closed" is a keyword
728 # i18n: "closed" is a keyword
729 getargs(x, 0, 0, _("closed takes no arguments"))
729 getargs(x, 0, 0, _("closed takes no arguments"))
730 return subset.filter(lambda r: repo[r].closesbranch(),
730 return subset.filter(lambda r: repo[r].closesbranch(),
731 condrepr='<branch closed>')
731 condrepr='<branch closed>')
732
732
733 @predicate('contains(pattern)')
733 @predicate('contains(pattern)')
734 def contains(repo, subset, x):
734 def contains(repo, subset, x):
735 """The revision's manifest contains a file matching pattern (but might not
735 """The revision's manifest contains a file matching pattern (but might not
736 modify it). See :hg:`help patterns` for information about file patterns.
736 modify it). See :hg:`help patterns` for information about file patterns.
737
737
738 The pattern without explicit kind like ``glob:`` is expected to be
738 The pattern without explicit kind like ``glob:`` is expected to be
739 relative to the current directory and match against a file exactly
739 relative to the current directory and match against a file exactly
740 for efficiency.
740 for efficiency.
741 """
741 """
742 # i18n: "contains" is a keyword
742 # i18n: "contains" is a keyword
743 pat = getstring(x, _("contains requires a pattern"))
743 pat = getstring(x, _("contains requires a pattern"))
744
744
745 def matches(x):
745 def matches(x):
746 if not matchmod.patkind(pat):
746 if not matchmod.patkind(pat):
747 pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
747 pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
748 if pats in repo[x]:
748 if pats in repo[x]:
749 return True
749 return True
750 else:
750 else:
751 c = repo[x]
751 c = repo[x]
752 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
752 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
753 for f in c.manifest():
753 for f in c.manifest():
754 if m(f):
754 if m(f):
755 return True
755 return True
756 return False
756 return False
757
757
758 return subset.filter(matches, condrepr=('<contains %r>', pat))
758 return subset.filter(matches, condrepr=('<contains %r>', pat))
759
759
760 @predicate('converted([id])', safe=True)
760 @predicate('converted([id])', safe=True)
761 def converted(repo, subset, x):
761 def converted(repo, subset, x):
762 """Changesets converted from the given identifier in the old repository if
762 """Changesets converted from the given identifier in the old repository if
763 present, or all converted changesets if no identifier is specified.
763 present, or all converted changesets if no identifier is specified.
764 """
764 """
765
765
766 # There is exactly no chance of resolving the revision, so do a simple
766 # There is exactly no chance of resolving the revision, so do a simple
767 # string compare and hope for the best
767 # string compare and hope for the best
768
768
769 rev = None
769 rev = None
770 # i18n: "converted" is a keyword
770 # i18n: "converted" is a keyword
771 l = getargs(x, 0, 1, _('converted takes one or no arguments'))
771 l = getargs(x, 0, 1, _('converted takes one or no arguments'))
772 if l:
772 if l:
773 # i18n: "converted" is a keyword
773 # i18n: "converted" is a keyword
774 rev = getstring(l[0], _('converted requires a revision'))
774 rev = getstring(l[0], _('converted requires a revision'))
775
775
776 def _matchvalue(r):
776 def _matchvalue(r):
777 source = repo[r].extra().get('convert_revision', None)
777 source = repo[r].extra().get('convert_revision', None)
778 return source is not None and (rev is None or source.startswith(rev))
778 return source is not None and (rev is None or source.startswith(rev))
779
779
780 return subset.filter(lambda r: _matchvalue(r),
780 return subset.filter(lambda r: _matchvalue(r),
781 condrepr=('<converted %r>', rev))
781 condrepr=('<converted %r>', rev))
782
782
783 @predicate('date(interval)', safe=True)
783 @predicate('date(interval)', safe=True)
784 def date(repo, subset, x):
784 def date(repo, subset, x):
785 """Changesets within the interval, see :hg:`help dates`.
785 """Changesets within the interval, see :hg:`help dates`.
786 """
786 """
787 # i18n: "date" is a keyword
787 # i18n: "date" is a keyword
788 ds = getstring(x, _("date requires a string"))
788 ds = getstring(x, _("date requires a string"))
789 dm = util.matchdate(ds)
789 dm = util.matchdate(ds)
790 return subset.filter(lambda x: dm(repo[x].date()[0]),
790 return subset.filter(lambda x: dm(repo[x].date()[0]),
791 condrepr=('<date %r>', ds))
791 condrepr=('<date %r>', ds))
792
792
793 @predicate('desc(string)', safe=True)
793 @predicate('desc(string)', safe=True)
794 def desc(repo, subset, x):
794 def desc(repo, subset, x):
795 """Search commit message for string. The match is case-insensitive.
795 """Search commit message for string. The match is case-insensitive.
796 """
796 """
797 # i18n: "desc" is a keyword
797 # i18n: "desc" is a keyword
798 ds = encoding.lower(getstring(x, _("desc requires a string")))
798 ds = encoding.lower(getstring(x, _("desc requires a string")))
799
799
800 def matches(x):
800 def matches(x):
801 c = repo[x]
801 c = repo[x]
802 return ds in encoding.lower(c.description())
802 return ds in encoding.lower(c.description())
803
803
804 return subset.filter(matches, condrepr=('<desc %r>', ds))
804 return subset.filter(matches, condrepr=('<desc %r>', ds))
805
805
806 def _descendants(repo, subset, x, followfirst=False):
806 def _descendants(repo, subset, x, followfirst=False):
807 roots = getset(repo, fullreposet(repo), x)
807 roots = getset(repo, fullreposet(repo), x)
808 if not roots:
808 if not roots:
809 return baseset()
809 return baseset()
810 s = _revdescendants(repo, roots, followfirst)
810 s = _revdescendants(repo, roots, followfirst)
811
811
812 # Both sets need to be ascending in order to lazily return the union
812 # Both sets need to be ascending in order to lazily return the union
813 # in the correct order.
813 # in the correct order.
814 base = subset & roots
814 base = subset & roots
815 desc = subset & s
815 desc = subset & s
816 result = base + desc
816 result = base + desc
817 if subset.isascending():
817 if subset.isascending():
818 result.sort()
818 result.sort()
819 elif subset.isdescending():
819 elif subset.isdescending():
820 result.sort(reverse=True)
820 result.sort(reverse=True)
821 else:
821 else:
822 result = subset & result
822 result = subset & result
823 return result
823 return result
824
824
825 @predicate('descendants(set)', safe=True)
825 @predicate('descendants(set)', safe=True)
826 def descendants(repo, subset, x):
826 def descendants(repo, subset, x):
827 """Changesets which are descendants of changesets in set.
827 """Changesets which are descendants of changesets in set.
828 """
828 """
829 return _descendants(repo, subset, x)
829 return _descendants(repo, subset, x)
830
830
831 @predicate('_firstdescendants', safe=True)
831 @predicate('_firstdescendants', safe=True)
832 def _firstdescendants(repo, subset, x):
832 def _firstdescendants(repo, subset, x):
833 # ``_firstdescendants(set)``
833 # ``_firstdescendants(set)``
834 # Like ``descendants(set)`` but follows only the first parents.
834 # Like ``descendants(set)`` but follows only the first parents.
835 return _descendants(repo, subset, x, followfirst=True)
835 return _descendants(repo, subset, x, followfirst=True)
836
836
837 @predicate('destination([set])', safe=True)
837 @predicate('destination([set])', safe=True)
838 def destination(repo, subset, x):
838 def destination(repo, subset, x):
839 """Changesets that were created by a graft, transplant or rebase operation,
839 """Changesets that were created by a graft, transplant or rebase operation,
840 with the given revisions specified as the source. Omitting the optional set
840 with the given revisions specified as the source. Omitting the optional set
841 is the same as passing all().
841 is the same as passing all().
842 """
842 """
843 if x is not None:
843 if x is not None:
844 sources = getset(repo, fullreposet(repo), x)
844 sources = getset(repo, fullreposet(repo), x)
845 else:
845 else:
846 sources = fullreposet(repo)
846 sources = fullreposet(repo)
847
847
848 dests = set()
848 dests = set()
849
849
850 # subset contains all of the possible destinations that can be returned, so
850 # subset contains all of the possible destinations that can be returned, so
851 # iterate over them and see if their source(s) were provided in the arg set.
851 # iterate over them and see if their source(s) were provided in the arg set.
852 # Even if the immediate src of r is not in the arg set, src's source (or
852 # Even if the immediate src of r is not in the arg set, src's source (or
853 # further back) may be. Scanning back further than the immediate src allows
853 # further back) may be. Scanning back further than the immediate src allows
854 # transitive transplants and rebases to yield the same results as transitive
854 # transitive transplants and rebases to yield the same results as transitive
855 # grafts.
855 # grafts.
856 for r in subset:
856 for r in subset:
857 src = _getrevsource(repo, r)
857 src = _getrevsource(repo, r)
858 lineage = None
858 lineage = None
859
859
860 while src is not None:
860 while src is not None:
861 if lineage is None:
861 if lineage is None:
862 lineage = list()
862 lineage = list()
863
863
864 lineage.append(r)
864 lineage.append(r)
865
865
866 # The visited lineage is a match if the current source is in the arg
866 # The visited lineage is a match if the current source is in the arg
867 # set. Since every candidate dest is visited by way of iterating
867 # set. Since every candidate dest is visited by way of iterating
868 # subset, any dests further back in the lineage will be tested by a
868 # subset, any dests further back in the lineage will be tested by a
869 # different iteration over subset. Likewise, if the src was already
869 # different iteration over subset. Likewise, if the src was already
870 # selected, the current lineage can be selected without going back
870 # selected, the current lineage can be selected without going back
871 # further.
871 # further.
872 if src in sources or src in dests:
872 if src in sources or src in dests:
873 dests.update(lineage)
873 dests.update(lineage)
874 break
874 break
875
875
876 r = src
876 r = src
877 src = _getrevsource(repo, r)
877 src = _getrevsource(repo, r)
878
878
879 return subset.filter(dests.__contains__,
879 return subset.filter(dests.__contains__,
880 condrepr=lambda: '<destination %r>' % sorted(dests))
880 condrepr=lambda: '<destination %r>' % sorted(dests))
881
881
882 @predicate('divergent()', safe=True)
882 @predicate('divergent()', safe=True)
883 def divergent(repo, subset, x):
883 def divergent(repo, subset, x):
884 """
884 """
885 Final successors of changesets with an alternative set of final successors.
885 Final successors of changesets with an alternative set of final successors.
886 """
886 """
887 # i18n: "divergent" is a keyword
887 # i18n: "divergent" is a keyword
888 getargs(x, 0, 0, _("divergent takes no arguments"))
888 getargs(x, 0, 0, _("divergent takes no arguments"))
889 divergent = obsmod.getrevs(repo, 'divergent')
889 divergent = obsmod.getrevs(repo, 'divergent')
890 return subset & divergent
890 return subset & divergent
891
891
892 @predicate('extinct()', safe=True)
892 @predicate('extinct()', safe=True)
893 def extinct(repo, subset, x):
893 def extinct(repo, subset, x):
894 """Obsolete changesets with obsolete descendants only.
894 """Obsolete changesets with obsolete descendants only.
895 """
895 """
896 # i18n: "extinct" is a keyword
896 # i18n: "extinct" is a keyword
897 getargs(x, 0, 0, _("extinct takes no arguments"))
897 getargs(x, 0, 0, _("extinct takes no arguments"))
898 extincts = obsmod.getrevs(repo, 'extinct')
898 extincts = obsmod.getrevs(repo, 'extinct')
899 return subset & extincts
899 return subset & extincts
900
900
901 @predicate('extra(label, [value])', safe=True)
901 @predicate('extra(label, [value])', safe=True)
902 def extra(repo, subset, x):
902 def extra(repo, subset, x):
903 """Changesets with the given label in the extra metadata, with the given
903 """Changesets with the given label in the extra metadata, with the given
904 optional value.
904 optional value.
905
905
906 If `value` starts with `re:`, the remainder of the value is treated as
906 If `value` starts with `re:`, the remainder of the value is treated as
907 a regular expression. To match a value that actually starts with `re:`,
907 a regular expression. To match a value that actually starts with `re:`,
908 use the prefix `literal:`.
908 use the prefix `literal:`.
909 """
909 """
910 args = getargsdict(x, 'extra', 'label value')
910 args = getargsdict(x, 'extra', 'label value')
911 if 'label' not in args:
911 if 'label' not in args:
912 # i18n: "extra" is a keyword
912 # i18n: "extra" is a keyword
913 raise error.ParseError(_('extra takes at least 1 argument'))
913 raise error.ParseError(_('extra takes at least 1 argument'))
914 # i18n: "extra" is a keyword
914 # i18n: "extra" is a keyword
915 label = getstring(args['label'], _('first argument to extra must be '
915 label = getstring(args['label'], _('first argument to extra must be '
916 'a string'))
916 'a string'))
917 value = None
917 value = None
918
918
919 if 'value' in args:
919 if 'value' in args:
920 # i18n: "extra" is a keyword
920 # i18n: "extra" is a keyword
921 value = getstring(args['value'], _('second argument to extra must be '
921 value = getstring(args['value'], _('second argument to extra must be '
922 'a string'))
922 'a string'))
923 kind, value, matcher = util.stringmatcher(value)
923 kind, value, matcher = util.stringmatcher(value)
924
924
925 def _matchvalue(r):
925 def _matchvalue(r):
926 extra = repo[r].extra()
926 extra = repo[r].extra()
927 return label in extra and (value is None or matcher(extra[label]))
927 return label in extra and (value is None or matcher(extra[label]))
928
928
929 return subset.filter(lambda r: _matchvalue(r),
929 return subset.filter(lambda r: _matchvalue(r),
930 condrepr=('<extra[%r] %r>', label, value))
930 condrepr=('<extra[%r] %r>', label, value))
931
931
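# Usage sketch (illustrative, not part of this module): the docstring above
# corresponds to queries such as the following, assuming the named keys
# actually occur in the repository's changeset extras:
#
#   hg log -r "extra('rebase_source')"          # key present, any value
#   hg log -r "extra('branch', 're:^stable')"   # value matched as a regex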
932 @predicate('filelog(pattern)', safe=True)
932 @predicate('filelog(pattern)', safe=True)
933 def filelog(repo, subset, x):
933 def filelog(repo, subset, x):
934 """Changesets connected to the specified filelog.
934 """Changesets connected to the specified filelog.
935
935
936 For performance reasons, visits only revisions mentioned in the file-level
936 For performance reasons, visits only revisions mentioned in the file-level
937 filelog, rather than filtering through all changesets (much faster, but
937 filelog, rather than filtering through all changesets (much faster, but
938 doesn't include deletes or duplicate changes). For a slower, more accurate
938 doesn't include deletes or duplicate changes). For a slower, more accurate
939 result, use ``file()``.
939 result, use ``file()``.
940
940
941 The pattern without explicit kind like ``glob:`` is expected to be
941 The pattern without explicit kind like ``glob:`` is expected to be
942 relative to the current directory and match against a file exactly
942 relative to the current directory and match against a file exactly
943 for efficiency.
943 for efficiency.
944
944
945 If some linkrev points to revisions filtered by the current repoview, we'll
945 If some linkrev points to revisions filtered by the current repoview, we'll
946 work around it to return a non-filtered value.
946 work around it to return a non-filtered value.
947 """
947 """
948
948
949 # i18n: "filelog" is a keyword
949 # i18n: "filelog" is a keyword
950 pat = getstring(x, _("filelog requires a pattern"))
950 pat = getstring(x, _("filelog requires a pattern"))
951 s = set()
951 s = set()
952 cl = repo.changelog
952 cl = repo.changelog
953
953
954 if not matchmod.patkind(pat):
954 if not matchmod.patkind(pat):
955 f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
955 f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
956 files = [f]
956 files = [f]
957 else:
957 else:
958 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
958 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
959 files = (f for f in repo[None] if m(f))
959 files = (f for f in repo[None] if m(f))
960
960
961 for f in files:
961 for f in files:
962 fl = repo.file(f)
962 fl = repo.file(f)
963 known = {}
963 known = {}
964 scanpos = 0
964 scanpos = 0
965 for fr in list(fl):
965 for fr in list(fl):
966 fn = fl.node(fr)
966 fn = fl.node(fr)
967 if fn in known:
967 if fn in known:
968 s.add(known[fn])
968 s.add(known[fn])
969 continue
969 continue
970
970
971 lr = fl.linkrev(fr)
971 lr = fl.linkrev(fr)
972 if lr in cl:
972 if lr in cl:
973 s.add(lr)
973 s.add(lr)
974 elif scanpos is not None:
974 elif scanpos is not None:
975 # lowest matching changeset is filtered, scan further
975 # lowest matching changeset is filtered, scan further
976 # ahead in changelog
976 # ahead in changelog
977 start = max(lr, scanpos) + 1
977 start = max(lr, scanpos) + 1
978 scanpos = None
978 scanpos = None
979 for r in cl.revs(start):
979 for r in cl.revs(start):
980 # minimize parsing of non-matching entries
980 # minimize parsing of non-matching entries
981 if f in cl.revision(r) and f in cl.readfiles(r):
981 if f in cl.revision(r) and f in cl.readfiles(r):
982 try:
982 try:
983 # try to use manifest delta fastpath
983 # try to use manifest delta fastpath
984 n = repo[r].filenode(f)
984 n = repo[r].filenode(f)
985 if n not in known:
985 if n not in known:
986 if n == fn:
986 if n == fn:
987 s.add(r)
987 s.add(r)
988 scanpos = r
988 scanpos = r
989 break
989 break
990 else:
990 else:
991 known[n] = r
991 known[n] = r
992 except error.ManifestLookupError:
992 except error.ManifestLookupError:
993 # deletion in changelog
993 # deletion in changelog
994 continue
994 continue
995
995
996 return subset & s
996 return subset & s
997
997
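# Illustrative comparison (the path is a placeholder): per the docstring,
# filelog() trades completeness for speed relative to file():
#
#   hg log -r "filelog('mercurial/revset.py')"   # fast, may miss deletes
#   hg log -r "file('mercurial/revset.py')"      # slower, but complete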
998 @predicate('first(set, [n])', safe=True)
998 @predicate('first(set, [n])', safe=True)
999 def first(repo, subset, x):
999 def first(repo, subset, x):
1000 """An alias for limit().
1000 """An alias for limit().
1001 """
1001 """
1002 return limit(repo, subset, x)
1002 return limit(repo, subset, x)
1003
1003
1004 def _follow(repo, subset, x, name, followfirst=False):
1004 def _follow(repo, subset, x, name, followfirst=False):
1005 l = getargs(x, 0, 2, _("%s takes no arguments or a pattern "
1005 l = getargs(x, 0, 2, _("%s takes no arguments or a pattern "
1006 "and an optional revset") % name)
1006 "and an optional revset") % name)
1007 c = repo['.']
1007 c = repo['.']
1008 if l:
1008 if l:
1009 x = getstring(l[0], _("%s expected a pattern") % name)
1009 x = getstring(l[0], _("%s expected a pattern") % name)
1010 rev = None
1010 rev = None
1011 if len(l) >= 2:
1011 if len(l) >= 2:
1012 rev = getset(repo, fullreposet(repo), l[1]).last()
1012 rev = getset(repo, fullreposet(repo), l[1]).last()
1013 if rev is None:
1013 if rev is None:
1014 raise error.RepoLookupError(
1014 raise error.RepoLookupError(
1015 _("%s: starting revision set cannot be empty") % name)
1015 _("%s: starting revision set cannot be empty") % name)
1016 c = repo[rev]
1016 c = repo[rev]
1017 matcher = matchmod.match(repo.root, repo.getcwd(), [x],
1017 matcher = matchmod.match(repo.root, repo.getcwd(), [x],
1018 ctx=repo[rev], default='path')
1018 ctx=repo[rev], default='path')
1019
1019
1020 files = c.manifest().walk(matcher)
1020 files = c.manifest().walk(matcher)
1021
1021
1022 s = set()
1022 s = set()
1023 for fname in files:
1023 for fname in files:
1024 fctx = c[fname]
1024 fctx = c[fname]
1025 s = s.union(set(c.rev() for c in fctx.ancestors(followfirst)))
1025 s = s.union(set(c.rev() for c in fctx.ancestors(followfirst)))
1026 # include the revision responsible for the most recent version
1026 # include the revision responsible for the most recent version
1027 s.add(fctx.introrev())
1027 s.add(fctx.introrev())
1028 else:
1028 else:
1029 s = _revancestors(repo, baseset([c.rev()]), followfirst)
1029 s = _revancestors(repo, baseset([c.rev()]), followfirst)
1030
1030
1031 return subset & s
1031 return subset & s
1032
1032
1033 @predicate('follow([pattern[, startrev]])', safe=True)
1033 @predicate('follow([pattern[, startrev]])', safe=True)
1034 def follow(repo, subset, x):
1034 def follow(repo, subset, x):
1035 """
1035 """
1036 An alias for ``::.`` (ancestors of the working directory's first parent).
1036 An alias for ``::.`` (ancestors of the working directory's first parent).
1037 If pattern is specified, the histories of files matching the given
1037 If pattern is specified, the histories of files matching the given
1038 pattern in the revision given by startrev are followed, including copies.
1038 pattern in the revision given by startrev are followed, including copies.
1039 """
1039 """
1040 return _follow(repo, subset, x, 'follow')
1040 return _follow(repo, subset, x, 'follow')
1041
1041
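# Example queries for the above (file names are placeholders):
#
#   hg log -r "follow()"               # same as ::.
#   hg log -r "follow('README', tip)"  # history of README as of tip,
#                                      # following renames and copies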
1042 @predicate('_followfirst', safe=True)
1042 @predicate('_followfirst', safe=True)
1043 def _followfirst(repo, subset, x):
1043 def _followfirst(repo, subset, x):
1044 # ``followfirst([pattern[, startrev]])``
1044 # ``followfirst([pattern[, startrev]])``
1045 # Like ``follow([pattern[, startrev]])`` but follows only the first parent
1045 # Like ``follow([pattern[, startrev]])`` but follows only the first parent
1046 # of every revision or file revision.
1046 # of every revision or file revision.
1047 return _follow(repo, subset, x, '_followfirst', followfirst=True)
1047 return _follow(repo, subset, x, '_followfirst', followfirst=True)
1048
1048
1049 @predicate('all()', safe=True)
1049 @predicate('all()', safe=True)
1050 def getall(repo, subset, x):
1050 def getall(repo, subset, x):
1051 """All changesets, the same as ``0:tip``.
1051 """All changesets, the same as ``0:tip``.
1052 """
1052 """
1053 # i18n: "all" is a keyword
1053 # i18n: "all" is a keyword
1054 getargs(x, 0, 0, _("all takes no arguments"))
1054 getargs(x, 0, 0, _("all takes no arguments"))
1055 return subset & spanset(repo) # drop "null" if any
1055 return subset & spanset(repo) # drop "null" if any
1056
1056
1057 @predicate('grep(regex)')
1057 @predicate('grep(regex)')
1058 def grep(repo, subset, x):
1058 def grep(repo, subset, x):
1059 """Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
1059 """Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
1060 to ensure special escape characters are handled correctly. Unlike
1060 to ensure special escape characters are handled correctly. Unlike
1061 ``keyword(string)``, the match is case-sensitive.
1061 ``keyword(string)``, the match is case-sensitive.
1062 """
1062 """
1063 try:
1063 try:
1064 # i18n: "grep" is a keyword
1064 # i18n: "grep" is a keyword
1065 gr = re.compile(getstring(x, _("grep requires a string")))
1065 gr = re.compile(getstring(x, _("grep requires a string")))
1066 except re.error as e:
1066 except re.error as e:
1067 raise error.ParseError(_('invalid match pattern: %s') % e)
1067 raise error.ParseError(_('invalid match pattern: %s') % e)
1068
1068
1069 def matches(x):
1069 def matches(x):
1070 c = repo[x]
1070 c = repo[x]
1071 for e in c.files() + [c.user(), c.description()]:
1071 for e in c.files() + [c.user(), c.description()]:
1072 if gr.search(e):
1072 if gr.search(e):
1073 return True
1073 return True
1074 return False
1074 return False
1075
1075
1076 return subset.filter(matches, condrepr=('<grep %r>', gr.pattern))
1076 return subset.filter(matches, condrepr=('<grep %r>', gr.pattern))
1077
1077
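# Illustrative contrast with keyword() (defined further down): grep() applies
# a case-sensitive regular expression to the same fields, e.g.:
#
#   hg log -r "grep(r'issue\d+')"   # matches "issue1234" but not "Issue1234"
#   hg log -r "keyword('issue')"    # case-insensitive substring match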
1078 @predicate('_matchfiles', safe=True)
1078 @predicate('_matchfiles', safe=True)
1079 def _matchfiles(repo, subset, x):
1079 def _matchfiles(repo, subset, x):
1080 # _matchfiles takes a revset list of prefixed arguments:
1080 # _matchfiles takes a revset list of prefixed arguments:
1081 #
1081 #
1082 # [p:foo, i:bar, x:baz]
1082 # [p:foo, i:bar, x:baz]
1083 #
1083 #
1084 # builds a match object from them and filters subset. Allowed
1084 # builds a match object from them and filters subset. Allowed
1085 # prefixes are 'p:' for regular patterns, 'i:' for include
1085 # prefixes are 'p:' for regular patterns, 'i:' for include
1086 # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
1086 # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
1087 # a revision identifier, or the empty string to reference the
1087 # a revision identifier, or the empty string to reference the
1088 # working directory, from which the match object is
1088 # working directory, from which the match object is
1089 # initialized. Use 'd:' to set the default matching mode, defaulting
1089 # initialized. Use 'd:' to set the default matching mode, defaulting
1090 # to 'glob'. At most one 'r:' and 'd:' argument can be passed.
1090 # to 'glob'. At most one 'r:' and 'd:' argument can be passed.
1091
1091
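# For example, "file('*.py')" (see hasfile() below) arrives here as
# _matchfiles(repo, subset, ('string', 'p:*.py')), i.e. a single
# 'p:'-prefixed pattern using the default 'glob' matching mode.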
1092 l = getargs(x, 1, -1, "_matchfiles requires at least one argument")
1092 l = getargs(x, 1, -1, "_matchfiles requires at least one argument")
1093 pats, inc, exc = [], [], []
1093 pats, inc, exc = [], [], []
1094 rev, default = None, None
1094 rev, default = None, None
1095 for arg in l:
1095 for arg in l:
1096 s = getstring(arg, "_matchfiles requires string arguments")
1096 s = getstring(arg, "_matchfiles requires string arguments")
1097 prefix, value = s[:2], s[2:]
1097 prefix, value = s[:2], s[2:]
1098 if prefix == 'p:':
1098 if prefix == 'p:':
1099 pats.append(value)
1099 pats.append(value)
1100 elif prefix == 'i:':
1100 elif prefix == 'i:':
1101 inc.append(value)
1101 inc.append(value)
1102 elif prefix == 'x:':
1102 elif prefix == 'x:':
1103 exc.append(value)
1103 exc.append(value)
1104 elif prefix == 'r:':
1104 elif prefix == 'r:':
1105 if rev is not None:
1105 if rev is not None:
1106 raise error.ParseError('_matchfiles expected at most one '
1106 raise error.ParseError('_matchfiles expected at most one '
1107 'revision')
1107 'revision')
1108 if value != '': # empty means working directory; leave rev as None
1108 if value != '': # empty means working directory; leave rev as None
1109 rev = value
1109 rev = value
1110 elif prefix == 'd:':
1110 elif prefix == 'd:':
1111 if default is not None:
1111 if default is not None:
1112 raise error.ParseError('_matchfiles expected at most one '
1112 raise error.ParseError('_matchfiles expected at most one '
1113 'default mode')
1113 'default mode')
1114 default = value
1114 default = value
1115 else:
1115 else:
1116 raise error.ParseError('invalid _matchfiles prefix: %s' % prefix)
1116 raise error.ParseError('invalid _matchfiles prefix: %s' % prefix)
1117 if not default:
1117 if not default:
1118 default = 'glob'
1118 default = 'glob'
1119
1119
1120 m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
1120 m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
1121 exclude=exc, ctx=repo[rev], default=default)
1121 exclude=exc, ctx=repo[rev], default=default)
1122
1122
1123 # This directly reads the changelog data, as creating a changectx for all
1123 # This directly reads the changelog data, as creating a changectx for all
1124 # revisions is quite expensive.
1124 # revisions is quite expensive.
1125 getfiles = repo.changelog.readfiles
1125 getfiles = repo.changelog.readfiles
1126 wdirrev = node.wdirrev
1126 wdirrev = node.wdirrev
1127 def matches(x):
1127 def matches(x):
1128 if x == wdirrev:
1128 if x == wdirrev:
1129 files = repo[x].files()
1129 files = repo[x].files()
1130 else:
1130 else:
1131 files = getfiles(x)
1131 files = getfiles(x)
1132 for f in files:
1132 for f in files:
1133 if m(f):
1133 if m(f):
1134 return True
1134 return True
1135 return False
1135 return False
1136
1136
1137 return subset.filter(matches,
1137 return subset.filter(matches,
1138 condrepr=('<matchfiles patterns=%r, include=%r '
1138 condrepr=('<matchfiles patterns=%r, include=%r '
1139 'exclude=%r, default=%r, rev=%r>',
1139 'exclude=%r, default=%r, rev=%r>',
1140 pats, inc, exc, default, rev))
1140 pats, inc, exc, default, rev))
1141
1141
1142 @predicate('file(pattern)', safe=True)
1142 @predicate('file(pattern)', safe=True)
1143 def hasfile(repo, subset, x):
1143 def hasfile(repo, subset, x):
1144 """Changesets affecting files matched by pattern.
1144 """Changesets affecting files matched by pattern.
1145
1145
1146 For a faster but less accurate result, consider using ``filelog()``
1146 For a faster but less accurate result, consider using ``filelog()``
1147 instead.
1147 instead.
1148
1148
1149 This predicate uses ``glob:`` as the default kind of pattern.
1149 This predicate uses ``glob:`` as the default kind of pattern.
1150 """
1150 """
1151 # i18n: "file" is a keyword
1151 # i18n: "file" is a keyword
1152 pat = getstring(x, _("file requires a pattern"))
1152 pat = getstring(x, _("file requires a pattern"))
1153 return _matchfiles(repo, subset, ('string', 'p:' + pat))
1153 return _matchfiles(repo, subset, ('string', 'p:' + pat))
1154
1154
1155 @predicate('head()', safe=True)
1155 @predicate('head()', safe=True)
1156 def head(repo, subset, x):
1156 def head(repo, subset, x):
1157 """Changeset is a named branch head.
1157 """Changeset is a named branch head.
1158 """
1158 """
1159 # i18n: "head" is a keyword
1159 # i18n: "head" is a keyword
1160 getargs(x, 0, 0, _("head takes no arguments"))
1160 getargs(x, 0, 0, _("head takes no arguments"))
1161 hs = set()
1161 hs = set()
1162 cl = repo.changelog
1162 cl = repo.changelog
1163 for ls in repo.branchmap().itervalues():
1163 for ls in repo.branchmap().itervalues():
1164 hs.update(cl.rev(h) for h in ls)
1164 hs.update(cl.rev(h) for h in ls)
1165 return subset & baseset(hs)
1165 return subset & baseset(hs)
1166
1166
1167 @predicate('heads(set)', safe=True)
1167 @predicate('heads(set)', safe=True)
1168 def heads(repo, subset, x):
1168 def heads(repo, subset, x):
1169 """Members of set with no children in set.
1169 """Members of set with no children in set.
1170 """
1170 """
1171 s = getset(repo, subset, x)
1171 s = getset(repo, subset, x)
1172 ps = parents(repo, subset, x)
1172 ps = parents(repo, subset, x)
1173 return s - ps
1173 return s - ps
1174
1174
1175 @predicate('hidden()', safe=True)
1175 @predicate('hidden()', safe=True)
1176 def hidden(repo, subset, x):
1176 def hidden(repo, subset, x):
1177 """Hidden changesets.
1177 """Hidden changesets.
1178 """
1178 """
1179 # i18n: "hidden" is a keyword
1179 # i18n: "hidden" is a keyword
1180 getargs(x, 0, 0, _("hidden takes no arguments"))
1180 getargs(x, 0, 0, _("hidden takes no arguments"))
1181 hiddenrevs = repoview.filterrevs(repo, 'visible')
1181 hiddenrevs = repoview.filterrevs(repo, 'visible')
1182 return subset & hiddenrevs
1182 return subset & hiddenrevs
1183
1183
1184 @predicate('keyword(string)', safe=True)
1184 @predicate('keyword(string)', safe=True)
1185 def keyword(repo, subset, x):
1185 def keyword(repo, subset, x):
1186 """Search commit message, user name, and names of changed files for
1186 """Search commit message, user name, and names of changed files for
1187 string. The match is case-insensitive.
1187 string. The match is case-insensitive.
1188 """
1188 """
1189 # i18n: "keyword" is a keyword
1189 # i18n: "keyword" is a keyword
1190 kw = encoding.lower(getstring(x, _("keyword requires a string")))
1190 kw = encoding.lower(getstring(x, _("keyword requires a string")))
1191
1191
1192 def matches(r):
1192 def matches(r):
1193 c = repo[r]
1193 c = repo[r]
1194 return any(kw in encoding.lower(t)
1194 return any(kw in encoding.lower(t)
1195 for t in c.files() + [c.user(), c.description()])
1195 for t in c.files() + [c.user(), c.description()])
1196
1196
1197 return subset.filter(matches, condrepr=('<keyword %r>', kw))
1197 return subset.filter(matches, condrepr=('<keyword %r>', kw))
1198
1198
1199 @predicate('limit(set[, n[, offset]])', safe=True)
1199 @predicate('limit(set[, n[, offset]])', safe=True)
1200 def limit(repo, subset, x):
1200 def limit(repo, subset, x):
1201 """First n members of set, defaulting to 1, starting from offset.
1201 """First n members of set, defaulting to 1, starting from offset.
1202 """
1202 """
1203 args = getargsdict(x, 'limit', 'set n offset')
1203 args = getargsdict(x, 'limit', 'set n offset')
1204 if 'set' not in args:
1204 if 'set' not in args:
1205 # i18n: "limit" is a keyword
1205 # i18n: "limit" is a keyword
1206 raise error.ParseError(_("limit requires one to three arguments"))
1206 raise error.ParseError(_("limit requires one to three arguments"))
1207 try:
1207 try:
1208 lim, ofs = 1, 0
1208 lim, ofs = 1, 0
1209 if 'n' in args:
1209 if 'n' in args:
1210 # i18n: "limit" is a keyword
1210 # i18n: "limit" is a keyword
1211 lim = int(getstring(args['n'], _("limit requires a number")))
1211 lim = int(getstring(args['n'], _("limit requires a number")))
1212 if 'offset' in args:
1212 if 'offset' in args:
1213 # i18n: "limit" is a keyword
1213 # i18n: "limit" is a keyword
1214 ofs = int(getstring(args['offset'], _("limit requires a number")))
1214 ofs = int(getstring(args['offset'], _("limit requires a number")))
1215 if ofs < 0:
1215 if ofs < 0:
1216 raise error.ParseError(_("negative offset"))
1216 raise error.ParseError(_("negative offset"))
1217 except (TypeError, ValueError):
1217 except (TypeError, ValueError):
1218 # i18n: "limit" is a keyword
1218 # i18n: "limit" is a keyword
1219 raise error.ParseError(_("limit expects a number"))
1219 raise error.ParseError(_("limit expects a number"))
1220 os = getset(repo, fullreposet(repo), args['set'])
1220 os = getset(repo, fullreposet(repo), args['set'])
1221 result = []
1221 result = []
1222 it = iter(os)
1222 it = iter(os)
1223 for x in xrange(ofs):
1223 for x in xrange(ofs):
1224 y = next(it, None)
1224 y = next(it, None)
1225 if y is None:
1225 if y is None:
1226 break
1226 break
1227 for x in xrange(lim):
1227 for x in xrange(lim):
1228 y = next(it, None)
1228 y = next(it, None)
1229 if y is None:
1229 if y is None:
1230 break
1230 break
1231 elif y in subset:
1231 elif y in subset:
1232 result.append(y)
1232 result.append(y)
1233 return baseset(result, datarepr=('<limit n=%d, offset=%d, %r, %r>',
1233 return baseset(result, datarepr=('<limit n=%d, offset=%d, %r, %r>',
1234 lim, ofs, subset, os))
1234 lim, ofs, subset, os))
1235
1235
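# Worked examples (illustrative), following the order of the inner set:
#
#   hg log -r "limit(branch(default), 3)"   # first 3 revisions on default
#   hg log -r "limit(all(), 5, 100)"        # revisions 100..104, if present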
1236 @predicate('last(set, [n])', safe=True)
1236 @predicate('last(set, [n])', safe=True)
1237 def last(repo, subset, x):
1237 def last(repo, subset, x):
1238 """Last n members of set, defaulting to 1.
1238 """Last n members of set, defaulting to 1.
1239 """
1239 """
1240 # i18n: "last" is a keyword
1240 # i18n: "last" is a keyword
1241 l = getargs(x, 1, 2, _("last requires one or two arguments"))
1241 l = getargs(x, 1, 2, _("last requires one or two arguments"))
1242 try:
1242 try:
1243 lim = 1
1243 lim = 1
1244 if len(l) == 2:
1244 if len(l) == 2:
1245 # i18n: "last" is a keyword
1245 # i18n: "last" is a keyword
1246 lim = int(getstring(l[1], _("last requires a number")))
1246 lim = int(getstring(l[1], _("last requires a number")))
1247 except (TypeError, ValueError):
1247 except (TypeError, ValueError):
1248 # i18n: "last" is a keyword
1248 # i18n: "last" is a keyword
1249 raise error.ParseError(_("last expects a number"))
1249 raise error.ParseError(_("last expects a number"))
1250 os = getset(repo, fullreposet(repo), l[0])
1250 os = getset(repo, fullreposet(repo), l[0])
1251 os.reverse()
1251 os.reverse()
1252 result = []
1252 result = []
1253 it = iter(os)
1253 it = iter(os)
1254 for x in xrange(lim):
1254 for x in xrange(lim):
1255 y = next(it, None)
1255 y = next(it, None)
1256 if y is None:
1256 if y is None:
1257 break
1257 break
1258 elif y in subset:
1258 elif y in subset:
1259 result.append(y)
1259 result.append(y)
1260 return baseset(result, datarepr=('<last n=%d, %r, %r>', lim, subset, os))
1260 return baseset(result, datarepr=('<last n=%d, %r, %r>', lim, subset, os))
1261
1261
1262 @predicate('max(set)', safe=True)
1262 @predicate('max(set)', safe=True)
1263 def maxrev(repo, subset, x):
1263 def maxrev(repo, subset, x):
1264 """Changeset with highest revision number in set.
1264 """Changeset with highest revision number in set.
1265 """
1265 """
1266 os = getset(repo, fullreposet(repo), x)
1266 os = getset(repo, fullreposet(repo), x)
1267 try:
1267 try:
1268 m = os.max()
1268 m = os.max()
1269 if m in subset:
1269 if m in subset:
1270 return baseset([m], datarepr=('<max %r, %r>', subset, os))
1270 return baseset([m], datarepr=('<max %r, %r>', subset, os))
1271 except ValueError:
1271 except ValueError:
1272 # os.max() throws a ValueError when the collection is empty.
1272 # os.max() throws a ValueError when the collection is empty.
1273 # Same as python's max().
1273 # Same as python's max().
1274 pass
1274 pass
1275 return baseset(datarepr=('<max %r, %r>', subset, os))
1275 return baseset(datarepr=('<max %r, %r>', subset, os))
1276
1276
1277 @predicate('merge()', safe=True)
1277 @predicate('merge()', safe=True)
1278 def merge(repo, subset, x):
1278 def merge(repo, subset, x):
1279 """Changeset is a merge changeset.
1279 """Changeset is a merge changeset.
1280 """
1280 """
1281 # i18n: "merge" is a keyword
1281 # i18n: "merge" is a keyword
1282 getargs(x, 0, 0, _("merge takes no arguments"))
1282 getargs(x, 0, 0, _("merge takes no arguments"))
1283 cl = repo.changelog
1283 cl = repo.changelog
1284 return subset.filter(lambda r: cl.parentrevs(r)[1] != -1,
1284 return subset.filter(lambda r: cl.parentrevs(r)[1] != -1,
1285 condrepr='<merge>')
1285 condrepr='<merge>')
1286
1286
1287 @predicate('branchpoint()', safe=True)
1287 @predicate('branchpoint()', safe=True)
1288 def branchpoint(repo, subset, x):
1288 def branchpoint(repo, subset, x):
1289 """Changesets with more than one child.
1289 """Changesets with more than one child.
1290 """
1290 """
1291 # i18n: "branchpoint" is a keyword
1291 # i18n: "branchpoint" is a keyword
1292 getargs(x, 0, 0, _("branchpoint takes no arguments"))
1292 getargs(x, 0, 0, _("branchpoint takes no arguments"))
1293 cl = repo.changelog
1293 cl = repo.changelog
1294 if not subset:
1294 if not subset:
1295 return baseset()
1295 return baseset()
1296 # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
1296 # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
1297 # (and if it is not, it should.)
1297 # (and if it is not, it should.)
1298 baserev = min(subset)
1298 baserev = min(subset)
1299 parentscount = [0]*(len(repo) - baserev)
1299 parentscount = [0]*(len(repo) - baserev)
1300 for r in cl.revs(start=baserev + 1):
1300 for r in cl.revs(start=baserev + 1):
1301 for p in cl.parentrevs(r):
1301 for p in cl.parentrevs(r):
1302 if p >= baserev:
1302 if p >= baserev:
1303 parentscount[p - baserev] += 1
1303 parentscount[p - baserev] += 1
1304 return subset.filter(lambda r: parentscount[r - baserev] > 1,
1304 return subset.filter(lambda r: parentscount[r - baserev] > 1,
1305 condrepr='<branchpoint>')
1305 condrepr='<branchpoint>')
1306
1306
1307 @predicate('min(set)', safe=True)
1307 @predicate('min(set)', safe=True)
1308 def minrev(repo, subset, x):
1308 def minrev(repo, subset, x):
1309 """Changeset with lowest revision number in set.
1309 """Changeset with lowest revision number in set.
1310 """
1310 """
1311 os = getset(repo, fullreposet(repo), x)
1311 os = getset(repo, fullreposet(repo), x)
1312 try:
1312 try:
1313 m = os.min()
1313 m = os.min()
1314 if m in subset:
1314 if m in subset:
1315 return baseset([m], datarepr=('<min %r, %r>', subset, os))
1315 return baseset([m], datarepr=('<min %r, %r>', subset, os))
1316 except ValueError:
1316 except ValueError:
1317 # os.min() throws a ValueError when the collection is empty.
1317 # os.min() throws a ValueError when the collection is empty.
1318 # Same as python's min().
1318 # Same as python's min().
1319 pass
1319 pass
1320 return baseset(datarepr=('<min %r, %r>', subset, os))
1320 return baseset(datarepr=('<min %r, %r>', subset, os))
1321
1321
1322 @predicate('modifies(pattern)', safe=True)
1322 @predicate('modifies(pattern)', safe=True)
1323 def modifies(repo, subset, x):
1323 def modifies(repo, subset, x):
1324 """Changesets modifying files matched by pattern.
1324 """Changesets modifying files matched by pattern.
1325
1325
1326 The pattern without explicit kind like ``glob:`` is expected to be
1326 The pattern without explicit kind like ``glob:`` is expected to be
1327 relative to the current directory and match against a file or a
1327 relative to the current directory and match against a file or a
1328 directory.
1328 directory.
1329 """
1329 """
1330 # i18n: "modifies" is a keyword
1330 # i18n: "modifies" is a keyword
1331 pat = getstring(x, _("modifies requires a pattern"))
1331 pat = getstring(x, _("modifies requires a pattern"))
1332 return checkstatus(repo, subset, pat, 0)
1332 return checkstatus(repo, subset, pat, 0)
1333
1333
1334 @predicate('named(namespace)')
1334 @predicate('named(namespace)')
1335 def named(repo, subset, x):
1335 def named(repo, subset, x):
1336 """The changesets in a given namespace.
1336 """The changesets in a given namespace.
1337
1337
1338 If `namespace` starts with `re:`, the remainder of the string is treated as
1338 If `namespace` starts with `re:`, the remainder of the string is treated as
1339 a regular expression. To match a namespace that actually starts with `re:`,
1339 a regular expression. To match a namespace that actually starts with `re:`,
1340 use the prefix `literal:`.
1340 use the prefix `literal:`.
1341 """
1341 """
1342 # i18n: "named" is a keyword
1342 # i18n: "named" is a keyword
1343 args = getargs(x, 1, 1, _('named requires a namespace argument'))
1343 args = getargs(x, 1, 1, _('named requires a namespace argument'))
1344
1344
1345 ns = getstring(args[0],
1345 ns = getstring(args[0],
1346 # i18n: "named" is a keyword
1346 # i18n: "named" is a keyword
1347 _('the argument to named must be a string'))
1347 _('the argument to named must be a string'))
1348 kind, pattern, matcher = util.stringmatcher(ns)
1348 kind, pattern, matcher = util.stringmatcher(ns)
1349 namespaces = set()
1349 namespaces = set()
1350 if kind == 'literal':
1350 if kind == 'literal':
1351 if pattern not in repo.names:
1351 if pattern not in repo.names:
1352 raise error.RepoLookupError(_("namespace '%s' does not exist")
1352 raise error.RepoLookupError(_("namespace '%s' does not exist")
1353 % ns)
1353 % ns)
1354 namespaces.add(repo.names[pattern])
1354 namespaces.add(repo.names[pattern])
1355 else:
1355 else:
1356 for name, ns in repo.names.iteritems():
1356 for name, ns in repo.names.iteritems():
1357 if matcher(name):
1357 if matcher(name):
1358 namespaces.add(ns)
1358 namespaces.add(ns)
1359 if not namespaces:
1359 if not namespaces:
1360 raise error.RepoLookupError(_("no namespace exists"
1360 raise error.RepoLookupError(_("no namespace exists"
1361 " that match '%s'") % pattern)
1361 " that match '%s'") % pattern)
1362
1362
1363 names = set()
1363 names = set()
1364 for ns in namespaces:
1364 for ns in namespaces:
1365 for name in ns.listnames(repo):
1365 for name in ns.listnames(repo):
1366 if name not in ns.deprecated:
1366 if name not in ns.deprecated:
1367 names.update(repo[n].rev() for n in ns.nodes(repo, name))
1367 names.update(repo[n].rev() for n in ns.nodes(repo, name))
1368
1368
1369 names -= set([node.nullrev])
1369 names -= set([node.nullrev])
1370 return subset & names
1370 return subset & names
1371
1371
1372 @predicate('id(string)', safe=True)
1372 @predicate('id(string)', safe=True)
1373 def node_(repo, subset, x):
1373 def node_(repo, subset, x):
1374 """Revision non-ambiguously specified by the given hex string prefix.
1374 """Revision non-ambiguously specified by the given hex string prefix.
1375 """
1375 """
1376 # i18n: "id" is a keyword
1376 # i18n: "id" is a keyword
1377 l = getargs(x, 1, 1, _("id requires one argument"))
1377 l = getargs(x, 1, 1, _("id requires one argument"))
1378 # i18n: "id" is a keyword
1378 # i18n: "id" is a keyword
1379 n = getstring(l[0], _("id requires a string"))
1379 n = getstring(l[0], _("id requires a string"))
1380 if len(n) == 40:
1380 if len(n) == 40:
1381 try:
1381 try:
1382 rn = repo.changelog.rev(node.bin(n))
1382 rn = repo.changelog.rev(node.bin(n))
1383 except (LookupError, TypeError):
1383 except (LookupError, TypeError):
1384 rn = None
1384 rn = None
1385 else:
1385 else:
1386 rn = None
1386 rn = None
1387 pm = repo.changelog._partialmatch(n)
1387 pm = repo.changelog._partialmatch(n)
1388 if pm is not None:
1388 if pm is not None:
1389 rn = repo.changelog.rev(pm)
1389 rn = repo.changelog.rev(pm)
1390
1390
1391 if rn is None:
1391 if rn is None:
1392 return baseset()
1392 return baseset()
1393 result = baseset([rn])
1393 result = baseset([rn])
1394 return result & subset
1394 return result & subset
1395
1395
1396 @predicate('obsolete()', safe=True)
1396 @predicate('obsolete()', safe=True)
1397 def obsolete(repo, subset, x):
1397 def obsolete(repo, subset, x):
1398 """Mutable changeset with a newer version."""
1398 """Mutable changeset with a newer version."""
1399 # i18n: "obsolete" is a keyword
1399 # i18n: "obsolete" is a keyword
1400 getargs(x, 0, 0, _("obsolete takes no arguments"))
1400 getargs(x, 0, 0, _("obsolete takes no arguments"))
1401 obsoletes = obsmod.getrevs(repo, 'obsolete')
1401 obsoletes = obsmod.getrevs(repo, 'obsolete')
1402 return subset & obsoletes
1402 return subset & obsoletes
1403
1403
1404 @predicate('only(set, [set])', safe=True)
1404 @predicate('only(set, [set])', safe=True)
1405 def only(repo, subset, x):
1405 def only(repo, subset, x):
1406 """Changesets that are ancestors of the first set that are not ancestors
1406 """Changesets that are ancestors of the first set that are not ancestors
1407 of any other head in the repo. If a second set is specified, the result
1407 of any other head in the repo. If a second set is specified, the result
1408 is ancestors of the first set that are not ancestors of the second set
1408 is ancestors of the first set that are not ancestors of the second set
1409 (i.e. ::<set1> - ::<set2>).
1409 (i.e. ::<set1> - ::<set2>).
1410 """
1410 """
1411 cl = repo.changelog
1411 cl = repo.changelog
1412 # i18n: "only" is a keyword
1412 # i18n: "only" is a keyword
1413 args = getargs(x, 1, 2, _('only takes one or two arguments'))
1413 args = getargs(x, 1, 2, _('only takes one or two arguments'))
1414 include = getset(repo, fullreposet(repo), args[0])
1414 include = getset(repo, fullreposet(repo), args[0])
1415 if len(args) == 1:
1415 if len(args) == 1:
1416 if not include:
1416 if not include:
1417 return baseset()
1417 return baseset()
1418
1418
1419 descendants = set(_revdescendants(repo, include, False))
1419 descendants = set(_revdescendants(repo, include, False))
1420 exclude = [rev for rev in cl.headrevs()
1420 exclude = [rev for rev in cl.headrevs()
1421 if not rev in descendants and not rev in include]
1421 if not rev in descendants and not rev in include]
1422 else:
1422 else:
1423 exclude = getset(repo, fullreposet(repo), args[1])
1423 exclude = getset(repo, fullreposet(repo), args[1])
1424
1424
1425 results = set(cl.findmissingrevs(common=exclude, heads=include))
1425 results = set(cl.findmissingrevs(common=exclude, heads=include))
1426 # XXX we should turn this into a baseset instead of a set, smartset may do
1426 # XXX we should turn this into a baseset instead of a set, smartset may do
1427 # some optimisations from the fact this is a baseset.
1427 # some optimisations from the fact this is a baseset.
1428 return subset & results
1428 return subset & results
1429
1429
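# Equivalent formulations (branch/head names are placeholders):
#
#   hg log -r "only(feature, default)"   # roughly ::feature - ::default
#   hg log -r "only(feature)"            # ancestors of feature not reachable
#                                        # from any other head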
1430 @predicate('origin([set])', safe=True)
1430 @predicate('origin([set])', safe=True)
1431 def origin(repo, subset, x):
1431 def origin(repo, subset, x):
1432 """
1432 """
1433 Changesets that were specified as a source for the grafts, transplants or
1433 Changesets that were specified as a source for the grafts, transplants or
1434 rebases that created the given revisions. Omitting the optional set is the
1434 rebases that created the given revisions. Omitting the optional set is the
1435 same as passing all(). If a changeset created by these operations is itself
1435 same as passing all(). If a changeset created by these operations is itself
1436 specified as a source for one of these operations, only the source changeset
1436 specified as a source for one of these operations, only the source changeset
1437 for the first operation is selected.
1437 for the first operation is selected.
1438 """
1438 """
1439 if x is not None:
1439 if x is not None:
1440 dests = getset(repo, fullreposet(repo), x)
1440 dests = getset(repo, fullreposet(repo), x)
1441 else:
1441 else:
1442 dests = fullreposet(repo)
1442 dests = fullreposet(repo)
1443
1443
1444 def _firstsrc(rev):
1444 def _firstsrc(rev):
1445 src = _getrevsource(repo, rev)
1445 src = _getrevsource(repo, rev)
1446 if src is None:
1446 if src is None:
1447 return None
1447 return None
1448
1448
1449 while True:
1449 while True:
1450 prev = _getrevsource(repo, src)
1450 prev = _getrevsource(repo, src)
1451
1451
1452 if prev is None:
1452 if prev is None:
1453 return src
1453 return src
1454 src = prev
1454 src = prev
1455
1455
1456 o = set([_firstsrc(r) for r in dests])
1456 o = set([_firstsrc(r) for r in dests])
1457 o -= set([None])
1457 o -= set([None])
1458 # XXX we should turn this into a baseset instead of a set, smartset may do
1458 # XXX we should turn this into a baseset instead of a set, smartset may do
1459 # some optimisations from the fact this is a baseset.
1459 # some optimisations from the fact this is a baseset.
1460 return subset & o
1460 return subset & o
1461
1461
1462 @predicate('outgoing([path])', safe=True)
1462 @predicate('outgoing([path])', safe=True)
1463 def outgoing(repo, subset, x):
1463 def outgoing(repo, subset, x):
1464 """Changesets not found in the specified destination repository, or the
1464 """Changesets not found in the specified destination repository, or the
1465 default push location.
1465 default push location.
1466 """
1466 """
1467 # Avoid cycles.
1467 # Avoid cycles.
1468 from . import (
1468 from . import (
1469 discovery,
1469 discovery,
1470 hg,
1470 hg,
1471 )
1471 )
1472 # i18n: "outgoing" is a keyword
1472 # i18n: "outgoing" is a keyword
1473 l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
1473 l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
1474 # i18n: "outgoing" is a keyword
1474 # i18n: "outgoing" is a keyword
1475 dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
1475 dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
1476 dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
1476 dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
1477 dest, branches = hg.parseurl(dest)
1477 dest, branches = hg.parseurl(dest)
1478 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1478 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1479 if revs:
1479 if revs:
1480 revs = [repo.lookup(rev) for rev in revs]
1480 revs = [repo.lookup(rev) for rev in revs]
1481 other = hg.peer(repo, {}, dest)
1481 other = hg.peer(repo, {}, dest)
1482 repo.ui.pushbuffer()
1482 repo.ui.pushbuffer()
1483 outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
1483 outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
1484 repo.ui.popbuffer()
1484 repo.ui.popbuffer()
1485 cl = repo.changelog
1485 cl = repo.changelog
1486 o = set([cl.rev(r) for r in outgoing.missing])
1486 o = set([cl.rev(r) for r in outgoing.missing])
1487 return subset & o
1487 return subset & o
1488
1488
1489 @predicate('p1([set])', safe=True)
1489 @predicate('p1([set])', safe=True)
1490 def p1(repo, subset, x):
1490 def p1(repo, subset, x):
1491 """First parent of changesets in set, or the working directory.
1491 """First parent of changesets in set, or the working directory.
1492 """
1492 """
1493 if x is None:
1493 if x is None:
1494 p = repo[x].p1().rev()
1494 p = repo[x].p1().rev()
1495 if p >= 0:
1495 if p >= 0:
1496 return subset & baseset([p])
1496 return subset & baseset([p])
1497 return baseset()
1497 return baseset()
1498
1498
1499 ps = set()
1499 ps = set()
1500 cl = repo.changelog
1500 cl = repo.changelog
1501 for r in getset(repo, fullreposet(repo), x):
1501 for r in getset(repo, fullreposet(repo), x):
1502 ps.add(cl.parentrevs(r)[0])
1502 ps.add(cl.parentrevs(r)[0])
1503 ps -= set([node.nullrev])
1503 ps -= set([node.nullrev])
1504 # XXX we should turn this into a baseset instead of a set, smartset may do
1504 # XXX we should turn this into a baseset instead of a set, smartset may do
1505 # some optimisations from the fact this is a baseset.
1505 # some optimisations from the fact this is a baseset.
1506 return subset & ps
1506 return subset & ps
1507
1507
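# With no argument, p1() and p2() below refer to the working directory:
#
#   hg log -r "p1()"     # first parent of the working directory, i.e. "."
#   hg log -r "p2(tip)"  # second parent of tip; empty unless tip is a merge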
1508 @predicate('p2([set])', safe=True)
1508 @predicate('p2([set])', safe=True)
1509 def p2(repo, subset, x):
1509 def p2(repo, subset, x):
1510 """Second parent of changesets in set, or the working directory.
1510 """Second parent of changesets in set, or the working directory.
1511 """
1511 """
1512 if x is None:
1512 if x is None:
1513 ps = repo[x].parents()
1513 ps = repo[x].parents()
1514 try:
1514 try:
1515 p = ps[1].rev()
1515 p = ps[1].rev()
1516 if p >= 0:
1516 if p >= 0:
1517 return subset & baseset([p])
1517 return subset & baseset([p])
1518 return baseset()
1518 return baseset()
1519 except IndexError:
1519 except IndexError:
1520 return baseset()
1520 return baseset()
1521
1521
1522 ps = set()
1522 ps = set()
1523 cl = repo.changelog
1523 cl = repo.changelog
1524 for r in getset(repo, fullreposet(repo), x):
1524 for r in getset(repo, fullreposet(repo), x):
1525 ps.add(cl.parentrevs(r)[1])
1525 ps.add(cl.parentrevs(r)[1])
1526 ps -= set([node.nullrev])
1526 ps -= set([node.nullrev])
1527 # XXX we should turn this into a baseset instead of a set, smartset may do
1527 # XXX we should turn this into a baseset instead of a set, smartset may do
1528 # some optimisations from the fact this is a baseset.
1528 # some optimisations from the fact this is a baseset.
1529 return subset & ps
1529 return subset & ps
1530
1530
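# Handler for the 'parentpost' operation, i.e. a trailing "^" with no
# explicit parent number (as in "set^"); for now this stub simply
# delegates to p1().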
1531 def parentpost(repo, subset, x):
1532 return p1(repo, subset, x)
1533
1531 @predicate('parents([set])', safe=True)
1534 @predicate('parents([set])', safe=True)
1532 def parents(repo, subset, x):
1535 def parents(repo, subset, x):
1533 """
1536 """
1534 The set of all parents for all changesets in set, or the working directory.
1537 The set of all parents for all changesets in set, or the working directory.
1535 """
1538 """
1536 if x is None:
1539 if x is None:
1537 ps = set(p.rev() for p in repo[x].parents())
1540 ps = set(p.rev() for p in repo[x].parents())
1538 else:
1541 else:
1539 ps = set()
1542 ps = set()
1540 cl = repo.changelog
1543 cl = repo.changelog
1541 up = ps.update
1544 up = ps.update
1542 parentrevs = cl.parentrevs
1545 parentrevs = cl.parentrevs
1543 for r in getset(repo, fullreposet(repo), x):
1546 for r in getset(repo, fullreposet(repo), x):
1544 if r == node.wdirrev:
1547 if r == node.wdirrev:
1545 up(p.rev() for p in repo[r].parents())
1548 up(p.rev() for p in repo[r].parents())
1546 else:
1549 else:
1547 up(parentrevs(r))
1550 up(parentrevs(r))
1548 ps -= set([node.nullrev])
1551 ps -= set([node.nullrev])
1549 return subset & ps
1552 return subset & ps
1550
1553
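# Example (illustrative): "parents(tip)" selects the same revisions as
# "p1(tip) + p2(tip)", and a bare "parents()" yields the parent(s) of the
# working directory:
#
#   hg log -r "parents()"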
1551 def _phase(repo, subset, target):
1554 def _phase(repo, subset, target):
1552 """helper to select all rev in phase <target>"""
1555 """helper to select all rev in phase <target>"""
1553 repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
1556 repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
1554 if repo._phasecache._phasesets:
1557 if repo._phasecache._phasesets:
1555 s = repo._phasecache._phasesets[target] - repo.changelog.filteredrevs
1558 s = repo._phasecache._phasesets[target] - repo.changelog.filteredrevs
1556 s = baseset(s)
1559 s = baseset(s)
1557 s.sort() # set are non ordered, so we enforce ascending
1560 s.sort() # set are non ordered, so we enforce ascending
1558 return subset & s
1561 return subset & s
1559 else:
1562 else:
1560 phase = repo._phasecache.phase
1563 phase = repo._phasecache.phase
1561 condition = lambda r: phase(repo, r) == target
1564 condition = lambda r: phase(repo, r) == target
1562 return subset.filter(condition, condrepr=('<phase %r>', target),
1565 return subset.filter(condition, condrepr=('<phase %r>', target),
1563 cache=False)
1566 cache=False)
1564
1567
1565 @predicate('draft()', safe=True)
1568 @predicate('draft()', safe=True)
1566 def draft(repo, subset, x):
1569 def draft(repo, subset, x):
1567 """Changeset in draft phase."""
1570 """Changeset in draft phase."""
1568 # i18n: "draft" is a keyword
1571 # i18n: "draft" is a keyword
1569 getargs(x, 0, 0, _("draft takes no arguments"))
1572 getargs(x, 0, 0, _("draft takes no arguments"))
1570 target = phases.draft
1573 target = phases.draft
1571 return _phase(repo, subset, target)
1574 return _phase(repo, subset, target)
1572
1575
1573 @predicate('secret()', safe=True)
1576 @predicate('secret()', safe=True)
1574 def secret(repo, subset, x):
1577 def secret(repo, subset, x):
1575 """Changeset in secret phase."""
1578 """Changeset in secret phase."""
1576 # i18n: "secret" is a keyword
1579 # i18n: "secret" is a keyword
1577 getargs(x, 0, 0, _("secret takes no arguments"))
1580 getargs(x, 0, 0, _("secret takes no arguments"))
1578 target = phases.secret
1581 target = phases.secret
1579 return _phase(repo, subset, target)
1582 return _phase(repo, subset, target)
1580
1583
1581 def parentspec(repo, subset, x, n):
1584 def parentspec(repo, subset, x, n):
1582 """``set^0``
1585 """``set^0``
1583 The set.
1586 The set.
1584 ``set^1`` (or ``set^``), ``set^2``
1587 ``set^1`` (or ``set^``), ``set^2``
1585 First or second parent, respectively, of all changesets in set.
1588 First or second parent, respectively, of all changesets in set.
1586 """
1589 """
1587 try:
1590 try:
1588 n = int(n[1])
1591 n = int(n[1])
1589 if n not in (0, 1, 2):
1592 if n not in (0, 1, 2):
1590 raise ValueError
1593 raise ValueError
1591 except (TypeError, ValueError):
1594 except (TypeError, ValueError):
1592 raise error.ParseError(_("^ expects a number 0, 1, or 2"))
1595 raise error.ParseError(_("^ expects a number 0, 1, or 2"))
1593 ps = set()
1596 ps = set()
1594 cl = repo.changelog
1597 cl = repo.changelog
1595 for r in getset(repo, fullreposet(repo), x):
1598 for r in getset(repo, fullreposet(repo), x):
1596 if n == 0:
1599 if n == 0:
1597 ps.add(r)
1600 ps.add(r)
1598 elif n == 1:
1601 elif n == 1:
1599 ps.add(cl.parentrevs(r)[0])
1602 ps.add(cl.parentrevs(r)[0])
1600 elif n == 2:
1603 elif n == 2:
1601 parents = cl.parentrevs(r)
1604 parents = cl.parentrevs(r)
1602 if len(parents) > 1:
1605 if len(parents) > 1:
1603 ps.add(parents[1])
1606 ps.add(parents[1])
1604 return subset & ps
1607 return subset & ps
1605
1608
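# Examples of the "^n" suffix handled here (illustrative):
#
#   hg log -r "tip^0"   # tip itself
#   hg log -r "tip^1"   # first parent of tip (a bare "tip^" takes the
#                       # parentpost path above, but means the same thing)
#   hg log -r "tip^2"   # second parent of tip; empty if tip is not a merge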
1606 @predicate('present(set)', safe=True)
1609 @predicate('present(set)', safe=True)
1607 def present(repo, subset, x):
1610 def present(repo, subset, x):
1608 """An empty set, if any revision in set isn't found; otherwise,
1611 """An empty set, if any revision in set isn't found; otherwise,
1609 all revisions in set.
1612 all revisions in set.
1610
1613
1611 If any of the specified revisions is not present in the local repository,
1614 If any of the specified revisions is not present in the local repository,
1612 the query is normally aborted. But this predicate allows the query
1615 the query is normally aborted. But this predicate allows the query
1613 to continue even in such cases.
1616 to continue even in such cases.
1614 """
1617 """
1615 try:
1618 try:
1616 return getset(repo, subset, x)
1619 return getset(repo, subset, x)
1617 except error.RepoLookupError:
1620 except error.RepoLookupError:
1618 return baseset()
1621 return baseset()
1619
1622
1620 # for internal use
1623 # for internal use
1621 @predicate('_notpublic', safe=True)
1624 @predicate('_notpublic', safe=True)
1622 def _notpublic(repo, subset, x):
1625 def _notpublic(repo, subset, x):
1623 getargs(x, 0, 0, "_notpublic takes no arguments")
1626 getargs(x, 0, 0, "_notpublic takes no arguments")
1624 repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
1627 repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
1625 if repo._phasecache._phasesets:
1628 if repo._phasecache._phasesets:
1626 s = set()
1629 s = set()
1627 for u in repo._phasecache._phasesets[1:]:
1630 for u in repo._phasecache._phasesets[1:]:
1628 s.update(u)
1631 s.update(u)
1629 s = baseset(s - repo.changelog.filteredrevs)
1632 s = baseset(s - repo.changelog.filteredrevs)
1630 s.sort()
1633 s.sort()
1631 return subset & s
1634 return subset & s
1632 else:
1635 else:
1633 phase = repo._phasecache.phase
1636 phase = repo._phasecache.phase
1634 target = phases.public
1637 target = phases.public
1635 condition = lambda r: phase(repo, r) != target
1638 condition = lambda r: phase(repo, r) != target
1636 return subset.filter(condition, condrepr=('<phase %r>', target),
1639 return subset.filter(condition, condrepr=('<phase %r>', target),
1637 cache=False)
1640 cache=False)
1638
1641
1639 @predicate('public()', safe=True)
1642 @predicate('public()', safe=True)
1640 def public(repo, subset, x):
1643 def public(repo, subset, x):
1641 """Changeset in public phase."""
1644 """Changeset in public phase."""
1642 # i18n: "public" is a keyword
1645 # i18n: "public" is a keyword
1643 getargs(x, 0, 0, _("public takes no arguments"))
1646 getargs(x, 0, 0, _("public takes no arguments"))
1644 phase = repo._phasecache.phase
1647 phase = repo._phasecache.phase
1645 target = phases.public
1648 target = phases.public
1646 condition = lambda r: phase(repo, r) == target
1649 condition = lambda r: phase(repo, r) == target
1647 return subset.filter(condition, condrepr=('<phase %r>', target),
1650 return subset.filter(condition, condrepr=('<phase %r>', target),
1648 cache=False)
1651 cache=False)
1649
1652
1650 @predicate('remote([id [,path]])', safe=True)
1653 @predicate('remote([id [,path]])', safe=True)
1651 def remote(repo, subset, x):
1654 def remote(repo, subset, x):
1652 """Local revision that corresponds to the given identifier in a
1655 """Local revision that corresponds to the given identifier in a
1653 remote repository, if present. Here, the '.' identifier is a
1656 remote repository, if present. Here, the '.' identifier is a
1654 synonym for the current local branch.
1657 synonym for the current local branch.
1655 """
1658 """
1656
1659
1657 from . import hg # avoid start-up nasties
1660 from . import hg # avoid start-up nasties
1658 # i18n: "remote" is a keyword
1661 # i18n: "remote" is a keyword
1659 l = getargs(x, 0, 2, _("remote takes zero, one, or two arguments"))
1662 l = getargs(x, 0, 2, _("remote takes zero, one, or two arguments"))
1660
1663
1661 q = '.'
1664 q = '.'
1662 if len(l) > 0:
1665 if len(l) > 0:
1663 # i18n: "remote" is a keyword
1666 # i18n: "remote" is a keyword
1664 q = getstring(l[0], _("remote requires a string id"))
1667 q = getstring(l[0], _("remote requires a string id"))
1665 if q == '.':
1668 if q == '.':
1666 q = repo['.'].branch()
1669 q = repo['.'].branch()
1667
1670
1668 dest = ''
1671 dest = ''
1669 if len(l) > 1:
1672 if len(l) > 1:
1670 # i18n: "remote" is a keyword
1673 # i18n: "remote" is a keyword
1671 dest = getstring(l[1], _("remote requires a repository path"))
1674 dest = getstring(l[1], _("remote requires a repository path"))
1672 dest = repo.ui.expandpath(dest or 'default')
1675 dest = repo.ui.expandpath(dest or 'default')
1673 dest, branches = hg.parseurl(dest)
1676 dest, branches = hg.parseurl(dest)
1674 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1677 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1675 if revs:
1678 if revs:
1676 revs = [repo.lookup(rev) for rev in revs]
1679 revs = [repo.lookup(rev) for rev in revs]
1677 other = hg.peer(repo, {}, dest)
1680 other = hg.peer(repo, {}, dest)
1678 n = other.lookup(q)
1681 n = other.lookup(q)
1679 if n in repo:
1682 if n in repo:
1680 r = repo[n].rev()
1683 r = repo[n].rev()
1681 if r in subset:
1684 if r in subset:
1682 return baseset([r])
1685 return baseset([r])
1683 return baseset()
1686 return baseset()
1684
1687
1685 @predicate('removes(pattern)', safe=True)
1688 @predicate('removes(pattern)', safe=True)
1686 def removes(repo, subset, x):
1689 def removes(repo, subset, x):
1687 """Changesets which remove files matching pattern.
1690 """Changesets which remove files matching pattern.
1688
1691
1689 The pattern without explicit kind like ``glob:`` is expected to be
1692 The pattern without explicit kind like ``glob:`` is expected to be
1690 relative to the current directory and match against a file or a
1693 relative to the current directory and match against a file or a
1691 directory.
1694 directory.
1692 """
1695 """
1693 # i18n: "removes" is a keyword
1696 # i18n: "removes" is a keyword
1694 pat = getstring(x, _("removes requires a pattern"))
1697 pat = getstring(x, _("removes requires a pattern"))
1695 return checkstatus(repo, subset, pat, 2)
1698 return checkstatus(repo, subset, pat, 2)
1696
1699
1697 @predicate('rev(number)', safe=True)
1700 @predicate('rev(number)', safe=True)
1698 def rev(repo, subset, x):
1701 def rev(repo, subset, x):
1699 """Revision with the given numeric identifier.
1702 """Revision with the given numeric identifier.
1700 """
1703 """
1701 # i18n: "rev" is a keyword
1704 # i18n: "rev" is a keyword
1702 l = getargs(x, 1, 1, _("rev requires one argument"))
1705 l = getargs(x, 1, 1, _("rev requires one argument"))
1703 try:
1706 try:
1704 # i18n: "rev" is a keyword
1707 # i18n: "rev" is a keyword
1705 l = int(getstring(l[0], _("rev requires a number")))
1708 l = int(getstring(l[0], _("rev requires a number")))
1706 except (TypeError, ValueError):
1709 except (TypeError, ValueError):
1707 # i18n: "rev" is a keyword
1710 # i18n: "rev" is a keyword
1708 raise error.ParseError(_("rev expects a number"))
1711 raise error.ParseError(_("rev expects a number"))
1709 if l not in repo.changelog and l != node.nullrev:
1712 if l not in repo.changelog and l != node.nullrev:
1710 return baseset()
1713 return baseset()
1711 return subset & baseset([l])
1714 return subset & baseset([l])
1712
1715
1713 @predicate('matching(revision [, field])', safe=True)
1716 @predicate('matching(revision [, field])', safe=True)
1714 def matching(repo, subset, x):
1717 def matching(repo, subset, x):
1715 """Changesets in which a given set of fields match the set of fields in the
1718 """Changesets in which a given set of fields match the set of fields in the
1716 selected revision or set.
1719 selected revision or set.
1717
1720
1718 To match more than one field pass the list of fields to match separated
1721 To match more than one field pass the list of fields to match separated
1719 by spaces (e.g. ``author description``).
1722 by spaces (e.g. ``author description``).
1720
1723
1721 Valid fields are most regular revision fields and some special fields.
1724 Valid fields are most regular revision fields and some special fields.
1722
1725
1723 Regular revision fields are ``description``, ``author``, ``branch``,
1726 Regular revision fields are ``description``, ``author``, ``branch``,
1724 ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
1727 ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
1725 and ``diff``.
1728 and ``diff``.
1726 Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
1729 Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
1727 contents of the revision. Two revisions matching their ``diff`` will
1730 contents of the revision. Two revisions matching their ``diff`` will
1728 also match their ``files``.
1731 also match their ``files``.
1729
1732
1730 Special fields are ``summary`` and ``metadata``:
1733 Special fields are ``summary`` and ``metadata``:
1731 ``summary`` matches the first line of the description.
1734 ``summary`` matches the first line of the description.
1732 ``metadata`` is equivalent to matching ``description user date``
1735 ``metadata`` is equivalent to matching ``description user date``
1733 (i.e. it matches the main metadata fields).
1736 (i.e. it matches the main metadata fields).
1734
1737
1735 ``metadata`` is the default field which is used when no fields are
1738 ``metadata`` is the default field which is used when no fields are
1736 specified. You can match more than one field at a time.
1739 specified. You can match more than one field at a time.
1737 """
1740 """
1738 # i18n: "matching" is a keyword
1741 # i18n: "matching" is a keyword
1739 l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))
1742 l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))
1740
1743
1741 revs = getset(repo, fullreposet(repo), l[0])
1744 revs = getset(repo, fullreposet(repo), l[0])
1742
1745
1743 fieldlist = ['metadata']
1746 fieldlist = ['metadata']
1744 if len(l) > 1:
1747 if len(l) > 1:
1745 fieldlist = getstring(l[1],
1748 fieldlist = getstring(l[1],
1746 # i18n: "matching" is a keyword
1749 # i18n: "matching" is a keyword
1747 _("matching requires a string "
1750 _("matching requires a string "
1748 "as its second argument")).split()
1751 "as its second argument")).split()
1749
1752
1750 # Make sure that there are no repeated fields,
1753 # Make sure that there are no repeated fields,
1751 # expand the 'special' 'metadata' field type
1754 # expand the 'special' 'metadata' field type
1752 # and check the 'files' whenever we check the 'diff'
1755 # and check the 'files' whenever we check the 'diff'
1753 fields = []
1756 fields = []
1754 for field in fieldlist:
1757 for field in fieldlist:
1755 if field == 'metadata':
1758 if field == 'metadata':
1756 fields += ['user', 'description', 'date']
1759 fields += ['user', 'description', 'date']
1757 elif field == 'diff':
1760 elif field == 'diff':
1758 # a revision matching the diff must also match the files
1761 # a revision matching the diff must also match the files
1759 # since matching the diff is very costly, make sure to
1762 # since matching the diff is very costly, make sure to
1760 # also match the files first
1763 # also match the files first
1761 fields += ['files', 'diff']
1764 fields += ['files', 'diff']
1762 else:
1765 else:
1763 if field == 'author':
1766 if field == 'author':
1764 field = 'user'
1767 field = 'user'
1765 fields.append(field)
1768 fields.append(field)
1766 fields = set(fields)
1769 fields = set(fields)
1767 if 'summary' in fields and 'description' in fields:
1770 if 'summary' in fields and 'description' in fields:
1768 # If a revision matches its description it also matches its summary
1771 # If a revision matches its description it also matches its summary
1769 fields.discard('summary')
1772 fields.discard('summary')
1770
1773
1771 # We may want to match more than one field
1774 # We may want to match more than one field
1772 # Not all fields take the same amount of time to be matched
1775 # Not all fields take the same amount of time to be matched
1773 # Sort the selected fields in order of increasing matching cost
1776 # Sort the selected fields in order of increasing matching cost
1774 fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
1777 fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
1775 'files', 'description', 'substate', 'diff']
1778 'files', 'description', 'substate', 'diff']
1776 def fieldkeyfunc(f):
1779 def fieldkeyfunc(f):
1777 try:
1780 try:
1778 return fieldorder.index(f)
1781 return fieldorder.index(f)
1779 except ValueError:
1782 except ValueError:
1780 # assume an unknown field is very costly
1783 # assume an unknown field is very costly
1781 return len(fieldorder)
1784 return len(fieldorder)
1782 fields = list(fields)
1785 fields = list(fields)
1783 fields.sort(key=fieldkeyfunc)
1786 fields.sort(key=fieldkeyfunc)
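# For illustration (editorial addition): with fields == {'diff', 'user', 'files'}
# the sort above yields ['user', 'files', 'diff'], so the cheap 'user' check can
# reject a revision before the expensive 'diff' comparison is ever computed.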
1784
1787
1785 # Each field will be matched with its own "getfield" function
1788 # Each field will be matched with its own "getfield" function
1786 # which will be added to the getfieldfuncs array of functions
1789 # which will be added to the getfieldfuncs array of functions
1787 getfieldfuncs = []
1790 getfieldfuncs = []
1788 _funcs = {
1791 _funcs = {
1789 'user': lambda r: repo[r].user(),
1792 'user': lambda r: repo[r].user(),
1790 'branch': lambda r: repo[r].branch(),
1793 'branch': lambda r: repo[r].branch(),
1791 'date': lambda r: repo[r].date(),
1794 'date': lambda r: repo[r].date(),
1792 'description': lambda r: repo[r].description(),
1795 'description': lambda r: repo[r].description(),
1793 'files': lambda r: repo[r].files(),
1796 'files': lambda r: repo[r].files(),
1794 'parents': lambda r: repo[r].parents(),
1797 'parents': lambda r: repo[r].parents(),
1795 'phase': lambda r: repo[r].phase(),
1798 'phase': lambda r: repo[r].phase(),
1796 'substate': lambda r: repo[r].substate,
1799 'substate': lambda r: repo[r].substate,
1797 'summary': lambda r: repo[r].description().splitlines()[0],
1800 'summary': lambda r: repo[r].description().splitlines()[0],
1798 'diff': lambda r: list(repo[r].diff(git=True)),
1801 'diff': lambda r: list(repo[r].diff(git=True)),
1799 }
1802 }
1800 for info in fields:
1803 for info in fields:
1801 getfield = _funcs.get(info, None)
1804 getfield = _funcs.get(info, None)
1802 if getfield is None:
1805 if getfield is None:
1803 raise error.ParseError(
1806 raise error.ParseError(
1804 # i18n: "matching" is a keyword
1807 # i18n: "matching" is a keyword
1805 _("unexpected field name passed to matching: %s") % info)
1808 _("unexpected field name passed to matching: %s") % info)
1806 getfieldfuncs.append(getfield)
1809 getfieldfuncs.append(getfield)
1807 # convert the getfield array of functions into a "getinfo" function
1810 # convert the getfield array of functions into a "getinfo" function
1808 # which returns an array of field values (or a single value if there
1811 # which returns an array of field values (or a single value if there
1809 # is only one field to match)
1812 # is only one field to match)
1810 getinfo = lambda r: [f(r) for f in getfieldfuncs]
1813 getinfo = lambda r: [f(r) for f in getfieldfuncs]
1811
1814
1812 def matches(x):
1815 def matches(x):
1813 for rev in revs:
1816 for rev in revs:
1814 target = getinfo(rev)
1817 target = getinfo(rev)
1815 match = True
1818 match = True
1816 for n, f in enumerate(getfieldfuncs):
1819 for n, f in enumerate(getfieldfuncs):
1817 if target[n] != f(x):
1820 if target[n] != f(x):
1818 match = False
1821 match = False
1819 if match:
1822 if match:
1820 return True
1823 return True
1821 return False
1824 return False
1822
1825
1823 return subset.filter(matches, condrepr=('<matching%r %r>', fields, revs))
1826 return subset.filter(matches, condrepr=('<matching%r %r>', fields, revs))
1824
1827
1825 @predicate('reverse(set)', safe=True)
1828 @predicate('reverse(set)', safe=True)
1826 def reverse(repo, subset, x):
1829 def reverse(repo, subset, x):
1827 """Reverse order of set.
1830 """Reverse order of set.
1828 """
1831 """
1829 l = getset(repo, subset, x)
1832 l = getset(repo, subset, x)
1830 l.reverse()
1833 l.reverse()
1831 return l
1834 return l
1832
1835
1833 @predicate('roots(set)', safe=True)
1836 @predicate('roots(set)', safe=True)
1834 def roots(repo, subset, x):
1837 def roots(repo, subset, x):
1835 """Changesets in set with no parent changeset in set.
1838 """Changesets in set with no parent changeset in set.
1836 """
1839 """
1837 s = getset(repo, fullreposet(repo), x)
1840 s = getset(repo, fullreposet(repo), x)
1838 parents = repo.changelog.parentrevs
1841 parents = repo.changelog.parentrevs
1839 def filter(r):
1842 def filter(r):
1840 for p in parents(r):
1843 for p in parents(r):
1841 if 0 <= p and p in s:
1844 if 0 <= p and p in s:
1842 return False
1845 return False
1843 return True
1846 return True
1844 return subset & s.filter(filter, condrepr='<roots>')
1847 return subset & s.filter(filter, condrepr='<roots>')
1845
1848
1846 _sortkeyfuncs = {
1849 _sortkeyfuncs = {
1847 'rev': lambda c: c.rev(),
1850 'rev': lambda c: c.rev(),
1848 'branch': lambda c: c.branch(),
1851 'branch': lambda c: c.branch(),
1849 'desc': lambda c: c.description(),
1852 'desc': lambda c: c.description(),
1850 'user': lambda c: c.user(),
1853 'user': lambda c: c.user(),
1851 'author': lambda c: c.user(),
1854 'author': lambda c: c.user(),
1852 'date': lambda c: c.date()[0],
1855 'date': lambda c: c.date()[0],
1853 }
1856 }
1854
1857
1855 def _getsortargs(x):
1858 def _getsortargs(x):
1856 """Parse sort options into (set, [(key, reverse)], opts)"""
1859 """Parse sort options into (set, [(key, reverse)], opts)"""
1857 args = getargsdict(x, 'sort', 'set keys topo.firstbranch')
1860 args = getargsdict(x, 'sort', 'set keys topo.firstbranch')
1858 if 'set' not in args:
1861 if 'set' not in args:
1859 # i18n: "sort" is a keyword
1862 # i18n: "sort" is a keyword
1860 raise error.ParseError(_('sort requires one or two arguments'))
1863 raise error.ParseError(_('sort requires one or two arguments'))
1861 keys = "rev"
1864 keys = "rev"
1862 if 'keys' in args:
1865 if 'keys' in args:
1863 # i18n: "sort" is a keyword
1866 # i18n: "sort" is a keyword
1864 keys = getstring(args['keys'], _("sort spec must be a string"))
1867 keys = getstring(args['keys'], _("sort spec must be a string"))
1865
1868
1866 keyflags = []
1869 keyflags = []
1867 for k in keys.split():
1870 for k in keys.split():
1868 fk = k
1871 fk = k
1869 reverse = (k[0] == '-')
1872 reverse = (k[0] == '-')
1870 if reverse:
1873 if reverse:
1871 k = k[1:]
1874 k = k[1:]
1872 if k not in _sortkeyfuncs and k != 'topo':
1875 if k not in _sortkeyfuncs and k != 'topo':
1873 raise error.ParseError(_("unknown sort key %r") % fk)
1876 raise error.ParseError(_("unknown sort key %r") % fk)
1874 keyflags.append((k, reverse))
1877 keyflags.append((k, reverse))
1875
1878
1876 if len(keyflags) > 1 and any(k == 'topo' for k, reverse in keyflags):
1879 if len(keyflags) > 1 and any(k == 'topo' for k, reverse in keyflags):
1877 # i18n: "topo" is a keyword
1880 # i18n: "topo" is a keyword
1878 raise error.ParseError(_('topo sort order cannot be combined '
1881 raise error.ParseError(_('topo sort order cannot be combined '
1879 'with other sort keys'))
1882 'with other sort keys'))
1880
1883
1881 opts = {}
1884 opts = {}
1882 if 'topo.firstbranch' in args:
1885 if 'topo.firstbranch' in args:
1883 if any(k == 'topo' for k, reverse in keyflags):
1886 if any(k == 'topo' for k, reverse in keyflags):
1884 opts['topo.firstbranch'] = args['topo.firstbranch']
1887 opts['topo.firstbranch'] = args['topo.firstbranch']
1885 else:
1888 else:
1886 # i18n: "topo" and "topo.firstbranch" are keywords
1889 # i18n: "topo" and "topo.firstbranch" are keywords
1887 raise error.ParseError(_('topo.firstbranch can only be used '
1890 raise error.ParseError(_('topo.firstbranch can only be used '
1888 'when using the topo sort key'))
1891 'when using the topo sort key'))
1889
1892
1890 return args['set'], keyflags, opts
1893 return args['set'], keyflags, opts
1891
1894
1892 @predicate('sort(set[, [-]key... [, ...]])', safe=True)
1895 @predicate('sort(set[, [-]key... [, ...]])', safe=True)
1893 def sort(repo, subset, x):
1896 def sort(repo, subset, x):
1894 """Sort set by keys. The default sort order is ascending, specify a key
1897 """Sort set by keys. The default sort order is ascending, specify a key
1895 as ``-key`` to sort in descending order.
1898 as ``-key`` to sort in descending order.
1896
1899
1897 The keys can be:
1900 The keys can be:
1898
1901
1899 - ``rev`` for the revision number,
1902 - ``rev`` for the revision number,
1900 - ``branch`` for the branch name,
1903 - ``branch`` for the branch name,
1901 - ``desc`` for the commit message (description),
1904 - ``desc`` for the commit message (description),
1902 - ``user`` for user name (``author`` can be used as an alias),
1905 - ``user`` for user name (``author`` can be used as an alias),
1903 - ``date`` for the commit date,
1906 - ``date`` for the commit date,
1904 - ``topo`` for a reverse topological sort
1907 - ``topo`` for a reverse topological sort
1905
1908
1906 The ``topo`` sort order cannot be combined with other sort keys. This sort
1909 The ``topo`` sort order cannot be combined with other sort keys. This sort
1907 takes one optional argument, ``topo.firstbranch``, which takes a revset that
1910 takes one optional argument, ``topo.firstbranch``, which takes a revset that
1908 specifies what topological branches to prioritize in the sort.
1911 specifies what topological branches to prioritize in the sort.
1909
1912
1910 """
1913 """
1911 s, keyflags, opts = _getsortargs(x)
1914 s, keyflags, opts = _getsortargs(x)
1912 revs = getset(repo, subset, s)
1915 revs = getset(repo, subset, s)
1913
1916
1914 if not keyflags:
1917 if not keyflags:
1915 return revs
1918 return revs
1916 if len(keyflags) == 1 and keyflags[0][0] == "rev":
1919 if len(keyflags) == 1 and keyflags[0][0] == "rev":
1917 revs.sort(reverse=keyflags[0][1])
1920 revs.sort(reverse=keyflags[0][1])
1918 return revs
1921 return revs
1919 elif keyflags[0][0] == "topo":
1922 elif keyflags[0][0] == "topo":
1920 firstbranch = ()
1923 firstbranch = ()
1921 if 'topo.firstbranch' in opts:
1924 if 'topo.firstbranch' in opts:
1922 firstbranch = getset(repo, subset, opts['topo.firstbranch'])
1925 firstbranch = getset(repo, subset, opts['topo.firstbranch'])
1923 revs = baseset(_toposort(revs, repo.changelog.parentrevs, firstbranch),
1926 revs = baseset(_toposort(revs, repo.changelog.parentrevs, firstbranch),
1924 istopo=True)
1927 istopo=True)
1925 if keyflags[0][1]:
1928 if keyflags[0][1]:
1926 revs.reverse()
1929 revs.reverse()
1927 return revs
1930 return revs
1928
1931
1929 # sort() is guaranteed to be stable
1932 # sort() is guaranteed to be stable
1930 ctxs = [repo[r] for r in revs]
1933 ctxs = [repo[r] for r in revs]
1931 for k, reverse in reversed(keyflags):
1934 for k, reverse in reversed(keyflags):
1932 ctxs.sort(key=_sortkeyfuncs[k], reverse=reverse)
1935 ctxs.sort(key=_sortkeyfuncs[k], reverse=reverse)
1933 return baseset([c.rev() for c in ctxs])
1936 return baseset([c.rev() for c in ctxs])
1934
1937
1935 def _toposort(revs, parentsfunc, firstbranch=()):
1938 def _toposort(revs, parentsfunc, firstbranch=()):
1936 """Yield revisions from heads to roots one (topo) branch at a time.
1939 """Yield revisions from heads to roots one (topo) branch at a time.
1937
1940
1938 This function aims to be used by a graph generator that wishes to minimize
1941 This function aims to be used by a graph generator that wishes to minimize
1939 the number of parallel branches and their interleaving.
1942 the number of parallel branches and their interleaving.
1940
1943
1941 Example iteration order (numbers show the "true" order in a changelog):
1944 Example iteration order (numbers show the "true" order in a changelog):
1942
1945
1943 o 4
1946 o 4
1944 |
1947 |
1945 o 1
1948 o 1
1946 |
1949 |
1947 | o 3
1950 | o 3
1948 | |
1951 | |
1949 | o 2
1952 | o 2
1950 |/
1953 |/
1951 o 0
1954 o 0
1952
1955
1953 Note that the ancestors of merges are understood by the current
1956 Note that the ancestors of merges are understood by the current
1954 algorithm to be on the same branch. This means no reordering will
1957 algorithm to be on the same branch. This means no reordering will
1955 occur behind a merge.
1958 occur behind a merge.
1956 """
1959 """
1957
1960
1958 ### Quick summary of the algorithm
1961 ### Quick summary of the algorithm
1959 #
1962 #
1960 # This function is based around a "retention" principle. We keep revisions
1963 # This function is based around a "retention" principle. We keep revisions
1961 # in memory until we are ready to emit a whole branch that immediately
1964 # in memory until we are ready to emit a whole branch that immediately
1962 # "merges" into an existing one. This reduces the number of parallel
1965 # "merges" into an existing one. This reduces the number of parallel
1963 # branches with interleaved revisions.
1966 # branches with interleaved revisions.
1964 #
1967 #
1965 # During iteration revs are split into two groups:
1968 # During iteration revs are split into two groups:
1966 # A) revisions already emitted
1969 # A) revisions already emitted
1967 # B) revisions in "retention"; they are stored as distinct subgroups.
1970 # B) revisions in "retention"; they are stored as distinct subgroups.
1968 #
1971 #
1969 # for each REV, we do the following logic:
1972 # for each REV, we do the following logic:
1970 #
1973 #
1971 # 1) if REV is a parent of (A), we will emit it. If there is a
1974 # 1) if REV is a parent of (A), we will emit it. If there is a
1972 # retention group ((B) above) that is blocked on REV being
1975 # retention group ((B) above) that is blocked on REV being
1973 # available, we emit all the revisions out of that retention
1976 # available, we emit all the revisions out of that retention
1974 # group first.
1977 # group first.
1975 #
1978 #
1976 # 2) else, we search for a subgroup in (B) waiting for REV to become
1979 # 2) else, we search for a subgroup in (B) waiting for REV to become
1977 # available; if such a subgroup exists, we add REV to it and the subgroup
1980 # available; if such a subgroup exists, we add REV to it and the subgroup
1978 # is now waiting for REV.parents() to become available.
1981 # is now waiting for REV.parents() to become available.
1979 #
1982 #
1980 # 3) finally, if no such subgroup exists in (B), we create a new one.
1983 # 3) finally, if no such subgroup exists in (B), we create a new one.
1981 #
1984 #
1982 #
1985 #
1983 # To bootstrap the algorithm, we emit the tipmost revision (which
1986 # To bootstrap the algorithm, we emit the tipmost revision (which
1984 # puts it in group (A) from above).
1987 # puts it in group (A) from above).
1985
1988
1986 revs.sort(reverse=True)
1989 revs.sort(reverse=True)
1987
1990
1988 # Set of parents of revision that have been emitted. They can be considered
1991 # Set of parents of revision that have been emitted. They can be considered
1989 # unblocked as the graph generator is already aware of them so there is no
1992 # unblocked as the graph generator is already aware of them so there is no
1990 # need to delay the revisions that reference them.
1993 # need to delay the revisions that reference them.
1991 #
1994 #
1992 # If someone wants to prioritize a branch over the others, pre-filling this
1995 # If someone wants to prioritize a branch over the others, pre-filling this
1993 # set will force all other branches to wait until this branch is ready to be
1996 # set will force all other branches to wait until this branch is ready to be
1994 # emitted.
1997 # emitted.
1995 unblocked = set(firstbranch)
1998 unblocked = set(firstbranch)
1996
1999
1997 # list of groups waiting to be displayed, each group is defined by:
2000 # list of groups waiting to be displayed, each group is defined by:
1998 #
2001 #
1999 # (revs: list of revs waiting to be displayed,
2002 # (revs: list of revs waiting to be displayed,
2000 # blocked: set of revs that cannot be displayed before those in 'revs')
2003 # blocked: set of revs that cannot be displayed before those in 'revs')
2001 #
2004 #
2002 # The second value ('blocked') corresponds to the parents of revisions in
2005 # The second value ('blocked') corresponds to the parents of revisions in
2003 # the group ('revs') that are not themselves contained in the group. The
2006 # the group ('revs') that are not themselves contained in the group. The
2004 # main idea of this algorithm is to delay the emission of any revision as
2007 # main idea of this algorithm is to delay the emission of any revision as
2005 # much as possible: the revs in a group are only displayed at the moment
2008 # much as possible: the revs in a group are only displayed at the moment
2006 # we are about to display these parents.
2009 # we are about to display these parents.
2007 #
2010 #
2008 # This first implementation is smart until it encounters a merge: it will
2011 # This first implementation is smart until it encounters a merge: it will
2009 # emit revs as soon as any parent is about to be emitted and can grow an
2012 # emit revs as soon as any parent is about to be emitted and can grow an
2013 # arbitrary number of revs in 'blocked'. In practice this means we properly
2016 # arbitrary number of revs in 'blocked'. In practice this means we properly
2014 # retain new branches but give up on any special ordering for ancestors
2017 # retain new branches but give up on any special ordering for ancestors
2012 # of merges. The implementation can be improved to handle this better.
2015 # of merges. The implementation can be improved to handle this better.
2013 #
2016 #
2014 # The first subgroup is special. It corresponds to all the revisions that
2017 # The first subgroup is special. It corresponds to all the revisions that
2015 # were already emitted. Its 'revs' list is expected to be empty and its
2018 # were already emitted. Its 'revs' list is expected to be empty and its
2016 # 'blocked' set contains the parent revisions of already emitted revisions.
2019 # 'blocked' set contains the parent revisions of already emitted revisions.
2017 #
2020 #
2018 # You could pre-seed the <parents> set of groups[0] with specific
2021 # You could pre-seed the <parents> set of groups[0] with specific
2019 # changesets to select which branch is emitted first.
2022 # changesets to select which branch is emitted first.
2020 groups = [([], unblocked)]
2023 groups = [([], unblocked)]
2021 pendingheap = []
2024 pendingheap = []
2022 pendingset = set()
2025 pendingset = set()
2023
2026
2024 heapq.heapify(pendingheap)
2027 heapq.heapify(pendingheap)
2025 heappop = heapq.heappop
2028 heappop = heapq.heappop
2026 heappush = heapq.heappush
2029 heappush = heapq.heappush
2027 for currentrev in revs:
2030 for currentrev in revs:
2028 # The heap yields its smallest element first; we want the highest, so we invert
2031 # The heap yields its smallest element first; we want the highest, so we invert
2029 if currentrev not in pendingset:
2032 if currentrev not in pendingset:
2030 heappush(pendingheap, -currentrev)
2033 heappush(pendingheap, -currentrev)
2031 pendingset.add(currentrev)
2034 pendingset.add(currentrev)
2032 # iterate on pending revs until the current rev has been
2035 # iterate on pending revs until the current rev has been
2033 # processed.
2036 # processed.
2034 rev = None
2037 rev = None
2035 while rev != currentrev:
2038 while rev != currentrev:
2036 rev = -heappop(pendingheap)
2039 rev = -heappop(pendingheap)
2037 pendingset.remove(rev)
2040 pendingset.remove(rev)
2038
2041
2039 # Look for a subgroup that is blocked, waiting for the current revision.
2042 # Look for a subgroup that is blocked, waiting for the current revision.
2040 matching = [i for i, g in enumerate(groups) if rev in g[1]]
2043 matching = [i for i, g in enumerate(groups) if rev in g[1]]
2041
2044
2042 if matching:
2045 if matching:
2043 # The main idea is to gather together all sets that are blocked
2046 # The main idea is to gather together all sets that are blocked
2044 # on the same revision.
2047 # on the same revision.
2045 #
2048 #
2046 # Groups are merged when a common blocking ancestor is
2049 # Groups are merged when a common blocking ancestor is
2047 # observed. For example, given two groups:
2050 # observed. For example, given two groups:
2048 #
2051 #
2049 # revs [5, 4] waiting for 1
2052 # revs [5, 4] waiting for 1
2050 # revs [3, 2] waiting for 1
2053 # revs [3, 2] waiting for 1
2051 #
2054 #
2052 # These two groups will be merged when we process
2055 # These two groups will be merged when we process
2053 # 1. In theory, we could have merged the groups when
2056 # 1. In theory, we could have merged the groups when
2054 # we added 2 to the group it is now in (we could have
2057 # we added 2 to the group it is now in (we could have
2055 # noticed the groups were both blocked on 1 then), but
2058 # noticed the groups were both blocked on 1 then), but
2056 # the way it works now makes the algorithm simpler.
2059 # the way it works now makes the algorithm simpler.
2057 #
2060 #
2058 # We also always keep the oldest subgroup first. We can
2061 # We also always keep the oldest subgroup first. We can
2059 # probably improve the behavior by having the longest set
2062 # probably improve the behavior by having the longest set
2060 # first. That way, graph algorithms could minimize the length
2063 # first. That way, graph algorithms could minimize the length
2061 # of parallel lines in their drawing. This is currently not done.
2064 # of parallel lines in their drawing. This is currently not done.
2062 targetidx = matching.pop(0)
2065 targetidx = matching.pop(0)
2063 trevs, tparents = groups[targetidx]
2066 trevs, tparents = groups[targetidx]
2064 for i in matching:
2067 for i in matching:
2065 gr = groups[i]
2068 gr = groups[i]
2066 trevs.extend(gr[0])
2069 trevs.extend(gr[0])
2067 tparents |= gr[1]
2070 tparents |= gr[1]
2068 # delete all merged subgroups (except the one we kept)
2071 # delete all merged subgroups (except the one we kept)
2069 # (starting from the last subgroup for performance and
2072 # (starting from the last subgroup for performance and
2070 # sanity reasons)
2073 # sanity reasons)
2071 for i in reversed(matching):
2074 for i in reversed(matching):
2072 del groups[i]
2075 del groups[i]
2073 else:
2076 else:
2074 # This is a new head. We create a new subgroup for it.
2077 # This is a new head. We create a new subgroup for it.
2075 targetidx = len(groups)
2078 targetidx = len(groups)
2076 groups.append(([], set([rev])))
2079 groups.append(([], set([rev])))
2077
2080
2078 gr = groups[targetidx]
2081 gr = groups[targetidx]
2079
2082
2080 # We now add the current nodes to this subgroup. This is done
2083 # We now add the current nodes to this subgroup. This is done
2081 # after the subgroup merging because all elements from a subgroup
2084 # after the subgroup merging because all elements from a subgroup
2082 # that relied on this rev must precede it.
2085 # that relied on this rev must precede it.
2083 #
2086 #
2084 # we also update the <parents> set to include the parents of the
2087 # we also update the <parents> set to include the parents of the
2085 # new nodes.
2088 # new nodes.
2086 if rev == currentrev: # only display stuff in rev
2089 if rev == currentrev: # only display stuff in rev
2087 gr[0].append(rev)
2090 gr[0].append(rev)
2088 gr[1].remove(rev)
2091 gr[1].remove(rev)
2089 parents = [p for p in parentsfunc(rev) if p > node.nullrev]
2092 parents = [p for p in parentsfunc(rev) if p > node.nullrev]
2090 gr[1].update(parents)
2093 gr[1].update(parents)
2091 for p in parents:
2094 for p in parents:
2092 if p not in pendingset:
2095 if p not in pendingset:
2093 pendingset.add(p)
2096 pendingset.add(p)
2094 heappush(pendingheap, -p)
2097 heappush(pendingheap, -p)
2095
2098
2096 # Look for a subgroup to display
2099 # Look for a subgroup to display
2097 #
2100 #
2098 # When unblocked is empty (if clause), we are not waiting for any
2101 # When unblocked is empty (if clause), we are not waiting for any
2099 # revision: either no priority was given and this is the first
2102 # revision: either no priority was given and this is the first
2100 # iteration, or we just emitted a whole disconnected set of the graph
2103 # iteration, or we just emitted a whole disconnected set of the graph
2101 # (reached a root). In that case we arbitrarily take the oldest known
2104 # (reached a root). In that case we arbitrarily take the oldest known
2102 # subgroup. The heuristic could probably be better.
2105 # subgroup. The heuristic could probably be better.
2103 #
2106 #
2104 # Otherwise (elif clause) if the subgroup is blocked on
2107 # Otherwise (elif clause) if the subgroup is blocked on
2105 # a revision we just emitted, we can safely emit it as
2108 # a revision we just emitted, we can safely emit it as
2106 # well.
2109 # well.
2107 if not unblocked:
2110 if not unblocked:
2108 if len(groups) > 1: # display other subset
2111 if len(groups) > 1: # display other subset
2109 targetidx = 1
2112 targetidx = 1
2110 gr = groups[1]
2113 gr = groups[1]
2111 elif not gr[1] & unblocked:
2114 elif not gr[1] & unblocked:
2112 gr = None
2115 gr = None
2113
2116
2114 if gr is not None:
2117 if gr is not None:
2115 # update the set of awaited revisions with the one from the
2118 # update the set of awaited revisions with the one from the
2116 # subgroup
2119 # subgroup
2117 unblocked |= gr[1]
2120 unblocked |= gr[1]
2118 # output all revisions in the subgroup
2121 # output all revisions in the subgroup
2119 for r in gr[0]:
2122 for r in gr[0]:
2120 yield r
2123 yield r
2121 # delete the subgroup that you just output
2124 # delete the subgroup that you just output
2122 # unless it is groups[0] in which case you just empty it.
2125 # unless it is groups[0] in which case you just empty it.
2123 if targetidx:
2126 if targetidx:
2124 del groups[targetidx]
2127 del groups[targetidx]
2125 else:
2128 else:
2126 gr[0][:] = []
2129 gr[0][:] = []
2127 # Check if we have some subgroup waiting for revisions we are not going to
2130 # Check if we have some subgroup waiting for revisions we are not going to
2128 # iterate over
2131 # iterate over
2129 for g in groups:
2132 for g in groups:
2130 for r in g[0]:
2133 for r in g[0]:
2131 yield r
2134 yield r
2132
2135
2133 @predicate('subrepo([pattern])')
2136 @predicate('subrepo([pattern])')
2134 def subrepo(repo, subset, x):
2137 def subrepo(repo, subset, x):
2135 """Changesets that add, modify or remove the given subrepo. If no subrepo
2138 """Changesets that add, modify or remove the given subrepo. If no subrepo
2136 pattern is named, any subrepo changes are returned.
2139 pattern is named, any subrepo changes are returned.
2137 """
2140 """
2138 # i18n: "subrepo" is a keyword
2141 # i18n: "subrepo" is a keyword
2139 args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
2142 args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
2140 pat = None
2143 pat = None
2141 if len(args) != 0:
2144 if len(args) != 0:
2142 pat = getstring(args[0], _("subrepo requires a pattern"))
2145 pat = getstring(args[0], _("subrepo requires a pattern"))
2143
2146
2144 m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])
2147 m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])
2145
2148
2146 def submatches(names):
2149 def submatches(names):
2147 k, p, m = util.stringmatcher(pat)
2150 k, p, m = util.stringmatcher(pat)
2148 for name in names:
2151 for name in names:
2149 if m(name):
2152 if m(name):
2150 yield name
2153 yield name
2151
2154
2152 def matches(x):
2155 def matches(x):
2153 c = repo[x]
2156 c = repo[x]
2154 s = repo.status(c.p1().node(), c.node(), match=m)
2157 s = repo.status(c.p1().node(), c.node(), match=m)
2155
2158
2156 if pat is None:
2159 if pat is None:
2157 return s.added or s.modified or s.removed
2160 return s.added or s.modified or s.removed
2158
2161
2159 if s.added:
2162 if s.added:
2160 return any(submatches(c.substate.keys()))
2163 return any(submatches(c.substate.keys()))
2161
2164
2162 if s.modified:
2165 if s.modified:
2163 subs = set(c.p1().substate.keys())
2166 subs = set(c.p1().substate.keys())
2164 subs.update(c.substate.keys())
2167 subs.update(c.substate.keys())
2165
2168
2166 for path in submatches(subs):
2169 for path in submatches(subs):
2167 if c.p1().substate.get(path) != c.substate.get(path):
2170 if c.p1().substate.get(path) != c.substate.get(path):
2168 return True
2171 return True
2169
2172
2170 if s.removed:
2173 if s.removed:
2171 return any(submatches(c.p1().substate.keys()))
2174 return any(submatches(c.p1().substate.keys()))
2172
2175
2173 return False
2176 return False
2174
2177
2175 return subset.filter(matches, condrepr=('<subrepo %r>', pat))
2178 return subset.filter(matches, condrepr=('<subrepo %r>', pat))
2176
2179
2177 def _substringmatcher(pattern):
2180 def _substringmatcher(pattern):
2178 kind, pattern, matcher = util.stringmatcher(pattern)
2181 kind, pattern, matcher = util.stringmatcher(pattern)
2179 if kind == 'literal':
2182 if kind == 'literal':
2180 matcher = lambda s: pattern in s
2183 matcher = lambda s: pattern in s
2181 return kind, pattern, matcher
2184 return kind, pattern, matcher
2182
2185
2183 @predicate('tag([name])', safe=True)
2186 @predicate('tag([name])', safe=True)
2184 def tag(repo, subset, x):
2187 def tag(repo, subset, x):
2185 """The specified tag by name, or all tagged revisions if no name is given.
2188 """The specified tag by name, or all tagged revisions if no name is given.
2186
2189
2187 If `name` starts with `re:`, the remainder of the name is treated as
2190 If `name` starts with `re:`, the remainder of the name is treated as
2188 a regular expression. To match a tag that actually starts with `re:`,
2191 a regular expression. To match a tag that actually starts with `re:`,
2189 use the prefix `literal:`.
2192 use the prefix `literal:`.
2190 """
2193 """
2191 # i18n: "tag" is a keyword
2194 # i18n: "tag" is a keyword
2192 args = getargs(x, 0, 1, _("tag takes one or no arguments"))
2195 args = getargs(x, 0, 1, _("tag takes one or no arguments"))
2193 cl = repo.changelog
2196 cl = repo.changelog
2194 if args:
2197 if args:
2195 pattern = getstring(args[0],
2198 pattern = getstring(args[0],
2196 # i18n: "tag" is a keyword
2199 # i18n: "tag" is a keyword
2197 _('the argument to tag must be a string'))
2200 _('the argument to tag must be a string'))
2198 kind, pattern, matcher = util.stringmatcher(pattern)
2201 kind, pattern, matcher = util.stringmatcher(pattern)
2199 if kind == 'literal':
2202 if kind == 'literal':
2200 # avoid resolving all tags
2203 # avoid resolving all tags
2201 tn = repo._tagscache.tags.get(pattern, None)
2204 tn = repo._tagscache.tags.get(pattern, None)
2202 if tn is None:
2205 if tn is None:
2203 raise error.RepoLookupError(_("tag '%s' does not exist")
2206 raise error.RepoLookupError(_("tag '%s' does not exist")
2204 % pattern)
2207 % pattern)
2205 s = set([repo[tn].rev()])
2208 s = set([repo[tn].rev()])
2206 else:
2209 else:
2207 s = set([cl.rev(n) for t, n in repo.tagslist() if matcher(t)])
2210 s = set([cl.rev(n) for t, n in repo.tagslist() if matcher(t)])
2208 else:
2211 else:
2209 s = set([cl.rev(n) for t, n in repo.tagslist() if t != 'tip'])
2212 s = set([cl.rev(n) for t, n in repo.tagslist() if t != 'tip'])
2210 return subset & s
2213 return subset & s
2211
2214
2212 @predicate('tagged', safe=True)
2215 @predicate('tagged', safe=True)
2213 def tagged(repo, subset, x):
2216 def tagged(repo, subset, x):
2214 return tag(repo, subset, x)
2217 return tag(repo, subset, x)
2215
2218
2216 @predicate('unstable()', safe=True)
2219 @predicate('unstable()', safe=True)
2217 def unstable(repo, subset, x):
2220 def unstable(repo, subset, x):
2218 """Non-obsolete changesets with obsolete ancestors.
2221 """Non-obsolete changesets with obsolete ancestors.
2219 """
2222 """
2220 # i18n: "unstable" is a keyword
2223 # i18n: "unstable" is a keyword
2221 getargs(x, 0, 0, _("unstable takes no arguments"))
2224 getargs(x, 0, 0, _("unstable takes no arguments"))
2222 unstables = obsmod.getrevs(repo, 'unstable')
2225 unstables = obsmod.getrevs(repo, 'unstable')
2223 return subset & unstables
2226 return subset & unstables
2224
2227
2225
2228
2226 @predicate('user(string)', safe=True)
2229 @predicate('user(string)', safe=True)
2227 def user(repo, subset, x):
2230 def user(repo, subset, x):
2228 """User name contains string. The match is case-insensitive.
2231 """User name contains string. The match is case-insensitive.
2229
2232
2230 If `string` starts with `re:`, the remainder of the string is treated as
2233 If `string` starts with `re:`, the remainder of the string is treated as
2231 a regular expression. To match a user that actually contains `re:`, use
2234 a regular expression. To match a user that actually contains `re:`, use
2232 the prefix `literal:`.
2235 the prefix `literal:`.
2233 """
2236 """
2234 return author(repo, subset, x)
2237 return author(repo, subset, x)
2235
2238
2236 # experimental
2239 # experimental
2237 @predicate('wdir', safe=True)
2240 @predicate('wdir', safe=True)
2238 def wdir(repo, subset, x):
2241 def wdir(repo, subset, x):
2239 # i18n: "wdir" is a keyword
2242 # i18n: "wdir" is a keyword
2240 getargs(x, 0, 0, _("wdir takes no arguments"))
2243 getargs(x, 0, 0, _("wdir takes no arguments"))
2241 if node.wdirrev in subset or isinstance(subset, fullreposet):
2244 if node.wdirrev in subset or isinstance(subset, fullreposet):
2242 return baseset([node.wdirrev])
2245 return baseset([node.wdirrev])
2243 return baseset()
2246 return baseset()
2244
2247
2245 # for internal use
2248 # for internal use
2246 @predicate('_list', safe=True)
2249 @predicate('_list', safe=True)
2247 def _list(repo, subset, x):
2250 def _list(repo, subset, x):
2248 s = getstring(x, "internal error")
2251 s = getstring(x, "internal error")
2249 if not s:
2252 if not s:
2250 return baseset()
2253 return baseset()
2251 # remove duplicates here. it's difficult for the caller to deduplicate sets
2254 # remove duplicates here. it's difficult for the caller to deduplicate sets
2252 # because different symbols can point to the same rev.
2255 # because different symbols can point to the same rev.
2253 cl = repo.changelog
2256 cl = repo.changelog
2254 ls = []
2257 ls = []
2255 seen = set()
2258 seen = set()
2256 for t in s.split('\0'):
2259 for t in s.split('\0'):
2257 try:
2260 try:
2258 # fast path for integer revision
2261 # fast path for integer revision
2259 r = int(t)
2262 r = int(t)
2260 if str(r) != t or r not in cl:
2263 if str(r) != t or r not in cl:
2261 raise ValueError
2264 raise ValueError
2262 revs = [r]
2265 revs = [r]
2263 except ValueError:
2266 except ValueError:
2264 revs = stringset(repo, subset, t)
2267 revs = stringset(repo, subset, t)
2265
2268
2266 for r in revs:
2269 for r in revs:
2267 if r in seen:
2270 if r in seen:
2268 continue
2271 continue
2269 if (r in subset
2272 if (r in subset
2270 or r == node.nullrev and isinstance(subset, fullreposet)):
2273 or r == node.nullrev and isinstance(subset, fullreposet)):
2271 ls.append(r)
2274 ls.append(r)
2272 seen.add(r)
2275 seen.add(r)
2273 return baseset(ls)
2276 return baseset(ls)
2274
2277
2275 # for internal use
2278 # for internal use
2276 @predicate('_intlist', safe=True)
2279 @predicate('_intlist', safe=True)
2277 def _intlist(repo, subset, x):
2280 def _intlist(repo, subset, x):
2278 s = getstring(x, "internal error")
2281 s = getstring(x, "internal error")
2279 if not s:
2282 if not s:
2280 return baseset()
2283 return baseset()
2281 ls = [int(r) for r in s.split('\0')]
2284 ls = [int(r) for r in s.split('\0')]
2282 s = subset
2285 s = subset
2283 return baseset([r for r in ls if r in s])
2286 return baseset([r for r in ls if r in s])
2284
2287
2285 # for internal use
2288 # for internal use
2286 @predicate('_hexlist', safe=True)
2289 @predicate('_hexlist', safe=True)
2287 def _hexlist(repo, subset, x):
2290 def _hexlist(repo, subset, x):
2288 s = getstring(x, "internal error")
2291 s = getstring(x, "internal error")
2289 if not s:
2292 if not s:
2290 return baseset()
2293 return baseset()
2291 cl = repo.changelog
2294 cl = repo.changelog
2292 ls = [cl.rev(node.bin(r)) for r in s.split('\0')]
2295 ls = [cl.rev(node.bin(r)) for r in s.split('\0')]
2293 s = subset
2296 s = subset
2294 return baseset([r for r in ls if r in s])
2297 return baseset([r for r in ls if r in s])
2295
2298
2296 methods = {
2299 methods = {
2297 "range": rangeset,
2300 "range": rangeset,
2298 "dagrange": dagrange,
2301 "dagrange": dagrange,
2299 "string": stringset,
2302 "string": stringset,
2300 "symbol": stringset,
2303 "symbol": stringset,
2301 "and": andset,
2304 "and": andset,
2302 "or": orset,
2305 "or": orset,
2303 "not": notset,
2306 "not": notset,
2304 "difference": differenceset,
2307 "difference": differenceset,
2305 "list": listset,
2308 "list": listset,
2306 "keyvalue": keyvaluepair,
2309 "keyvalue": keyvaluepair,
2307 "func": func,
2310 "func": func,
2308 "ancestor": ancestorspec,
2311 "ancestor": ancestorspec,
2309 "parent": parentspec,
2312 "parent": parentspec,
2310 "parentpost": p1,
2313 "parentpost": parentpost,
2311 }
2314 }
2312
2315
2313 # Constants for ordering requirement, used in _analyze():
2316 # Constants for ordering requirement, used in _analyze():
2314 #
2317 #
2315 # If 'define', any nested functions and operations can change the ordering of
2318 # If 'define', any nested functions and operations can change the ordering of
2316 # the entries in the set. If 'follow', any nested functions and operations
2319 # the entries in the set. If 'follow', any nested functions and operations
2317 # should take the ordering specified by the first operand to the '&' operator.
2320 # should take the ordering specified by the first operand to the '&' operator.
2318 #
2321 #
2319 # For instance,
2322 # For instance,
2320 #
2323 #
2321 # X & (Y | Z)
2324 # X & (Y | Z)
2322 # ^ ^^^^^^^
2325 # ^ ^^^^^^^
2323 # | follow
2326 # | follow
2324 # define
2327 # define
2325 #
2328 #
2326 # will be evaluated as 'or(y(x()), z(x()))', where 'x()' can change the order
2329 # will be evaluated as 'or(y(x()), z(x()))', where 'x()' can change the order
2327 # of the entries in the set, but 'y()', 'z()' and 'or()' shouldn't.
2330 # of the entries in the set, but 'y()', 'z()' and 'or()' shouldn't.
2328 #
2331 #
2329 # 'any' means the order doesn't matter. For instance,
2332 # 'any' means the order doesn't matter. For instance,
2330 #
2333 #
2331 # X & !Y
2334 # X & !Y
2332 # ^
2335 # ^
2333 # any
2336 # any
2334 #
2337 #
2335 # 'y()' can either enforce its ordering requirement or take the ordering
2338 # 'y()' can either enforce its ordering requirement or take the ordering
2336 # specified by 'x()' because 'not()' doesn't care about the order.
2339 # specified by 'x()' because 'not()' doesn't care about the order.
2337 #
2340 #
2338 # Transition of ordering requirement:
2341 # Transition of ordering requirement:
2339 #
2342 #
2340 # 1. starts with 'define'
2343 # 1. starts with 'define'
2341 # 2. shifts to 'follow' by 'x & y'
2344 # 2. shifts to 'follow' by 'x & y'
2342 # 3. changes back to 'define' on function call 'f(x)' or function-like
2345 # 3. changes back to 'define' on function call 'f(x)' or function-like
2343 # operation 'x (f) y' because 'f' may have its own ordering requirement
2346 # operation 'x (f) y' because 'f' may have its own ordering requirement
2344 # for 'x' and 'y' (e.g. 'first(x)')
2347 # for 'x' and 'y' (e.g. 'first(x)')
2345 #
2348 #
2346 anyorder = 'any' # don't care about the order
2349 anyorder = 'any' # don't care about the order
2347 defineorder = 'define' # should define the order
2350 defineorder = 'define' # should define the order
2348 followorder = 'follow' # must follow the current order
2351 followorder = 'follow' # must follow the current order
2349
2352
2350 # transition table for 'x & y', from the current expression 'x' to 'y'
2353 # transition table for 'x & y', from the current expression 'x' to 'y'
2351 _tofolloworder = {
2354 _tofolloworder = {
2352 anyorder: anyorder,
2355 anyorder: anyorder,
2353 defineorder: followorder,
2356 defineorder: followorder,
2354 followorder: followorder,
2357 followorder: followorder,
2355 }
2358 }
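# Illustration (editorial addition): _analyze() consults this table for the
# right-hand side of '&'; when 'x & y' is analyzed with defineorder, 'x' keeps
# 'define' while 'y' is analyzed with _tofolloworder[defineorder] == followorder.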
2356
2359
2357 def _matchonly(revs, bases):
2360 def _matchonly(revs, bases):
2358 """
2361 """
2359 >>> f = lambda *args: _matchonly(*map(parse, args))
2362 >>> f = lambda *args: _matchonly(*map(parse, args))
2360 >>> f('ancestors(A)', 'not ancestors(B)')
2363 >>> f('ancestors(A)', 'not ancestors(B)')
2361 ('list', ('symbol', 'A'), ('symbol', 'B'))
2364 ('list', ('symbol', 'A'), ('symbol', 'B'))
2362 """
2365 """
2363 if (revs is not None
2366 if (revs is not None
2364 and revs[0] == 'func'
2367 and revs[0] == 'func'
2365 and getsymbol(revs[1]) == 'ancestors'
2368 and getsymbol(revs[1]) == 'ancestors'
2366 and bases is not None
2369 and bases is not None
2367 and bases[0] == 'not'
2370 and bases[0] == 'not'
2368 and bases[1][0] == 'func'
2371 and bases[1][0] == 'func'
2369 and getsymbol(bases[1][1]) == 'ancestors'):
2372 and getsymbol(bases[1][1]) == 'ancestors'):
2370 return ('list', revs[2], bases[1][2])
2373 return ('list', revs[2], bases[1][2])
2371
2374
2372 def _fixops(x):
2375 def _fixops(x):
2373 """Rewrite raw parsed tree to resolve ambiguous syntax which cannot be
2376 """Rewrite raw parsed tree to resolve ambiguous syntax which cannot be
2374 handled well by our simple top-down parser"""
2377 handled well by our simple top-down parser"""
2375 if not isinstance(x, tuple):
2378 if not isinstance(x, tuple):
2376 return x
2379 return x
2377
2380
2378 op = x[0]
2381 op = x[0]
2379 if op == 'parent':
2382 if op == 'parent':
2380 # x^:y means (x^) : y, not x ^ (:y)
2383 # x^:y means (x^) : y, not x ^ (:y)
2381 # x^: means (x^) :, not x ^ (:)
2384 # x^: means (x^) :, not x ^ (:)
2382 post = ('parentpost', x[1])
2385 post = ('parentpost', x[1])
2383 if x[2][0] == 'dagrangepre':
2386 if x[2][0] == 'dagrangepre':
2384 return _fixops(('dagrange', post, x[2][1]))
2387 return _fixops(('dagrange', post, x[2][1]))
2385 elif x[2][0] == 'rangepre':
2388 elif x[2][0] == 'rangepre':
2386 return _fixops(('range', post, x[2][1]))
2389 return _fixops(('range', post, x[2][1]))
2387 elif x[2][0] == 'rangeall':
2390 elif x[2][0] == 'rangeall':
2388 return _fixops(('rangepost', post))
2391 return _fixops(('rangepost', post))
2389 elif op == 'or':
2392 elif op == 'or':
2390 # make number of arguments deterministic:
2393 # make number of arguments deterministic:
2391 # x + y + z -> (or x y z) -> (or (list x y z))
2394 # x + y + z -> (or x y z) -> (or (list x y z))
2392 return (op, _fixops(('list',) + x[1:]))
2395 return (op, _fixops(('list',) + x[1:]))
2393
2396
2394 return (op,) + tuple(_fixops(y) for y in x[1:])
2397 return (op,) + tuple(_fixops(y) for y in x[1:])
2395
2398
2396 def _analyze(x, order):
2399 def _analyze(x, order):
2397 if x is None:
2400 if x is None:
2398 return x
2401 return x
2399
2402
2400 op = x[0]
2403 op = x[0]
2401 if op == 'minus':
2404 if op == 'minus':
2402 return _analyze(('and', x[1], ('not', x[2])), order)
2405 return _analyze(('and', x[1], ('not', x[2])), order)
2403 elif op == 'only':
2406 elif op == 'only':
2404 t = ('func', ('symbol', 'only'), ('list', x[1], x[2]))
2407 t = ('func', ('symbol', 'only'), ('list', x[1], x[2]))
2405 return _analyze(t, order)
2408 return _analyze(t, order)
2406 elif op == 'onlypost':
2409 elif op == 'onlypost':
2407 return _analyze(('func', ('symbol', 'only'), x[1]), order)
2410 return _analyze(('func', ('symbol', 'only'), x[1]), order)
2408 elif op == 'dagrangepre':
2411 elif op == 'dagrangepre':
2409 return _analyze(('func', ('symbol', 'ancestors'), x[1]), order)
2412 return _analyze(('func', ('symbol', 'ancestors'), x[1]), order)
2410 elif op == 'dagrangepost':
2413 elif op == 'dagrangepost':
2411 return _analyze(('func', ('symbol', 'descendants'), x[1]), order)
2414 return _analyze(('func', ('symbol', 'descendants'), x[1]), order)
2412 elif op == 'rangeall':
2415 elif op == 'rangeall':
2413 return _analyze(('range', ('string', '0'), ('string', 'tip')), order)
2416 return _analyze(('range', ('string', '0'), ('string', 'tip')), order)
2414 elif op == 'rangepre':
2417 elif op == 'rangepre':
2415 return _analyze(('range', ('string', '0'), x[1]), order)
2418 return _analyze(('range', ('string', '0'), x[1]), order)
2416 elif op == 'rangepost':
2419 elif op == 'rangepost':
2417 return _analyze(('range', x[1], ('string', 'tip')), order)
2420 return _analyze(('range', x[1], ('string', 'tip')), order)
2418 elif op == 'negate':
2421 elif op == 'negate':
2419 s = getstring(x[1], _("can't negate that"))
2422 s = getstring(x[1], _("can't negate that"))
2420 return _analyze(('string', '-' + s), order)
2423 return _analyze(('string', '-' + s), order)
2421 elif op in ('string', 'symbol'):
2424 elif op in ('string', 'symbol'):
2422 return x
2425 return x
2423 elif op == 'and':
2426 elif op == 'and':
2424 ta = _analyze(x[1], order)
2427 ta = _analyze(x[1], order)
2425 tb = _analyze(x[2], _tofolloworder[order])
2428 tb = _analyze(x[2], _tofolloworder[order])
2426 return (op, ta, tb)
2429 return (op, ta, tb)
2427 elif op == 'or':
2430 elif op == 'or':
2428 return (op, _analyze(x[1], order))
2431 return (op, _analyze(x[1], order))
2429 elif op == 'not':
2432 elif op == 'not':
2430 return (op, _analyze(x[1], anyorder))
2433 return (op, _analyze(x[1], anyorder))
2431 elif op == 'parentpost':
2434 elif op == 'parentpost':
2432 return (op, _analyze(x[1], defineorder))
2435 return (op, _analyze(x[1], defineorder))
2433 elif op == 'group':
2436 elif op == 'group':
2434 return _analyze(x[1], order)
2437 return _analyze(x[1], order)
2435 elif op in ('dagrange', 'range', 'parent', 'ancestor'):
2438 elif op in ('dagrange', 'range', 'parent', 'ancestor'):
2436 ta = _analyze(x[1], defineorder)
2439 ta = _analyze(x[1], defineorder)
2437 tb = _analyze(x[2], defineorder)
2440 tb = _analyze(x[2], defineorder)
2438 return (op, ta, tb)
2441 return (op, ta, tb)
2439 elif op == 'list':
2442 elif op == 'list':
2440 return (op,) + tuple(_analyze(y, order) for y in x[1:])
2443 return (op,) + tuple(_analyze(y, order) for y in x[1:])
2441 elif op == 'keyvalue':
2444 elif op == 'keyvalue':
2442 return (op, x[1], _analyze(x[2], order))
2445 return (op, x[1], _analyze(x[2], order))
2443 elif op == 'func':
2446 elif op == 'func':
2444 return (op, x[1], _analyze(x[2], defineorder))
2447 return (op, x[1], _analyze(x[2], defineorder))
2445 raise ValueError('invalid operator %r' % op)
2448 raise ValueError('invalid operator %r' % op)
2446
2449
2447 def analyze(x, order=defineorder):
2450 def analyze(x, order=defineorder):
2448 """Transform raw parsed tree to evaluatable tree which can be fed to
2451 """Transform raw parsed tree to evaluatable tree which can be fed to
2449 optimize() or getset()
2452 optimize() or getset()
2450
2453
2451 All pseudo operations should be mapped to real operations or functions
2454 All pseudo operations should be mapped to real operations or functions
2452 defined in methods or symbols table respectively.
2455 defined in methods or symbols table respectively.
2453
2456
2454 'order' specifies how the current expression 'x' is ordered (see the
2457 'order' specifies how the current expression 'x' is ordered (see the
2455 constants defined above.)
2458 constants defined above.)
2456 """
2459 """
2457 return _analyze(x, order)
2460 return _analyze(x, order)
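# Illustration (editorial addition): pseudo operations such as 'dagrangepre'
# are rewritten into real functions here, for example:
#
#   >>> analyze(parse('::2'))
#   ('func', ('symbol', 'ancestors'), ('symbol', '2'))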
2458
2461
2459 def _optimize(x, small):
2462 def _optimize(x, small):
2460 if x is None:
2463 if x is None:
2461 return 0, x
2464 return 0, x
2462
2465
2463 smallbonus = 1
2466 smallbonus = 1
2464 if small:
2467 if small:
2465 smallbonus = .5
2468 smallbonus = .5
2466
2469
2467 op = x[0]
2470 op = x[0]
2468 if op in ('string', 'symbol'):
2471 if op in ('string', 'symbol'):
2469 return smallbonus, x # single revisions are small
2472 return smallbonus, x # single revisions are small
2470 elif op == 'and':
2473 elif op == 'and':
2471 wa, ta = _optimize(x[1], True)
2474 wa, ta = _optimize(x[1], True)
2472 wb, tb = _optimize(x[2], True)
2475 wb, tb = _optimize(x[2], True)
2473 w = min(wa, wb)
2476 w = min(wa, wb)
2474
2477
2475 # (::x and not ::y)/(not ::y and ::x) have a fast path
2478 # (::x and not ::y)/(not ::y and ::x) have a fast path
2476 tm = _matchonly(ta, tb) or _matchonly(tb, ta)
2479 tm = _matchonly(ta, tb) or _matchonly(tb, ta)
2477 if tm:
2480 if tm:
2478 return w, ('func', ('symbol', 'only'), tm)
2481 return w, ('func', ('symbol', 'only'), tm)
2479
2482
2480 if tb is not None and tb[0] == 'not':
2483 if tb is not None and tb[0] == 'not':
2481 return wa, ('difference', ta, tb[1])
2484 return wa, ('difference', ta, tb[1])
2482
2485
2483 if wa > wb:
2486 if wa > wb:
2484 return w, (op, tb, ta)
2487 return w, (op, tb, ta)
2485 return w, (op, ta, tb)
2488 return w, (op, ta, tb)
2486 elif op == 'or':
2489 elif op == 'or':
2487 # fast path for machine-generated expressions that are likely to have
2490 # fast path for machine-generated expressions that are likely to have
2488 # lots of trivial revisions: 'a + b + c()' to '_list(a b) + c()'
2491 # lots of trivial revisions: 'a + b + c()' to '_list(a b) + c()'
2489 ws, ts, ss = [], [], []
2492 ws, ts, ss = [], [], []
2490 def flushss():
2493 def flushss():
2491 if not ss:
2494 if not ss:
2492 return
2495 return
2493 if len(ss) == 1:
2496 if len(ss) == 1:
2494 w, t = ss[0]
2497 w, t = ss[0]
2495 else:
2498 else:
2496 s = '\0'.join(t[1] for w, t in ss)
2499 s = '\0'.join(t[1] for w, t in ss)
2497 y = ('func', ('symbol', '_list'), ('string', s))
2500 y = ('func', ('symbol', '_list'), ('string', s))
2498 w, t = _optimize(y, False)
2501 w, t = _optimize(y, False)
2499 ws.append(w)
2502 ws.append(w)
2500 ts.append(t)
2503 ts.append(t)
2501 del ss[:]
2504 del ss[:]
2502 for y in getlist(x[1]):
2505 for y in getlist(x[1]):
2503 w, t = _optimize(y, False)
2506 w, t = _optimize(y, False)
2504 if t is not None and (t[0] == 'string' or t[0] == 'symbol'):
2507 if t is not None and (t[0] == 'string' or t[0] == 'symbol'):
2505 ss.append((w, t))
2508 ss.append((w, t))
2506 continue
2509 continue
2507 flushss()
2510 flushss()
2508 ws.append(w)
2511 ws.append(w)
2509 ts.append(t)
2512 ts.append(t)
2510 flushss()
2513 flushss()
2511 if len(ts) == 1:
2514 if len(ts) == 1:
2512 return ws[0], ts[0] # 'or' operation is fully optimized out
2515 return ws[0], ts[0] # 'or' operation is fully optimized out
2513 # we can't reorder trees by weight because it would change the order.
2516 # we can't reorder trees by weight because it would change the order.
2514 # ("sort(a + b)" == "sort(b + a)", but "a + b" != "b + a")
2517 # ("sort(a + b)" == "sort(b + a)", but "a + b" != "b + a")
2515 # ts = tuple(t for w, t in sorted(zip(ws, ts), key=lambda wt: wt[0]))
2518 # ts = tuple(t for w, t in sorted(zip(ws, ts), key=lambda wt: wt[0]))
2516 return max(ws), (op, ('list',) + tuple(ts))
2519 return max(ws), (op, ('list',) + tuple(ts))
2517 elif op == 'not':
2520 elif op == 'not':
2518 # Optimize not public() to _notpublic() because we have a fast version
2521 # Optimize not public() to _notpublic() because we have a fast version
2519 if x[1] == ('func', ('symbol', 'public'), None):
2522 if x[1] == ('func', ('symbol', 'public'), None):
2520 newsym = ('func', ('symbol', '_notpublic'), None)
2523 newsym = ('func', ('symbol', '_notpublic'), None)
2521 o = _optimize(newsym, not small)
2524 o = _optimize(newsym, not small)
2522 return o[0], o[1]
2525 return o[0], o[1]
2523 else:
2526 else:
2524 o = _optimize(x[1], not small)
2527 o = _optimize(x[1], not small)
2525 return o[0], (op, o[1])
2528 return o[0], (op, o[1])
2526 elif op == 'parentpost':
2529 elif op == 'parentpost':
2527 o = _optimize(x[1], small)
2530 o = _optimize(x[1], small)
2528 return o[0], (op, o[1])
2531 return o[0], (op, o[1])
2529 elif op in ('dagrange', 'range', 'parent', 'ancestor'):
2532 elif op in ('dagrange', 'range', 'parent', 'ancestor'):
2530 wa, ta = _optimize(x[1], small)
2533 wa, ta = _optimize(x[1], small)
2531 wb, tb = _optimize(x[2], small)
2534 wb, tb = _optimize(x[2], small)
2532 return wa + wb, (op, ta, tb)
2535 return wa + wb, (op, ta, tb)
2533 elif op == 'list':
2536 elif op == 'list':
2534 ws, ts = zip(*(_optimize(y, small) for y in x[1:]))
2537 ws, ts = zip(*(_optimize(y, small) for y in x[1:]))
2535 return sum(ws), (op,) + ts
2538 return sum(ws), (op,) + ts
2536 elif op == 'keyvalue':
2539 elif op == 'keyvalue':
2537 w, t = _optimize(x[2], small)
2540 w, t = _optimize(x[2], small)
2538 return w, (op, x[1], t)
2541 return w, (op, x[1], t)
2539 elif op == 'func':
2542 elif op == 'func':
2540 f = getsymbol(x[1])
2543 f = getsymbol(x[1])
2541 wa, ta = _optimize(x[2], small)
2544 wa, ta = _optimize(x[2], small)
2542 if f in ('author', 'branch', 'closed', 'date', 'desc', 'file', 'grep',
2545 if f in ('author', 'branch', 'closed', 'date', 'desc', 'file', 'grep',
2543 'keyword', 'outgoing', 'user'):
2546 'keyword', 'outgoing', 'user'):
2544 w = 10 # slow
2547 w = 10 # slow
2545 elif f in ('modifies', 'adds', 'removes'):
2548 elif f in ('modifies', 'adds', 'removes'):
2546 w = 30 # slower
2549 w = 30 # slower
2547 elif f == "contains":
2550 elif f == "contains":
2548 w = 100 # very slow
2551 w = 100 # very slow
2549 elif f == "ancestor":
2552 elif f == "ancestor":
2550 w = 1 * smallbonus
2553 w = 1 * smallbonus
2551 elif f in ('reverse', 'limit', 'first', '_intlist'):
2554 elif f in ('reverse', 'limit', 'first', '_intlist'):
2552 w = 0
2555 w = 0
2553 elif f == "sort":
2556 elif f == "sort":
2554 w = 10 # assume most sorts look at changelog
2557 w = 10 # assume most sorts look at changelog
2555 else:
2558 else:
2556 w = 1
2559 w = 1
2557 return w + wa, (op, x[1], ta)
2560 return w + wa, (op, x[1], ta)
2558 raise ValueError('invalid operator %r' % op)
2561 raise ValueError('invalid operator %r' % op)
2559
2562
2560 def optimize(tree):
2563 def optimize(tree):
2561 """Optimize evaluatable tree
2564 """Optimize evaluatable tree
2562
2565
2563 All pseudo operations should be transformed beforehand.
2566 All pseudo operations should be transformed beforehand.
2564 """
2567 """
2565 _weight, newtree = _optimize(tree, small=True)
2568 _weight, newtree = _optimize(tree, small=True)
2566 return newtree
2569 return newtree
2567
2570
2568 # the set of valid characters for the initial letter of symbols in
2571 # the set of valid characters for the initial letter of symbols in
2569 # alias declarations and definitions
2572 # alias declarations and definitions
2570 _aliassyminitletters = set(c for c in [chr(i) for i in xrange(256)]
2573 _aliassyminitletters = set(c for c in [chr(i) for i in xrange(256)]
2571 if c.isalnum() or c in '._@$' or ord(c) > 127)
2574 if c.isalnum() or c in '._@$' or ord(c) > 127)
2572
2575
2573 def _parsewith(spec, lookup=None, syminitletters=None):
2576 def _parsewith(spec, lookup=None, syminitletters=None):
2574 """Generate a parse tree of given spec with given tokenizing options
2577 """Generate a parse tree of given spec with given tokenizing options
2575
2578
2576 >>> _parsewith('foo($1)', syminitletters=_aliassyminitletters)
2579 >>> _parsewith('foo($1)', syminitletters=_aliassyminitletters)
2577 ('func', ('symbol', 'foo'), ('symbol', '$1'))
2580 ('func', ('symbol', 'foo'), ('symbol', '$1'))
2578 >>> _parsewith('$1')
2581 >>> _parsewith('$1')
2579 Traceback (most recent call last):
2582 Traceback (most recent call last):
2580 ...
2583 ...
2581 ParseError: ("syntax error in revset '$1'", 0)
2584 ParseError: ("syntax error in revset '$1'", 0)
2582 >>> _parsewith('foo bar')
2585 >>> _parsewith('foo bar')
2583 Traceback (most recent call last):
2586 Traceback (most recent call last):
2584 ...
2587 ...
2585 ParseError: ('invalid token', 4)
2588 ParseError: ('invalid token', 4)
2586 """
2589 """
2587 p = parser.parser(elements)
2590 p = parser.parser(elements)
2588 tree, pos = p.parse(tokenize(spec, lookup=lookup,
2591 tree, pos = p.parse(tokenize(spec, lookup=lookup,
2589 syminitletters=syminitletters))
2592 syminitletters=syminitletters))
2590 if pos != len(spec):
2593 if pos != len(spec):
2591 raise error.ParseError(_('invalid token'), pos)
2594 raise error.ParseError(_('invalid token'), pos)
2592 return _fixops(parser.simplifyinfixops(tree, ('list', 'or')))
2595 return _fixops(parser.simplifyinfixops(tree, ('list', 'or')))
2593
2596
2594 class _aliasrules(parser.basealiasrules):
2597 class _aliasrules(parser.basealiasrules):
2595 """Parsing and expansion rule set of revset aliases"""
2598 """Parsing and expansion rule set of revset aliases"""
2596 _section = _('revset alias')
2599 _section = _('revset alias')
2597
2600
2598 @staticmethod
2601 @staticmethod
2599 def _parse(spec):
2602 def _parse(spec):
2600 """Parse alias declaration/definition ``spec``
2603 """Parse alias declaration/definition ``spec``
2601
2604
2602 This also allows symbol names to use ``$`` as an initial letter
2605 This also allows symbol names to use ``$`` as an initial letter
2603 (for backward compatibility), and callers of this function should
2606 (for backward compatibility), and callers of this function should
2604 examine whether ``$`` is also used for unexpected symbols.
2607 examine whether ``$`` is also used for unexpected symbols.
2605 """
2608 """
2606 return _parsewith(spec, syminitletters=_aliassyminitletters)
2609 return _parsewith(spec, syminitletters=_aliassyminitletters)
2607
2610
2608 @staticmethod
2611 @staticmethod
2609 def _trygetfunc(tree):
2612 def _trygetfunc(tree):
2610 if tree[0] == 'func' and tree[1][0] == 'symbol':
2613 if tree[0] == 'func' and tree[1][0] == 'symbol':
2611 return tree[1][1], getlist(tree[2])
2614 return tree[1][1], getlist(tree[2])
2612
2615
2613 def expandaliases(ui, tree):
2616 def expandaliases(ui, tree):
2614 aliases = _aliasrules.buildmap(ui.configitems('revsetalias'))
2617 aliases = _aliasrules.buildmap(ui.configitems('revsetalias'))
2615 tree = _aliasrules.expand(aliases, tree)
2618 tree = _aliasrules.expand(aliases, tree)
2616 # warn about problematic (but not referred) aliases
2619 # warn about problematic (but not referred) aliases
2617 for name, alias in sorted(aliases.iteritems()):
2620 for name, alias in sorted(aliases.iteritems()):
2618 if alias.error and not alias.warned:
2621 if alias.error and not alias.warned:
2619 ui.warn(_('warning: %s\n') % (alias.error))
2622 ui.warn(_('warning: %s\n') % (alias.error))
2620 alias.warned = True
2623 alias.warned = True
2621 return tree
2624 return tree
2622
2625
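# Illustrative sketch (hypothetical hgrc snippet, added commentary): aliases
# read from the [revsetalias] section are expanded here before the tree is
# analyzed and optimized, e.g. with
#
#   [revsetalias]
#   issue($1) = grep(r'\bissue $1\b')
#
# expandaliases() rewrites the parsed tree of "issue(1234)" into the parsed
# tree of the corresponding grep() expression, and warns once about any alias
# whose definition failed to parse.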
2623 def foldconcat(tree):
2626 def foldconcat(tree):
2624 """Fold elements to be concatenated by `##`
2627 """Fold elements to be concatenated by `##`
2625 """
2628 """
2626 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2629 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2627 return tree
2630 return tree
2628 if tree[0] == '_concat':
2631 if tree[0] == '_concat':
2629 pending = [tree]
2632 pending = [tree]
2630 l = []
2633 l = []
2631 while pending:
2634 while pending:
2632 e = pending.pop()
2635 e = pending.pop()
2633 if e[0] == '_concat':
2636 if e[0] == '_concat':
2634 pending.extend(reversed(e[1:]))
2637 pending.extend(reversed(e[1:]))
2635 elif e[0] in ('string', 'symbol'):
2638 elif e[0] in ('string', 'symbol'):
2636 l.append(e[1])
2639 l.append(e[1])
2637 else:
2640 else:
2638 msg = _("\"##\" can't concatenate \"%s\" element") % (e[0])
2641 msg = _("\"##\" can't concatenate \"%s\" element") % (e[0])
2639 raise error.ParseError(msg)
2642 raise error.ParseError(msg)
2640 return ('string', ''.join(l))
2643 return ('string', ''.join(l))
2641 else:
2644 else:
2642 return tuple(foldconcat(t) for t in tree)
2645 return tuple(foldconcat(t) for t in tree)
2643
2646
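# Illustrative example (added commentary): foldconcat() collapses the parser's
# '_concat' nodes produced by the "##" operator, so an expression such as
#
#   "278" ## "5"
#
# folds into the single node ('string', '2785') before evaluation; only string
# and symbol operands can be concatenated, anything else raises a ParseError
# as the code above shows.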
2644 def parse(spec, lookup=None):
2647 def parse(spec, lookup=None):
2645 return _parsewith(spec, lookup=lookup)
2648 return _parsewith(spec, lookup=lookup)
2646
2649
2647 def posttreebuilthook(tree, repo):
2650 def posttreebuilthook(tree, repo):
2648 # hook for extensions to execute code on the optimized tree
2651 # hook for extensions to execute code on the optimized tree
2649 pass
2652 pass
2650
2653
2651 def match(ui, spec, repo=None):
2654 def match(ui, spec, repo=None):
2652 """Create a matcher for a single revision spec."""
2655 """Create a matcher for a single revision spec."""
2653 return matchany(ui, [spec], repo=repo)
2656 return matchany(ui, [spec], repo=repo)
2654
2657
2655 def matchany(ui, specs, repo=None):
2658 def matchany(ui, specs, repo=None):
2656 """Create a matcher that will include any revisions matching one of the
2659 """Create a matcher that will include any revisions matching one of the
2657 given specs"""
2660 given specs"""
2658 if not specs:
2661 if not specs:
2659 def mfunc(repo, subset=None):
2662 def mfunc(repo, subset=None):
2660 return baseset()
2663 return baseset()
2661 return mfunc
2664 return mfunc
2662 if not all(specs):
2665 if not all(specs):
2663 raise error.ParseError(_("empty query"))
2666 raise error.ParseError(_("empty query"))
2664 lookup = None
2667 lookup = None
2665 if repo:
2668 if repo:
2666 lookup = repo.__contains__
2669 lookup = repo.__contains__
2667 if len(specs) == 1:
2670 if len(specs) == 1:
2668 tree = parse(specs[0], lookup)
2671 tree = parse(specs[0], lookup)
2669 else:
2672 else:
2670 tree = ('or', ('list',) + tuple(parse(s, lookup) for s in specs))
2673 tree = ('or', ('list',) + tuple(parse(s, lookup) for s in specs))
2671
2674
2672 if ui:
2675 if ui:
2673 tree = expandaliases(ui, tree)
2676 tree = expandaliases(ui, tree)
2674 tree = foldconcat(tree)
2677 tree = foldconcat(tree)
2675 tree = analyze(tree)
2678 tree = analyze(tree)
2676 tree = optimize(tree)
2679 tree = optimize(tree)
2677 posttreebuilthook(tree, repo)
2680 posttreebuilthook(tree, repo)
2678 return makematcher(tree)
2681 return makematcher(tree)
2679
2682
2680 def makematcher(tree):
2683 def makematcher(tree):
2681 """Create a matcher from an evaluatable tree"""
2684 """Create a matcher from an evaluatable tree"""
2682 def mfunc(repo, subset=None):
2685 def mfunc(repo, subset=None):
2683 if subset is None:
2686 if subset is None:
2684 subset = fullreposet(repo)
2687 subset = fullreposet(repo)
2685 if util.safehasattr(subset, 'isascending'):
2688 if util.safehasattr(subset, 'isascending'):
2686 result = getset(repo, subset, tree)
2689 result = getset(repo, subset, tree)
2687 else:
2690 else:
2688 result = getset(repo, baseset(subset), tree)
2691 result = getset(repo, baseset(subset), tree)
2689 return result
2692 return result
2690 return mfunc
2693 return mfunc
2691
2694
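# Illustrative usage sketch (assumes a loaded localrepository 'repo'; added
# commentary, not part of this module): callers normally build a matcher once
# and apply it to an optional subset:
#
#   m = match(repo.ui, 'head() and not closed()', repo=repo)
#   revs = m(repo)                              # smartset over the full repo
#   some = m(repo, subset=baseset([0, 1, 2]))   # restricted to a subset
#
# matchany() behaves the same but OR-combines several specs.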
2692 def formatspec(expr, *args):
2695 def formatspec(expr, *args):
2693 '''
2696 '''
2694 This is a convenience function for using revsets internally, and
2697 This is a convenience function for using revsets internally, and
2695 escapes arguments appropriately. Aliases are intentionally ignored
2698 escapes arguments appropriately. Aliases are intentionally ignored
2696 so that intended expression behavior isn't accidentally subverted.
2699 so that intended expression behavior isn't accidentally subverted.
2697
2700
2698 Supported arguments:
2701 Supported arguments:
2699
2702
2700 %r = revset expression, parenthesized
2703 %r = revset expression, parenthesized
2701 %d = int(arg), no quoting
2704 %d = int(arg), no quoting
2702 %s = string(arg), escaped and single-quoted
2705 %s = string(arg), escaped and single-quoted
2703 %b = arg.branch(), escaped and single-quoted
2706 %b = arg.branch(), escaped and single-quoted
2704 %n = hex(arg), single-quoted
2707 %n = hex(arg), single-quoted
2705 %% = a literal '%'
2708 %% = a literal '%'
2706
2709
2707 Prefixing the type with 'l' specifies a parenthesized list of that type.
2710 Prefixing the type with 'l' specifies a parenthesized list of that type.
2708
2711
2709 >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
2712 >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
2710 '(10 or 11):: and ((this()) or (that()))'
2713 '(10 or 11):: and ((this()) or (that()))'
2711 >>> formatspec('%d:: and not %d::', 10, 20)
2714 >>> formatspec('%d:: and not %d::', 10, 20)
2712 '10:: and not 20::'
2715 '10:: and not 20::'
2713 >>> formatspec('%ld or %ld', [], [1])
2716 >>> formatspec('%ld or %ld', [], [1])
2714 "_list('') or 1"
2717 "_list('') or 1"
2715 >>> formatspec('keyword(%s)', 'foo\\xe9')
2718 >>> formatspec('keyword(%s)', 'foo\\xe9')
2716 "keyword('foo\\\\xe9')"
2719 "keyword('foo\\\\xe9')"
2717 >>> b = lambda: 'default'
2720 >>> b = lambda: 'default'
2718 >>> b.branch = b
2721 >>> b.branch = b
2719 >>> formatspec('branch(%b)', b)
2722 >>> formatspec('branch(%b)', b)
2720 "branch('default')"
2723 "branch('default')"
2721 >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
2724 >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
2722 "root(_list('a\\x00b\\x00c\\x00d'))"
2725 "root(_list('a\\x00b\\x00c\\x00d'))"
2723 '''
2726 '''
2724
2727
2725 def quote(s):
2728 def quote(s):
2726 return repr(str(s))
2729 return repr(str(s))
2727
2730
2728 def argtype(c, arg):
2731 def argtype(c, arg):
2729 if c == 'd':
2732 if c == 'd':
2730 return str(int(arg))
2733 return str(int(arg))
2731 elif c == 's':
2734 elif c == 's':
2732 return quote(arg)
2735 return quote(arg)
2733 elif c == 'r':
2736 elif c == 'r':
2734 parse(arg) # make sure syntax errors are confined
2737 parse(arg) # make sure syntax errors are confined
2735 return '(%s)' % arg
2738 return '(%s)' % arg
2736 elif c == 'n':
2739 elif c == 'n':
2737 return quote(node.hex(arg))
2740 return quote(node.hex(arg))
2738 elif c == 'b':
2741 elif c == 'b':
2739 return quote(arg.branch())
2742 return quote(arg.branch())
2740
2743
2741 def listexp(s, t):
2744 def listexp(s, t):
2742 l = len(s)
2745 l = len(s)
2743 if l == 0:
2746 if l == 0:
2744 return "_list('')"
2747 return "_list('')"
2745 elif l == 1:
2748 elif l == 1:
2746 return argtype(t, s[0])
2749 return argtype(t, s[0])
2747 elif t == 'd':
2750 elif t == 'd':
2748 return "_intlist('%s')" % "\0".join(str(int(a)) for a in s)
2751 return "_intlist('%s')" % "\0".join(str(int(a)) for a in s)
2749 elif t == 's':
2752 elif t == 's':
2750 return "_list('%s')" % "\0".join(s)
2753 return "_list('%s')" % "\0".join(s)
2751 elif t == 'n':
2754 elif t == 'n':
2752 return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
2755 return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
2753 elif t == 'b':
2756 elif t == 'b':
2754 return "_list('%s')" % "\0".join(a.branch() for a in s)
2757 return "_list('%s')" % "\0".join(a.branch() for a in s)
2755
2758
2756 m = l // 2
2759 m = l // 2
2757 return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))
2760 return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))
2758
2761
2759 ret = ''
2762 ret = ''
2760 pos = 0
2763 pos = 0
2761 arg = 0
2764 arg = 0
2762 while pos < len(expr):
2765 while pos < len(expr):
2763 c = expr[pos]
2766 c = expr[pos]
2764 if c == '%':
2767 if c == '%':
2765 pos += 1
2768 pos += 1
2766 d = expr[pos]
2769 d = expr[pos]
2767 if d == '%':
2770 if d == '%':
2768 ret += d
2771 ret += d
2769 elif d in 'dsnbr':
2772 elif d in 'dsnbr':
2770 ret += argtype(d, args[arg])
2773 ret += argtype(d, args[arg])
2771 arg += 1
2774 arg += 1
2772 elif d == 'l':
2775 elif d == 'l':
2773 # a list of some type
2776 # a list of some type
2774 pos += 1
2777 pos += 1
2775 d = expr[pos]
2778 d = expr[pos]
2776 ret += listexp(list(args[arg]), d)
2779 ret += listexp(list(args[arg]), d)
2777 arg += 1
2780 arg += 1
2778 else:
2781 else:
2779 raise error.Abort(_('unexpected revspec format character %s')
2782 raise error.Abort(_('unexpected revspec format character %s')
2780 % d)
2783 % d)
2781 else:
2784 else:
2782 ret += c
2785 ret += c
2783 pos += 1
2786 pos += 1
2784
2787
2785 return ret
2788 return ret
2786
2789
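# Illustrative usage (hedged; the callers live outside this module): most code
# reaches formatspec() indirectly through repo.revs()/repo.set(), which format
# their arguments with it, e.g.
#
#   revs = repo.revs('%ld and merge()', [1, 2, 3])
#   # the spec expands to roughly "_intlist('1\x002\x003') and merge()"
#   # (NUL-separated integers, as the listexp() helper above produces)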
2787 def prettyformat(tree):
2790 def prettyformat(tree):
2788 return parser.prettyformat(tree, ('string', 'symbol'))
2791 return parser.prettyformat(tree, ('string', 'symbol'))
2789
2792
2790 def depth(tree):
2793 def depth(tree):
2791 if isinstance(tree, tuple):
2794 if isinstance(tree, tuple):
2792 return max(map(depth, tree)) + 1
2795 return max(map(depth, tree)) + 1
2793 else:
2796 else:
2794 return 0
2797 return 0
2795
2798
2796 def funcsused(tree):
2799 def funcsused(tree):
2797 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2800 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2798 return set()
2801 return set()
2799 else:
2802 else:
2800 funcs = set()
2803 funcs = set()
2801 for s in tree[1:]:
2804 for s in tree[1:]:
2802 funcs |= funcsused(s)
2805 funcs |= funcsused(s)
2803 if tree[0] == 'func':
2806 if tree[0] == 'func':
2804 funcs.add(tree[1][1])
2807 funcs.add(tree[1][1])
2805 return funcs
2808 return funcs
2806
2809
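# Illustrative examples (added commentary): for a parsed tree such as
# ('func', ('symbol', 'p1'), ('symbol', 'x')),
#
#   depth(...)     -> 2   (nested tuples count, plain strings are depth 0)
#   funcsused(...) -> set(['p1'])
#
# and funcsused() of a larger tree, e.g. the one for "head() and not merge()",
# would be set(['head', 'merge']) (hedged sketch based on the helpers above).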
2807 def _formatsetrepr(r):
2810 def _formatsetrepr(r):
2808 """Format an optional printable representation of a set
2811 """Format an optional printable representation of a set
2809
2812
2810 ======== =================================
2813 ======== =================================
2811 type(r) example
2814 type(r) example
2812 ======== =================================
2815 ======== =================================
2813 tuple ('<not %r>', other)
2816 tuple ('<not %r>', other)
2814 str '<branch closed>'
2817 str '<branch closed>'
2815 callable lambda: '<branch %r>' % sorted(b)
2818 callable lambda: '<branch %r>' % sorted(b)
2816 object other
2819 object other
2817 ======== =================================
2820 ======== =================================
2818 """
2821 """
2819 if r is None:
2822 if r is None:
2820 return ''
2823 return ''
2821 elif isinstance(r, tuple):
2824 elif isinstance(r, tuple):
2822 return r[0] % r[1:]
2825 return r[0] % r[1:]
2823 elif isinstance(r, str):
2826 elif isinstance(r, str):
2824 return r
2827 return r
2825 elif callable(r):
2828 elif callable(r):
2826 return r()
2829 return r()
2827 else:
2830 else:
2828 return repr(r)
2831 return repr(r)
2829
2832
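# Illustrative examples (added commentary) of the spellings accepted above:
#
#   _formatsetrepr(None)                    -> ''
#   _formatsetrepr(('<not %r>', [1, 2]))    -> "<not [1, 2]>"
#   _formatsetrepr('<branch closed>')       -> '<branch closed>'
#   _formatsetrepr(lambda: '<computed>')    -> '<computed>'
#   _formatsetrepr(42)                      -> '42'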
2830 class abstractsmartset(object):
2833 class abstractsmartset(object):
2831
2834
2832 def __nonzero__(self):
2835 def __nonzero__(self):
2833 """True if the smartset is not empty"""
2836 """True if the smartset is not empty"""
2834 raise NotImplementedError()
2837 raise NotImplementedError()
2835
2838
2836 def __contains__(self, rev):
2839 def __contains__(self, rev):
2837 """provide fast membership testing"""
2840 """provide fast membership testing"""
2838 raise NotImplementedError()
2841 raise NotImplementedError()
2839
2842
2840 def __iter__(self):
2843 def __iter__(self):
2841 """iterate the set in the order it is supposed to be iterated"""
2844 """iterate the set in the order it is supposed to be iterated"""
2842 raise NotImplementedError()
2845 raise NotImplementedError()
2843
2846
2844 # Attributes containing a function to perform a fast iteration in a given
2847 # Attributes containing a function to perform a fast iteration in a given
2845 # direction. A smartset can have none, one, or both defined.
2848 # direction. A smartset can have none, one, or both defined.
2846 #
2849 #
2847 # Default value is None instead of a function returning None to avoid
2850 # Default value is None instead of a function returning None to avoid
2848 # initializing an iterator just for testing if a fast method exists.
2851 # initializing an iterator just for testing if a fast method exists.
2849 fastasc = None
2852 fastasc = None
2850 fastdesc = None
2853 fastdesc = None
2851
2854
2852 def isascending(self):
2855 def isascending(self):
2853 """True if the set will iterate in ascending order"""
2856 """True if the set will iterate in ascending order"""
2854 raise NotImplementedError()
2857 raise NotImplementedError()
2855
2858
2856 def isdescending(self):
2859 def isdescending(self):
2857 """True if the set will iterate in descending order"""
2860 """True if the set will iterate in descending order"""
2858 raise NotImplementedError()
2861 raise NotImplementedError()
2859
2862
2860 def istopo(self):
2863 def istopo(self):
2861 """True if the set will iterate in topographical order"""
2864 """True if the set will iterate in topographical order"""
2862 raise NotImplementedError()
2865 raise NotImplementedError()
2863
2866
2864 @util.cachefunc
2867 @util.cachefunc
2865 def min(self):
2868 def min(self):
2866 """return the minimum element in the set"""
2869 """return the minimum element in the set"""
2867 if self.fastasc is not None:
2870 if self.fastasc is not None:
2868 for r in self.fastasc():
2871 for r in self.fastasc():
2869 return r
2872 return r
2870 raise ValueError('arg is an empty sequence')
2873 raise ValueError('arg is an empty sequence')
2871 return min(self)
2874 return min(self)
2872
2875
2873 @util.cachefunc
2876 @util.cachefunc
2874 def max(self):
2877 def max(self):
2875 """return the maximum element in the set"""
2878 """return the maximum element in the set"""
2876 if self.fastdesc is not None:
2879 if self.fastdesc is not None:
2877 for r in self.fastdesc():
2880 for r in self.fastdesc():
2878 return r
2881 return r
2879 raise ValueError('arg is an empty sequence')
2882 raise ValueError('arg is an empty sequence')
2880 return max(self)
2883 return max(self)
2881
2884
2882 def first(self):
2885 def first(self):
2883 """return the first element in the set (user iteration perspective)
2886 """return the first element in the set (user iteration perspective)
2884
2887
2885 Return None if the set is empty"""
2888 Return None if the set is empty"""
2886 raise NotImplementedError()
2889 raise NotImplementedError()
2887
2890
2888 def last(self):
2891 def last(self):
2889 """return the last element in the set (user iteration perspective)
2892 """return the last element in the set (user iteration perspective)
2890
2893
2891 Return None if the set is empty"""
2894 Return None if the set is empty"""
2892 raise NotImplementedError()
2895 raise NotImplementedError()
2893
2896
2894 def __len__(self):
2897 def __len__(self):
2895 """return the length of the smartsets
2898 """return the length of the smartsets
2896
2899
2897 This can be expensive on a smartset that could otherwise be lazy."""
2900 This can be expensive on a smartset that could otherwise be lazy."""
2898 raise NotImplementedError()
2901 raise NotImplementedError()
2899
2902
2900 def reverse(self):
2903 def reverse(self):
2901 """reverse the expected iteration order"""
2904 """reverse the expected iteration order"""
2902 raise NotImplementedError()
2905 raise NotImplementedError()
2903
2906
2904 def sort(self, reverse=True):
2907 def sort(self, reverse=True):
2905 """get the set to iterate in an ascending or descending order"""
2908 """get the set to iterate in an ascending or descending order"""
2906 raise NotImplementedError()
2909 raise NotImplementedError()
2907
2910
2908 def __and__(self, other):
2911 def __and__(self, other):
2909 """Returns a new object with the intersection of the two collections.
2912 """Returns a new object with the intersection of the two collections.
2910
2913
2911 This is part of the mandatory API for smartset."""
2914 This is part of the mandatory API for smartset."""
2912 if isinstance(other, fullreposet):
2915 if isinstance(other, fullreposet):
2913 return self
2916 return self
2914 return self.filter(other.__contains__, condrepr=other, cache=False)
2917 return self.filter(other.__contains__, condrepr=other, cache=False)
2915
2918
2916 def __add__(self, other):
2919 def __add__(self, other):
2917 """Returns a new object with the union of the two collections.
2920 """Returns a new object with the union of the two collections.
2918
2921
2919 This is part of the mandatory API for smartset."""
2922 This is part of the mandatory API for smartset."""
2920 return addset(self, other)
2923 return addset(self, other)
2921
2924
2922 def __sub__(self, other):
2925 def __sub__(self, other):
2923 """Returns a new object with the substraction of the two collections.
2926 """Returns a new object with the substraction of the two collections.
2924
2927
2925 This is part of the mandatory API for smartset."""
2928 This is part of the mandatory API for smartset."""
2926 c = other.__contains__
2929 c = other.__contains__
2927 return self.filter(lambda r: not c(r), condrepr=('<not %r>', other),
2930 return self.filter(lambda r: not c(r), condrepr=('<not %r>', other),
2928 cache=False)
2931 cache=False)
2929
2932
2930 def filter(self, condition, condrepr=None, cache=True):
2933 def filter(self, condition, condrepr=None, cache=True):
2931 """Returns this smartset filtered by condition as a new smartset.
2934 """Returns this smartset filtered by condition as a new smartset.
2932
2935
2933 `condition` is a callable which takes a revision number and returns a
2936 `condition` is a callable which takes a revision number and returns a
2934 boolean. Optional `condrepr` provides a printable representation of
2937 boolean. Optional `condrepr` provides a printable representation of
2935 the given `condition`.
2938 the given `condition`.
2936
2939
2937 This is part of the mandatory API for smartset."""
2940 This is part of the mandatory API for smartset."""
2938 # builtins cannot be cached, but they do not need to be
2941 # builtins cannot be cached, but they do not need to be
2939 if cache and util.safehasattr(condition, 'func_code'):
2942 if cache and util.safehasattr(condition, 'func_code'):
2940 condition = util.cachefunc(condition)
2943 condition = util.cachefunc(condition)
2941 return filteredset(self, condition, condrepr)
2944 return filteredset(self, condition, condrepr)
2942
2945
2943 class baseset(abstractsmartset):
2946 class baseset(abstractsmartset):
2944 """Basic data structure that represents a revset and contains the basic
2947 """Basic data structure that represents a revset and contains the basic
2945 operation that it should be able to perform.
2948 operation that it should be able to perform.
2946
2949
2947 Every method in this class should be implemented by any smartset class.
2950 Every method in this class should be implemented by any smartset class.
2948 """
2951 """
2949 def __init__(self, data=(), datarepr=None, istopo=False):
2952 def __init__(self, data=(), datarepr=None, istopo=False):
2950 """
2953 """
2951 datarepr: a tuple of (format, obj, ...), a function or an object that
2954 datarepr: a tuple of (format, obj, ...), a function or an object that
2952 provides a printable representation of the given data.
2955 provides a printable representation of the given data.
2953 """
2956 """
2954 self._ascending = None
2957 self._ascending = None
2955 self._istopo = istopo
2958 self._istopo = istopo
2956 if not isinstance(data, list):
2959 if not isinstance(data, list):
2957 if isinstance(data, set):
2960 if isinstance(data, set):
2958 self._set = data
2961 self._set = data
2959 # a set has no order, so we pick one for stability purposes
2962 # a set has no order, so we pick one for stability purposes
2960 self._ascending = True
2963 self._ascending = True
2961 data = list(data)
2964 data = list(data)
2962 self._list = data
2965 self._list = data
2963 self._datarepr = datarepr
2966 self._datarepr = datarepr
2964
2967
2965 @util.propertycache
2968 @util.propertycache
2966 def _set(self):
2969 def _set(self):
2967 return set(self._list)
2970 return set(self._list)
2968
2971
2969 @util.propertycache
2972 @util.propertycache
2970 def _asclist(self):
2973 def _asclist(self):
2971 asclist = self._list[:]
2974 asclist = self._list[:]
2972 asclist.sort()
2975 asclist.sort()
2973 return asclist
2976 return asclist
2974
2977
2975 def __iter__(self):
2978 def __iter__(self):
2976 if self._ascending is None:
2979 if self._ascending is None:
2977 return iter(self._list)
2980 return iter(self._list)
2978 elif self._ascending:
2981 elif self._ascending:
2979 return iter(self._asclist)
2982 return iter(self._asclist)
2980 else:
2983 else:
2981 return reversed(self._asclist)
2984 return reversed(self._asclist)
2982
2985
2983 def fastasc(self):
2986 def fastasc(self):
2984 return iter(self._asclist)
2987 return iter(self._asclist)
2985
2988
2986 def fastdesc(self):
2989 def fastdesc(self):
2987 return reversed(self._asclist)
2990 return reversed(self._asclist)
2988
2991
2989 @util.propertycache
2992 @util.propertycache
2990 def __contains__(self):
2993 def __contains__(self):
2991 return self._set.__contains__
2994 return self._set.__contains__
2992
2995
2993 def __nonzero__(self):
2996 def __nonzero__(self):
2994 return bool(self._list)
2997 return bool(self._list)
2995
2998
2996 def sort(self, reverse=False):
2999 def sort(self, reverse=False):
2997 self._ascending = not bool(reverse)
3000 self._ascending = not bool(reverse)
2998 self._istopo = False
3001 self._istopo = False
2999
3002
3000 def reverse(self):
3003 def reverse(self):
3001 if self._ascending is None:
3004 if self._ascending is None:
3002 self._list.reverse()
3005 self._list.reverse()
3003 else:
3006 else:
3004 self._ascending = not self._ascending
3007 self._ascending = not self._ascending
3005 self._istopo = False
3008 self._istopo = False
3006
3009
3007 def __len__(self):
3010 def __len__(self):
3008 return len(self._list)
3011 return len(self._list)
3009
3012
3010 def isascending(self):
3013 def isascending(self):
3011 """Returns True if the collection is ascending order, False if not.
3014 """Returns True if the collection is ascending order, False if not.
3012
3015
3013 This is part of the mandatory API for smartset."""
3016 This is part of the mandatory API for smartset."""
3014 if len(self) <= 1:
3017 if len(self) <= 1:
3015 return True
3018 return True
3016 return self._ascending is not None and self._ascending
3019 return self._ascending is not None and self._ascending
3017
3020
3018 def isdescending(self):
3021 def isdescending(self):
3019 """Returns True if the collection is descending order, False if not.
3022 """Returns True if the collection is descending order, False if not.
3020
3023
3021 This is part of the mandatory API for smartset."""
3024 This is part of the mandatory API for smartset."""
3022 if len(self) <= 1:
3025 if len(self) <= 1:
3023 return True
3026 return True
3024 return self._ascending is not None and not self._ascending
3027 return self._ascending is not None and not self._ascending
3025
3028
3026 def istopo(self):
3029 def istopo(self):
3027 """Is the collection is in topographical order or not.
3030 """Is the collection is in topographical order or not.
3028
3031
3029 This is part of the mandatory API for smartset."""
3032 This is part of the mandatory API for smartset."""
3030 if len(self) <= 1:
3033 if len(self) <= 1:
3031 return True
3034 return True
3032 return self._istopo
3035 return self._istopo
3033
3036
3034 def first(self):
3037 def first(self):
3035 if self:
3038 if self:
3036 if self._ascending is None:
3039 if self._ascending is None:
3037 return self._list[0]
3040 return self._list[0]
3038 elif self._ascending:
3041 elif self._ascending:
3039 return self._asclist[0]
3042 return self._asclist[0]
3040 else:
3043 else:
3041 return self._asclist[-1]
3044 return self._asclist[-1]
3042 return None
3045 return None
3043
3046
3044 def last(self):
3047 def last(self):
3045 if self:
3048 if self:
3046 if self._ascending is None:
3049 if self._ascending is None:
3047 return self._list[-1]
3050 return self._list[-1]
3048 elif self._ascending:
3051 elif self._ascending:
3049 return self._asclist[-1]
3052 return self._asclist[-1]
3050 else:
3053 else:
3051 return self._asclist[0]
3054 return self._asclist[0]
3052 return None
3055 return None
3053
3056
3054 def __repr__(self):
3057 def __repr__(self):
3055 d = {None: '', False: '-', True: '+'}[self._ascending]
3058 d = {None: '', False: '-', True: '+'}[self._ascending]
3056 s = _formatsetrepr(self._datarepr)
3059 s = _formatsetrepr(self._datarepr)
3057 if not s:
3060 if not s:
3058 l = self._list
3061 l = self._list
3059 # if _list has been built from a set, it might have a different
3062 # if _list has been built from a set, it might have a different
3060 # order from one python implementation to another.
3063 # order from one python implementation to another.
3061 # We fall back to the sorted version for a stable output.
3064 # We fall back to the sorted version for a stable output.
3062 if self._ascending is not None:
3065 if self._ascending is not None:
3063 l = self._asclist
3066 l = self._asclist
3064 s = repr(l)
3067 s = repr(l)
3065 return '<%s%s %s>' % (type(self).__name__, d, s)
3068 return '<%s%s %s>' % (type(self).__name__, d, s)
3066
3069
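# Illustrative sketch of the smartset API in action (hypothetical revision
# numbers, added commentary):
#
#   xs = baseset([4, 0, 2])          # keeps insertion order until sorted
#   xs.min(), xs.max()               # -> (0, 4), via fastasc/fastdesc
#   xs.first(), xs.last()            # -> (4, 2), user iteration order
#   ys = xs - baseset([0])           # lazily filtered; iterates 4, 2
#   xs.sort(reverse=True)            # xs now iterates 4, 2, 0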
3067 class filteredset(abstractsmartset):
3070 class filteredset(abstractsmartset):
3068 """Duck type for baseset class which iterates lazily over the revisions in
3071 """Duck type for baseset class which iterates lazily over the revisions in
3069 the subset and contains a function which tests for membership in the
3072 the subset and contains a function which tests for membership in the
3070 revset
3073 revset
3071 """
3074 """
3072 def __init__(self, subset, condition=lambda x: True, condrepr=None):
3075 def __init__(self, subset, condition=lambda x: True, condrepr=None):
3073 """
3076 """
3074 condition: a function that decides whether a revision in the subset
3077 condition: a function that decides whether a revision in the subset
3075 belongs to the revset or not.
3078 belongs to the revset or not.
3076 condrepr: a tuple of (format, obj, ...), a function or an object that
3079 condrepr: a tuple of (format, obj, ...), a function or an object that
3077 provides a printable representation of the given condition.
3080 provides a printable representation of the given condition.
3078 """
3081 """
3079 self._subset = subset
3082 self._subset = subset
3080 self._condition = condition
3083 self._condition = condition
3081 self._condrepr = condrepr
3084 self._condrepr = condrepr
3082
3085
3083 def __contains__(self, x):
3086 def __contains__(self, x):
3084 return x in self._subset and self._condition(x)
3087 return x in self._subset and self._condition(x)
3085
3088
3086 def __iter__(self):
3089 def __iter__(self):
3087 return self._iterfilter(self._subset)
3090 return self._iterfilter(self._subset)
3088
3091
3089 def _iterfilter(self, it):
3092 def _iterfilter(self, it):
3090 cond = self._condition
3093 cond = self._condition
3091 for x in it:
3094 for x in it:
3092 if cond(x):
3095 if cond(x):
3093 yield x
3096 yield x
3094
3097
3095 @property
3098 @property
3096 def fastasc(self):
3099 def fastasc(self):
3097 it = self._subset.fastasc
3100 it = self._subset.fastasc
3098 if it is None:
3101 if it is None:
3099 return None
3102 return None
3100 return lambda: self._iterfilter(it())
3103 return lambda: self._iterfilter(it())
3101
3104
3102 @property
3105 @property
3103 def fastdesc(self):
3106 def fastdesc(self):
3104 it = self._subset.fastdesc
3107 it = self._subset.fastdesc
3105 if it is None:
3108 if it is None:
3106 return None
3109 return None
3107 return lambda: self._iterfilter(it())
3110 return lambda: self._iterfilter(it())
3108
3111
3109 def __nonzero__(self):
3112 def __nonzero__(self):
3110 fast = None
3113 fast = None
3111 candidates = [self.fastasc if self.isascending() else None,
3114 candidates = [self.fastasc if self.isascending() else None,
3112 self.fastdesc if self.isdescending() else None,
3115 self.fastdesc if self.isdescending() else None,
3113 self.fastasc,
3116 self.fastasc,
3114 self.fastdesc]
3117 self.fastdesc]
3115 for candidate in candidates:
3118 for candidate in candidates:
3116 if candidate is not None:
3119 if candidate is not None:
3117 fast = candidate
3120 fast = candidate
3118 break
3121 break
3119
3122
3120 if fast is not None:
3123 if fast is not None:
3121 it = fast()
3124 it = fast()
3122 else:
3125 else:
3123 it = self
3126 it = self
3124
3127
3125 for r in it:
3128 for r in it:
3126 return True
3129 return True
3127 return False
3130 return False
3128
3131
3129 def __len__(self):
3132 def __len__(self):
3130 # Basic implementation to be changed in future patches.
3133 # Basic implementation to be changed in future patches.
3131 # Until this gets improved, we use a generator expression
3134 # Until this gets improved, we use a generator expression
3132 # here, since a list comprehension is free to call __len__ again,
3135 # here, since a list comprehension is free to call __len__ again,
3133 # causing infinite recursion
3136 # causing infinite recursion
3134 l = baseset(r for r in self)
3137 l = baseset(r for r in self)
3135 return len(l)
3138 return len(l)
3136
3139
3137 def sort(self, reverse=False):
3140 def sort(self, reverse=False):
3138 self._subset.sort(reverse=reverse)
3141 self._subset.sort(reverse=reverse)
3139
3142
3140 def reverse(self):
3143 def reverse(self):
3141 self._subset.reverse()
3144 self._subset.reverse()
3142
3145
3143 def isascending(self):
3146 def isascending(self):
3144 return self._subset.isascending()
3147 return self._subset.isascending()
3145
3148
3146 def isdescending(self):
3149 def isdescending(self):
3147 return self._subset.isdescending()
3150 return self._subset.isdescending()
3148
3151
3149 def istopo(self):
3152 def istopo(self):
3150 return self._subset.istopo()
3153 return self._subset.istopo()
3151
3154
3152 def first(self):
3155 def first(self):
3153 for x in self:
3156 for x in self:
3154 return x
3157 return x
3155 return None
3158 return None
3156
3159
3157 def last(self):
3160 def last(self):
3158 it = None
3161 it = None
3159 if self.isascending():
3162 if self.isascending():
3160 it = self.fastdesc
3163 it = self.fastdesc
3161 elif self.isdescending():
3164 elif self.isdescending():
3162 it = self.fastasc
3165 it = self.fastasc
3163 if it is not None:
3166 if it is not None:
3164 for x in it():
3167 for x in it():
3165 return x
3168 return x
3166 return None #empty case
3169 return None #empty case
3167 else:
3170 else:
3168 x = None
3171 x = None
3169 for x in self:
3172 for x in self:
3170 pass
3173 pass
3171 return x
3174 return x
3172
3175
3173 def __repr__(self):
3176 def __repr__(self):
3174 xs = [repr(self._subset)]
3177 xs = [repr(self._subset)]
3175 s = _formatsetrepr(self._condrepr)
3178 s = _formatsetrepr(self._condrepr)
3176 if s:
3179 if s:
3177 xs.append(s)
3180 xs.append(s)
3178 return '<%s %s>' % (type(self).__name__, ', '.join(xs))
3181 return '<%s %s>' % (type(self).__name__, ', '.join(xs))
3179
3182
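# Illustrative example (added commentary): the condrepr passed to filteredset
# only matters for debugging output, e.g.
#
#   fs = baseset([0, 1, 2]).filter(lambda r: r > 0, condrepr='<positive>')
#   repr(fs)   # -> '<filteredset <baseset [0, 1, 2]>, <positive>>'
#   list(fs)   # -> [1, 2]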
3180 def _iterordered(ascending, iter1, iter2):
3183 def _iterordered(ascending, iter1, iter2):
3181 """produce an ordered iteration from two iterators with the same order
3184 """produce an ordered iteration from two iterators with the same order
3182
3185
3183 The ascending parameter is used to indicate the iteration direction.
3186 The ascending parameter is used to indicate the iteration direction.
3184 """
3187 """
3185 choice = max
3188 choice = max
3186 if ascending:
3189 if ascending:
3187 choice = min
3190 choice = min
3188
3191
3189 val1 = None
3192 val1 = None
3190 val2 = None
3193 val2 = None
3191 try:
3194 try:
3192 # Consume both iterators in an ordered way until one is empty
3195 # Consume both iterators in an ordered way until one is empty
3193 while True:
3196 while True:
3194 if val1 is None:
3197 if val1 is None:
3195 val1 = next(iter1)
3198 val1 = next(iter1)
3196 if val2 is None:
3199 if val2 is None:
3197 val2 = next(iter2)
3200 val2 = next(iter2)
3198 n = choice(val1, val2)
3201 n = choice(val1, val2)
3199 yield n
3202 yield n
3200 if val1 == n:
3203 if val1 == n:
3201 val1 = None
3204 val1 = None
3202 if val2 == n:
3205 if val2 == n:
3203 val2 = None
3206 val2 = None
3204 except StopIteration:
3207 except StopIteration:
3205 # Flush any remaining values and consume the other one
3208 # Flush any remaining values and consume the other one
3206 it = iter2
3209 it = iter2
3207 if val1 is not None:
3210 if val1 is not None:
3208 yield val1
3211 yield val1
3209 it = iter1
3212 it = iter1
3210 elif val2 is not None:
3213 elif val2 is not None:
3211 # might have been equality and both are empty
3214 # might have been equality and both are empty
3212 yield val2
3215 yield val2
3213 for val in it:
3216 for val in it:
3214 yield val
3217 yield val
3215
3218
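# Illustrative example (added commentary): _iterordered() merges two iterators
# that are already sorted in the same direction, dropping values present in
# both, e.g.
#
#   list(_iterordered(True, iter([1, 3, 5]), iter([2, 3, 6])))
#   # -> [1, 2, 3, 5, 6]
#   list(_iterordered(False, iter([5, 3]), iter([6, 3, 2])))
#   # -> [6, 5, 3, 2]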
3216 class addset(abstractsmartset):
3219 class addset(abstractsmartset):
3217 """Represent the addition of two sets
3220 """Represent the addition of two sets
3218
3221
3219 Wrapper structure for lazily adding two structures without losing much
3222 Wrapper structure for lazily adding two structures without losing much
3220 performance on the __contains__ method
3223 performance on the __contains__ method
3221
3224
3222 If the ascending attribute is set, that means the two structures are
3225 If the ascending attribute is set, that means the two structures are
3223 ordered in either an ascending or descending way. Therefore, we can add
3226 ordered in either an ascending or descending way. Therefore, we can add
3224 them maintaining the order by iterating over both at the same time
3227 them maintaining the order by iterating over both at the same time
3225
3228
3226 >>> xs = baseset([0, 3, 2])
3229 >>> xs = baseset([0, 3, 2])
3227 >>> ys = baseset([5, 2, 4])
3230 >>> ys = baseset([5, 2, 4])
3228
3231
3229 >>> rs = addset(xs, ys)
3232 >>> rs = addset(xs, ys)
3230 >>> bool(rs), 0 in rs, 1 in rs, 5 in rs, rs.first(), rs.last()
3233 >>> bool(rs), 0 in rs, 1 in rs, 5 in rs, rs.first(), rs.last()
3231 (True, True, False, True, 0, 4)
3234 (True, True, False, True, 0, 4)
3232 >>> rs = addset(xs, baseset([]))
3235 >>> rs = addset(xs, baseset([]))
3233 >>> bool(rs), 0 in rs, 1 in rs, rs.first(), rs.last()
3236 >>> bool(rs), 0 in rs, 1 in rs, rs.first(), rs.last()
3234 (True, True, False, 0, 2)
3237 (True, True, False, 0, 2)
3235 >>> rs = addset(baseset([]), baseset([]))
3238 >>> rs = addset(baseset([]), baseset([]))
3236 >>> bool(rs), 0 in rs, rs.first(), rs.last()
3239 >>> bool(rs), 0 in rs, rs.first(), rs.last()
3237 (False, False, None, None)
3240 (False, False, None, None)
3238
3241
3239 iterate unsorted:
3242 iterate unsorted:
3240 >>> rs = addset(xs, ys)
3243 >>> rs = addset(xs, ys)
3241 >>> # (use generator because pypy could call len())
3244 >>> # (use generator because pypy could call len())
3242 >>> list(x for x in rs) # without _genlist
3245 >>> list(x for x in rs) # without _genlist
3243 [0, 3, 2, 5, 4]
3246 [0, 3, 2, 5, 4]
3244 >>> assert not rs._genlist
3247 >>> assert not rs._genlist
3245 >>> len(rs)
3248 >>> len(rs)
3246 5
3249 5
3247 >>> [x for x in rs] # with _genlist
3250 >>> [x for x in rs] # with _genlist
3248 [0, 3, 2, 5, 4]
3251 [0, 3, 2, 5, 4]
3249 >>> assert rs._genlist
3252 >>> assert rs._genlist
3250
3253
3251 iterate ascending:
3254 iterate ascending:
3252 >>> rs = addset(xs, ys, ascending=True)
3255 >>> rs = addset(xs, ys, ascending=True)
3253 >>> # (use generator because pypy could call len())
3256 >>> # (use generator because pypy could call len())
3254 >>> list(x for x in rs), list(x for x in rs.fastasc()) # without _asclist
3257 >>> list(x for x in rs), list(x for x in rs.fastasc()) # without _asclist
3255 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
3258 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
3256 >>> assert not rs._asclist
3259 >>> assert not rs._asclist
3257 >>> len(rs)
3260 >>> len(rs)
3258 5
3261 5
3259 >>> [x for x in rs], [x for x in rs.fastasc()]
3262 >>> [x for x in rs], [x for x in rs.fastasc()]
3260 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
3263 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
3261 >>> assert rs._asclist
3264 >>> assert rs._asclist
3262
3265
3263 iterate descending:
3266 iterate descending:
3264 >>> rs = addset(xs, ys, ascending=False)
3267 >>> rs = addset(xs, ys, ascending=False)
3265 >>> # (use generator because pypy could call len())
3268 >>> # (use generator because pypy could call len())
3266 >>> list(x for x in rs), list(x for x in rs.fastdesc()) # without _asclist
3269 >>> list(x for x in rs), list(x for x in rs.fastdesc()) # without _asclist
3267 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
3270 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
3268 >>> assert not rs._asclist
3271 >>> assert not rs._asclist
3269 >>> len(rs)
3272 >>> len(rs)
3270 5
3273 5
3271 >>> [x for x in rs], [x for x in rs.fastdesc()]
3274 >>> [x for x in rs], [x for x in rs.fastdesc()]
3272 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
3275 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
3273 >>> assert rs._asclist
3276 >>> assert rs._asclist
3274
3277
3275 iterate ascending without fastasc:
3278 iterate ascending without fastasc:
3276 >>> rs = addset(xs, generatorset(ys), ascending=True)
3279 >>> rs = addset(xs, generatorset(ys), ascending=True)
3277 >>> assert rs.fastasc is None
3280 >>> assert rs.fastasc is None
3278 >>> [x for x in rs]
3281 >>> [x for x in rs]
3279 [0, 2, 3, 4, 5]
3282 [0, 2, 3, 4, 5]
3280
3283
3281 iterate descending without fastdesc:
3284 iterate descending without fastdesc:
3282 >>> rs = addset(generatorset(xs), ys, ascending=False)
3285 >>> rs = addset(generatorset(xs), ys, ascending=False)
3283 >>> assert rs.fastdesc is None
3286 >>> assert rs.fastdesc is None
3284 >>> [x for x in rs]
3287 >>> [x for x in rs]
3285 [5, 4, 3, 2, 0]
3288 [5, 4, 3, 2, 0]
3286 """
3289 """
3287 def __init__(self, revs1, revs2, ascending=None):
3290 def __init__(self, revs1, revs2, ascending=None):
3288 self._r1 = revs1
3291 self._r1 = revs1
3289 self._r2 = revs2
3292 self._r2 = revs2
3290 self._iter = None
3293 self._iter = None
3291 self._ascending = ascending
3294 self._ascending = ascending
3292 self._genlist = None
3295 self._genlist = None
3293 self._asclist = None
3296 self._asclist = None
3294
3297
3295 def __len__(self):
3298 def __len__(self):
3296 return len(self._list)
3299 return len(self._list)
3297
3300
3298 def __nonzero__(self):
3301 def __nonzero__(self):
3299 return bool(self._r1) or bool(self._r2)
3302 return bool(self._r1) or bool(self._r2)
3300
3303
3301 @util.propertycache
3304 @util.propertycache
3302 def _list(self):
3305 def _list(self):
3303 if not self._genlist:
3306 if not self._genlist:
3304 self._genlist = baseset(iter(self))
3307 self._genlist = baseset(iter(self))
3305 return self._genlist
3308 return self._genlist
3306
3309
3307 def __iter__(self):
3310 def __iter__(self):
3308 """Iterate over both collections without repeating elements
3311 """Iterate over both collections without repeating elements
3309
3312
3310 If the ascending attribute is not set, iterate over the first one and
3313 If the ascending attribute is not set, iterate over the first one and
3311 then over the second one checking for membership on the first one so we
3314 then over the second one checking for membership on the first one so we
3312 don't yield any duplicates.
3315 don't yield any duplicates.
3313
3316
3314 If the ascending attribute is set, iterate over both collections at the
3317 If the ascending attribute is set, iterate over both collections at the
3315 same time, yielding only one value at a time in the given order.
3318 same time, yielding only one value at a time in the given order.
3316 """
3319 """
3317 if self._ascending is None:
3320 if self._ascending is None:
3318 if self._genlist:
3321 if self._genlist:
3319 return iter(self._genlist)
3322 return iter(self._genlist)
3320 def arbitraryordergen():
3323 def arbitraryordergen():
3321 for r in self._r1:
3324 for r in self._r1:
3322 yield r
3325 yield r
3323 inr1 = self._r1.__contains__
3326 inr1 = self._r1.__contains__
3324 for r in self._r2:
3327 for r in self._r2:
3325 if not inr1(r):
3328 if not inr1(r):
3326 yield r
3329 yield r
3327 return arbitraryordergen()
3330 return arbitraryordergen()
3328 # try to use our own fast iterator if it exists
3331 # try to use our own fast iterator if it exists
3329 self._trysetasclist()
3332 self._trysetasclist()
3330 if self._ascending:
3333 if self._ascending:
3331 attr = 'fastasc'
3334 attr = 'fastasc'
3332 else:
3335 else:
3333 attr = 'fastdesc'
3336 attr = 'fastdesc'
3334 it = getattr(self, attr)
3337 it = getattr(self, attr)
3335 if it is not None:
3338 if it is not None:
3336 return it()
3339 return it()
3337 # maybe half of the component supports fast
3340 # maybe half of the component supports fast
3338 # get iterator for _r1
3341 # get iterator for _r1
3339 iter1 = getattr(self._r1, attr)
3342 iter1 = getattr(self._r1, attr)
3340 if iter1 is None:
3343 if iter1 is None:
3341 # let's avoid side effect (not sure it matters)
3344 # let's avoid side effect (not sure it matters)
3342 iter1 = iter(sorted(self._r1, reverse=not self._ascending))
3345 iter1 = iter(sorted(self._r1, reverse=not self._ascending))
3343 else:
3346 else:
3344 iter1 = iter1()
3347 iter1 = iter1()
3345 # get iterator for _r2
3348 # get iterator for _r2
3346 iter2 = getattr(self._r2, attr)
3349 iter2 = getattr(self._r2, attr)
3347 if iter2 is None:
3350 if iter2 is None:
3348 # let's avoid side effect (not sure it matters)
3351 # let's avoid side effect (not sure it matters)
3349 iter2 = iter(sorted(self._r2, reverse=not self._ascending))
3352 iter2 = iter(sorted(self._r2, reverse=not self._ascending))
3350 else:
3353 else:
3351 iter2 = iter2()
3354 iter2 = iter2()
3352 return _iterordered(self._ascending, iter1, iter2)
3355 return _iterordered(self._ascending, iter1, iter2)
3353
3356
3354 def _trysetasclist(self):
3357 def _trysetasclist(self):
3355 """populate the _asclist attribute if possible and necessary"""
3358 """populate the _asclist attribute if possible and necessary"""
3356 if self._genlist is not None and self._asclist is None:
3359 if self._genlist is not None and self._asclist is None:
3357 self._asclist = sorted(self._genlist)
3360 self._asclist = sorted(self._genlist)
3358
3361
3359 @property
3362 @property
3360 def fastasc(self):
3363 def fastasc(self):
3361 self._trysetasclist()
3364 self._trysetasclist()
3362 if self._asclist is not None:
3365 if self._asclist is not None:
3363 return self._asclist.__iter__
3366 return self._asclist.__iter__
3364 iter1 = self._r1.fastasc
3367 iter1 = self._r1.fastasc
3365 iter2 = self._r2.fastasc
3368 iter2 = self._r2.fastasc
3366 if None in (iter1, iter2):
3369 if None in (iter1, iter2):
3367 return None
3370 return None
3368 return lambda: _iterordered(True, iter1(), iter2())
3371 return lambda: _iterordered(True, iter1(), iter2())
3369
3372
3370 @property
3373 @property
3371 def fastdesc(self):
3374 def fastdesc(self):
3372 self._trysetasclist()
3375 self._trysetasclist()
3373 if self._asclist is not None:
3376 if self._asclist is not None:
3374 return self._asclist.__reversed__
3377 return self._asclist.__reversed__
3375 iter1 = self._r1.fastdesc
3378 iter1 = self._r1.fastdesc
3376 iter2 = self._r2.fastdesc
3379 iter2 = self._r2.fastdesc
3377 if None in (iter1, iter2):
3380 if None in (iter1, iter2):
3378 return None
3381 return None
3379 return lambda: _iterordered(False, iter1(), iter2())
3382 return lambda: _iterordered(False, iter1(), iter2())
3380
3383
3381 def __contains__(self, x):
3384 def __contains__(self, x):
3382 return x in self._r1 or x in self._r2
3385 return x in self._r1 or x in self._r2
3383
3386
3384 def sort(self, reverse=False):
3387 def sort(self, reverse=False):
3385 """Sort the added set
3388 """Sort the added set
3386
3389
3387 For this we use the cached list with all the generated values and if we
3390 For this we use the cached list with all the generated values and if we
3388 know they are ascending or descending we can sort them in a smart way.
3391 know they are ascending or descending we can sort them in a smart way.
3389 """
3392 """
3390 self._ascending = not reverse
3393 self._ascending = not reverse
3391
3394
3392 def isascending(self):
3395 def isascending(self):
3393 return self._ascending is not None and self._ascending
3396 return self._ascending is not None and self._ascending
3394
3397
3395 def isdescending(self):
3398 def isdescending(self):
3396 return self._ascending is not None and not self._ascending
3399 return self._ascending is not None and not self._ascending
3397
3400
3398 def istopo(self):
3401 def istopo(self):
3399 # not worth the trouble asserting if the two sets combined are still
3402 # not worth the trouble asserting if the two sets combined are still
3400 # in topological order. Use the sort() predicate to explicitly sort
3403 # in topological order. Use the sort() predicate to explicitly sort
3401 # again instead.
3404 # again instead.
3402 return False
3405 return False
3403
3406
3404 def reverse(self):
3407 def reverse(self):
3405 if self._ascending is None:
3408 if self._ascending is None:
3406 self._list.reverse()
3409 self._list.reverse()
3407 else:
3410 else:
3408 self._ascending = not self._ascending
3411 self._ascending = not self._ascending
3409
3412
3410 def first(self):
3413 def first(self):
3411 for x in self:
3414 for x in self:
3412 return x
3415 return x
3413 return None
3416 return None
3414
3417
3415 def last(self):
3418 def last(self):
3416 self.reverse()
3419 self.reverse()
3417 val = self.first()
3420 val = self.first()
3418 self.reverse()
3421 self.reverse()
3419 return val
3422 return val
3420
3423
3421 def __repr__(self):
3424 def __repr__(self):
3422 d = {None: '', False: '-', True: '+'}[self._ascending]
3425 d = {None: '', False: '-', True: '+'}[self._ascending]
3423 return '<%s%s %r, %r>' % (type(self).__name__, d, self._r1, self._r2)
3426 return '<%s%s %r, %r>' % (type(self).__name__, d, self._r1, self._r2)
3424
3427
3425 class generatorset(abstractsmartset):
3428 class generatorset(abstractsmartset):
3426 """Wrap a generator for lazy iteration
3429 """Wrap a generator for lazy iteration
3427
3430
3428 Wrapper structure for generators that provides lazy membership and can
3431 Wrapper structure for generators that provides lazy membership and can
3429 be iterated more than once.
3432 be iterated more than once.
3430 When asked for membership it generates values until either it finds the
3433 When asked for membership it generates values until either it finds the
3431 requested one or has gone through all the elements in the generator
3434 requested one or has gone through all the elements in the generator
3432 """
3435 """
3433 def __init__(self, gen, iterasc=None):
3436 def __init__(self, gen, iterasc=None):
3434 """
3437 """
3435 gen: a generator producing the values for the generatorset.
3438 gen: a generator producing the values for the generatorset.
3436 """
3439 """
3437 self._gen = gen
3440 self._gen = gen
3438 self._asclist = None
3441 self._asclist = None
3439 self._cache = {}
3442 self._cache = {}
3440 self._genlist = []
3443 self._genlist = []
3441 self._finished = False
3444 self._finished = False
3442 self._ascending = True
3445 self._ascending = True
3443 if iterasc is not None:
3446 if iterasc is not None:
3444 if iterasc:
3447 if iterasc:
3445 self.fastasc = self._iterator
3448 self.fastasc = self._iterator
3446 self.__contains__ = self._asccontains
3449 self.__contains__ = self._asccontains
3447 else:
3450 else:
3448 self.fastdesc = self._iterator
3451 self.fastdesc = self._iterator
3449 self.__contains__ = self._desccontains
3452 self.__contains__ = self._desccontains
3450
3453
3451 def __nonzero__(self):
3454 def __nonzero__(self):
3452 # Do not use 'for r in self' because it will enforce the iteration
3455 # Do not use 'for r in self' because it will enforce the iteration
3453 # order (default ascending), possibly unrolling a whole descending
3456 # order (default ascending), possibly unrolling a whole descending
3454 # iterator.
3457 # iterator.
3455 if self._genlist:
3458 if self._genlist:
3456 return True
3459 return True
3457 for r in self._consumegen():
3460 for r in self._consumegen():
3458 return True
3461 return True
3459 return False
3462 return False
3460
3463
3461 def __contains__(self, x):
3464 def __contains__(self, x):
3462 if x in self._cache:
3465 if x in self._cache:
3463 return self._cache[x]
3466 return self._cache[x]
3464
3467
3465 # Use new values only, as existing values would be cached.
3468 # Use new values only, as existing values would be cached.
3466 for l in self._consumegen():
3469 for l in self._consumegen():
3467 if l == x:
3470 if l == x:
3468 return True
3471 return True
3469
3472
3470 self._cache[x] = False
3473 self._cache[x] = False
3471 return False
3474 return False
3472
3475
3473 def _asccontains(self, x):
3476 def _asccontains(self, x):
3474 """version of contains optimised for ascending generator"""
3477 """version of contains optimised for ascending generator"""
3475 if x in self._cache:
3478 if x in self._cache:
3476 return self._cache[x]
3479 return self._cache[x]
3477
3480
3478 # Use new values only, as existing values would be cached.
3481 # Use new values only, as existing values would be cached.
3479 for l in self._consumegen():
3482 for l in self._consumegen():
3480 if l == x:
3483 if l == x:
3481 return True
3484 return True
3482 if l > x:
3485 if l > x:
3483 break
3486 break
3484
3487
3485 self._cache[x] = False
3488 self._cache[x] = False
3486 return False
3489 return False
3487
3490
3488 def _desccontains(self, x):
3491 def _desccontains(self, x):
3489 """version of contains optimised for descending generator"""
3492 """version of contains optimised for descending generator"""
3490 if x in self._cache:
3493 if x in self._cache:
3491 return self._cache[x]
3494 return self._cache[x]
3492
3495
3493 # Use new values only, as existing values would be cached.
3496 # Use new values only, as existing values would be cached.
3494 for l in self._consumegen():
3497 for l in self._consumegen():
3495 if l == x:
3498 if l == x:
3496 return True
3499 return True
3497 if l < x:
3500 if l < x:
3498 break
3501 break
3499
3502
3500 self._cache[x] = False
3503 self._cache[x] = False
3501 return False
3504 return False
3502
3505
3503 def __iter__(self):
3506 def __iter__(self):
3504 if self._ascending:
3507 if self._ascending:
3505 it = self.fastasc
3508 it = self.fastasc
3506 else:
3509 else:
3507 it = self.fastdesc
3510 it = self.fastdesc
3508 if it is not None:
3511 if it is not None:
3509 return it()
3512 return it()
3510 # we need to consume the iterator
3513 # we need to consume the iterator
3511 for x in self._consumegen():
3514 for x in self._consumegen():
3512 pass
3515 pass
3513 # recall the same code
3516 # recall the same code
3514 return iter(self)
3517 return iter(self)
3515
3518
3516 def _iterator(self):
3519 def _iterator(self):
3517 if self._finished:
3520 if self._finished:
3518 return iter(self._genlist)
3521 return iter(self._genlist)
3519
3522
3520 # We have to use this complex iteration strategy to allow multiple
3523 # We have to use this complex iteration strategy to allow multiple
3521 # iterations at the same time. We need to be able to catch revisions
3524 # iterations at the same time. We need to be able to catch revisions
3522 # removed from _consumegen and added to genlist in another instance.
3525 # removed from _consumegen and added to genlist in another instance.
3523 #
3526 #
3524 # Getting rid of it would provide about a 15% speedup on this
3527 # Getting rid of it would provide about a 15% speedup on this
3525 # iteration.
3528 # iteration.
3526 genlist = self._genlist
3529 genlist = self._genlist
3527 nextrev = self._consumegen().next
3530 nextrev = self._consumegen().next
3528 _len = len # cache global lookup
3531 _len = len # cache global lookup
3529 def gen():
3532 def gen():
3530 i = 0
3533 i = 0
3531 while True:
3534 while True:
3532 if i < _len(genlist):
3535 if i < _len(genlist):
3533 yield genlist[i]
3536 yield genlist[i]
3534 else:
3537 else:
3535 yield nextrev()
3538 yield nextrev()
3536 i += 1
3539 i += 1
3537 return gen()
3540 return gen()
3538
3541
3539 def _consumegen(self):
3542 def _consumegen(self):
3540 cache = self._cache
3543 cache = self._cache
3541 genlist = self._genlist.append
3544 genlist = self._genlist.append
3542 for item in self._gen:
3545 for item in self._gen:
3543 cache[item] = True
3546 cache[item] = True
3544 genlist(item)
3547 genlist(item)
3545 yield item
3548 yield item
3546 if not self._finished:
3549 if not self._finished:
3547 self._finished = True
3550 self._finished = True
3548 asc = self._genlist[:]
3551 asc = self._genlist[:]
3549 asc.sort()
3552 asc.sort()
3550 self._asclist = asc
3553 self._asclist = asc
3551 self.fastasc = asc.__iter__
3554 self.fastasc = asc.__iter__
3552 self.fastdesc = asc.__reversed__
3555 self.fastdesc = asc.__reversed__
3553
3556
3554 def __len__(self):
3557 def __len__(self):
3555 for x in self._consumegen():
3558 for x in self._consumegen():
3556 pass
3559 pass
3557 return len(self._genlist)
3560 return len(self._genlist)
3558
3561
3559 def sort(self, reverse=False):
3562 def sort(self, reverse=False):
3560 self._ascending = not reverse
3563 self._ascending = not reverse
3561
3564
3562 def reverse(self):
3565 def reverse(self):
3563 self._ascending = not self._ascending
3566 self._ascending = not self._ascending
3564
3567
3565 def isascending(self):
3568 def isascending(self):
3566 return self._ascending
3569 return self._ascending
3567
3570
3568 def isdescending(self):
3571 def isdescending(self):
3569 return not self._ascending
3572 return not self._ascending
3570
3573
3571 def istopo(self):
3574 def istopo(self):
3572 # not worth the trouble asserting if the two sets combined are still
3575 # not worth the trouble asserting if the two sets combined are still
3573 # in topographical order. Use the sort() predicate to explicitly sort
3576 # in topographical order. Use the sort() predicate to explicitly sort
3574 # again instead.
3577 # again instead.
3575 return False
3578 return False
3576
3579
3577 def first(self):
3580 def first(self):
3578 if self._ascending:
3581 if self._ascending:
3579 it = self.fastasc
3582 it = self.fastasc
3580 else:
3583 else:
3581 it = self.fastdesc
3584 it = self.fastdesc
3582 if it is None:
3585 if it is None:
3583 # we need to consume all and try again
3586 # we need to consume all and try again
3584 for x in self._consumegen():
3587 for x in self._consumegen():
3585 pass
3588 pass
3586 return self.first()
3589 return self.first()
3587 return next(it(), None)
3590 return next(it(), None)
3588
3591
    def last(self):
        if self._ascending:
            it = self.fastdesc
        else:
            it = self.fastasc
        if it is None:
            # we need to consume all and try again
            for x in self._consumegen():
                pass
            return self.last()
        return next(it(), None)
    def __repr__(self):
        d = {False: '-', True: '+'}[self._ascending]
        return '<%s%s>' % (type(self).__name__, d)
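# Illustrative sketch, not part of the original module: a hypothetical use of
# generatorset (constructed as defined earlier in this file) wrapping an
# ascending generator. Membership tests stop consuming as soon as the answer
# is known, and everything consumed so far stays cached for later iteration.
#
#   >>> gs = generatorset(iter([0, 2, 5, 9]), iterasc=True)
#   >>> 5 in gs          # consumes the generator only up to 5
#   True
#   >>> list(gs)         # replays the cache, then finishes the generator
#   [0, 2, 5, 9]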
class spanset(abstractsmartset):
    """Duck type for baseset class which represents a range of revisions and
    can work lazily and without having all the range in memory

    Note that spanset(x, y) behaves almost like xrange(x, y) except for two
    notable points:
    - when x > y it will be automatically descending,
    - revisions filtered with this repoview will be skipped.

    """
    def __init__(self, repo, start=0, end=None):
        """
        start: first revision included in the set
            (defaults to 0)
        end: first revision excluded (last+1)
            (defaults to len(repo))

        Spanset will be descending if `end` < `start`.
        """
        if end is None:
            end = len(repo)
        self._ascending = start <= end
        if not self._ascending:
            start, end = end + 1, start + 1
        self._start = start
        self._end = end
        self._hiddenrevs = repo.changelog.filteredrevs
    def sort(self, reverse=False):
        self._ascending = not reverse

    def reverse(self):
        self._ascending = not self._ascending

    def istopo(self):
        # not worth the trouble asserting if the two sets combined are still
        # in topological order. Use the sort() predicate to explicitly sort
        # again instead.
        return False
    def _iterfilter(self, iterrange):
        s = self._hiddenrevs
        for r in iterrange:
            if r not in s:
                yield r

    def __iter__(self):
        if self._ascending:
            return self.fastasc()
        else:
            return self.fastdesc()

    def fastasc(self):
        iterrange = xrange(self._start, self._end)
        if self._hiddenrevs:
            return self._iterfilter(iterrange)
        return iter(iterrange)

    def fastdesc(self):
        iterrange = xrange(self._end - 1, self._start - 1, -1)
        if self._hiddenrevs:
            return self._iterfilter(iterrange)
        return iter(iterrange)

    def __contains__(self, rev):
        hidden = self._hiddenrevs
        return ((self._start <= rev < self._end)
                and not (hidden and rev in hidden))

    def __nonzero__(self):
        for r in self:
            return True
        return False

    def __len__(self):
        if not self._hiddenrevs:
            return abs(self._end - self._start)
        else:
            count = 0
            start = self._start
            end = self._end
            for rev in self._hiddenrevs:
                if (end < rev <= start) or (start <= rev < end):
                    count += 1
            return abs(self._end - self._start) - count

    def isascending(self):
        return self._ascending

    def isdescending(self):
        return not self._ascending

    def first(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        for x in it():
            return x
        return None

    def last(self):
        if self._ascending:
            it = self.fastdesc
        else:
            it = self.fastasc
        for x in it():
            return x
        return None

    def __repr__(self):
        d = {False: '-', True: '+'}[self._ascending]
        return '<%s%s %d:%d>' % (type(self).__name__, d,
                                 self._start, self._end - 1)
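# Illustrative sketch, not part of the original module: hypothetical usage of
# spanset, assuming a repository `repo` with revisions 0..4 and no hidden
# revisions. It behaves like a lazy, filter-aware xrange in either direction.
#
#   >>> list(spanset(repo, 0, 5))     # ascending walk
#   [0, 1, 2, 3, 4]
#   >>> list(spanset(repo, 4, -1))    # reversed bounds iterate descending
#   [4, 3, 2, 1, 0]
#   >>> 3 in spanset(repo, 0, 5)      # membership needs no iteration
#   True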
class fullreposet(spanset):
    """a set containing all revisions in the repo

    This class exists to host special optimization and magic to handle virtual
    revisions such as "null".
    """

    def __init__(self, repo):
        super(fullreposet, self).__init__(repo)

    def __and__(self, other):
        """As self contains the whole repo, all of the other set should also be
        in self. Therefore `self & other = other`.

        This boldly assumes the other contains valid revs only.
        """
        # other is not a smartset, make it so
        if not util.safehasattr(other, 'isascending'):
            # filter out hidden revisions
            # (this boldly assumes all smartsets are pure)
            #
            # `other` was used with "&", let's assume this is a set like
            # object.
            other = baseset(other - self._hiddenrevs)

        # XXX As fullreposet is also used as bootstrap, this is wrong.
        #
        # With a giveme312() revset returning [3,1,2], this makes
        #   'hg log -r "giveme312()"' -> 1, 2, 3 (wrong)
        # We cannot just drop it because other usages still need to sort it:
        #   'hg log -r "all() and giveme312()"' -> 1, 2, 3 (right)
        #
        # There are also some faulty revset implementations that rely on it
        # (eg: children as of its state in e8075329c5fb)
        #
        # When we fix the two points above we can move this into the if clause
        other.sort(reverse=self.isdescending())
        return other
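# Illustrative sketch, not part of the original module: a hypothetical
# intersection with fullreposet. Because fullreposet already contains every
# revision, "&" only coerces the other operand into a smartset and re-sorts
# it to match this set's direction (assuming revisions 1, 2 and 3 exist).
#
#   >>> list(fullreposet(repo) & baseset([3, 1, 2]))
#   [1, 2, 3]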
def prettyformatset(revs):
    lines = []
    rs = repr(revs)
    p = 0
    while p < len(rs):
        q = rs.find('<', p + 1)
        if q < 0:
            q = len(rs)
        l = rs.count('<', 0, p) - rs.count('>', 0, p)
        assert l >= 0
        lines.append((l, rs[p:q].rstrip()))
        p = q
    return '\n'.join(' ' * l + s for l, s in lines)
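# Illustrative sketch, not part of the original module: given a hypothetical
# nested smartset whose repr() is "<filteredset <spanset+ 0:4>, <baseset [2]>>",
# prettyformatset() splits the repr at each "<" and indents each piece by the
# number of brackets still open at that point, producing:
#
#   <filteredset
#    <spanset+ 0:4>,
#    <baseset [2]>>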
def loadpredicate(ui, extname, registrarobj):
    """Load revset predicates from the specified registrarobj
    """
    for name, func in registrarobj._table.iteritems():
        symbols[name] = func
        if func._safe:
            safesymbols.add(name)

# load built-in predicates explicitly to set up safesymbols
loadpredicate(None, None, predicate)

# tell hggettext to extract docstrings from these functions:
i18nfunctions = symbols.values()
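# Illustrative sketch, not part of the original module: loadpredicate() is the
# hook that pulls extension-defined predicates into `symbols`. A hypothetical
# extension would typically declare them through the registrar module, e.g.:
#
#   from mercurial import registrar
#
#   revsetpredicate = registrar.revsetpredicate()
#
#   @revsetpredicate('myfeature()', safe=True)
#   def myfeature(repo, subset, x):
#       """Changesets selected by a hypothetical feature."""
#       return subset.filter(lambda r: r % 2 == 0)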