revset: add a followlines(file, fromline, toline[, rev]) revset...
Denis Laxalde, changeset r30719:42c75b4f (default branch)

# revset.py - revision set queries for mercurial
#
# Copyright 2010 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import heapq
import re
import string

from .i18n import _
from . import (
    destutil,
    encoding,
    error,
    hbisect,
    match as matchmod,
    node,
    obsolete as obsmod,
    parser,
    pathutil,
    phases,
    pycompat,
    registrar,
    repoview,
    util,
)

def _revancestors(repo, revs, followfirst):
    """Like revlog.ancestors(), but supports followfirst."""
    if followfirst:
        cut = 1
    else:
        cut = None
    cl = repo.changelog

    def iterate():
        revs.sort(reverse=True)
        irevs = iter(revs)
        h = []

        inputrev = next(irevs, None)
        if inputrev is not None:
            heapq.heappush(h, -inputrev)

        seen = set()
        while h:
            current = -heapq.heappop(h)
            if current == inputrev:
                inputrev = next(irevs, None)
                if inputrev is not None:
                    heapq.heappush(h, -inputrev)
            if current not in seen:
                seen.add(current)
                yield current
                for parent in cl.parentrevs(current)[:cut]:
                    if parent != node.nullrev:
                        heapq.heappush(h, -parent)

    return generatorset(iterate(), iterasc=False)

def _revdescendants(repo, revs, followfirst):
    """Like revlog.descendants() but supports followfirst."""
    if followfirst:
        cut = 1
    else:
        cut = None

    def iterate():
        cl = repo.changelog
        # XXX this should be 'parentset.min()' assuming 'parentset' is a
        # smartset (and if it is not, it should.)
        first = min(revs)
        nullrev = node.nullrev
        if first == nullrev:
            # Are there nodes with a null first parent and a non-null
            # second one? Maybe. Do we care? Probably not.
            for i in cl:
                yield i
        else:
            seen = set(revs)
            for i in cl.revs(first + 1):
                for x in cl.parentrevs(i)[:cut]:
                    if x != nullrev and x in seen:
                        seen.add(i)
                        yield i
                        break

    return generatorset(iterate(), iterasc=True)

def _reachablerootspure(repo, minroot, roots, heads, includepath):
    """return (heads(::<roots> and ::<heads>))

    If includepath is True, return (<roots>::<heads>)."""
    if not roots:
        return []
    parentrevs = repo.changelog.parentrevs
    roots = set(roots)
    visit = list(heads)
    reachable = set()
    seen = {}
    # prefetch all the things! (because python is slow)
    reached = reachable.add
    dovisit = visit.append
    nextvisit = visit.pop
    # open-code the post-order traversal due to the tiny size of
    # sys.getrecursionlimit()
    while visit:
        rev = nextvisit()
        if rev in roots:
            reached(rev)
            if not includepath:
                continue
        parents = parentrevs(rev)
        seen[rev] = parents
        for parent in parents:
            if parent >= minroot and parent not in seen:
                dovisit(parent)
    if not reachable:
        return baseset()
    if not includepath:
        return reachable
    for rev in sorted(seen):
        for parent in seen[rev]:
            if parent in reachable:
                reached(rev)
    return reachable

def reachableroots(repo, roots, heads, includepath=False):
    """return (heads(::<roots> and ::<heads>))

    If includepath is True, return (<roots>::<heads>)."""
    if not roots:
        return baseset()
    minroot = roots.min()
    roots = list(roots)
    heads = list(heads)
    try:
        revs = repo.changelog.reachableroots(minroot, heads, roots, includepath)
    except AttributeError:
        revs = _reachablerootspure(repo, minroot, roots, heads, includepath)
    revs = baseset(revs)
    revs.sort()
    return revs

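# Note (illustrative, added for clarity; not part of the original module):
# reachableroots() with includepath=True is what backs the 'x::y' DAG-range
# operator handled by dagrange() further below, e.g. a query such as
# "1.0::tip" selects the revisions lying on paths between '1.0' and 'tip'.
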
elements = {
    # token-type: binding-strength, primary, prefix, infix, suffix
    "(": (21, None, ("group", 1, ")"), ("func", 1, ")"), None),
    "##": (20, None, None, ("_concat", 20), None),
    "~": (18, None, None, ("ancestor", 18), None),
    "^": (18, None, None, ("parent", 18), "parentpost"),
    "-": (5, None, ("negate", 19), ("minus", 5), None),
    "::": (17, None, ("dagrangepre", 17), ("dagrange", 17), "dagrangepost"),
    "..": (17, None, ("dagrangepre", 17), ("dagrange", 17), "dagrangepost"),
    ":": (15, "rangeall", ("rangepre", 15), ("range", 15), "rangepost"),
    "not": (10, None, ("not", 10), None, None),
    "!": (10, None, ("not", 10), None, None),
    "and": (5, None, None, ("and", 5), None),
    "&": (5, None, None, ("and", 5), None),
    "%": (5, None, None, ("only", 5), "onlypost"),
    "or": (4, None, None, ("or", 4), None),
    "|": (4, None, None, ("or", 4), None),
    "+": (4, None, None, ("or", 4), None),
    "=": (3, None, None, ("keyvalue", 3), None),
    ",": (2, None, None, ("list", 2), None),
    ")": (0, None, None, None, None),
    "symbol": (0, "symbol", None, None, None),
    "string": (0, "string", None, None, None),
    "end": (0, None, None, None, None),
}

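# Note (illustrative, added for clarity; not part of the original module):
# the binding strengths above mean 'and' (5) binds tighter than 'or' (4),
# so a query such as "a or b and c" parses as "a or (b and c)".
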
keywords = set(['and', 'or', 'not'])

# default set of valid characters for the initial letter of symbols
_syminitletters = set(
    string.ascii_letters +
    string.digits + pycompat.sysstr('._@')) | set(map(chr, xrange(128, 256)))

# default set of valid characters for non-initial letters of symbols
_symletters = _syminitletters | set(pycompat.sysstr('-/'))

def tokenize(program, lookup=None, syminitletters=None, symletters=None):
    '''
    Parse a revset statement into a stream of tokens

    ``syminitletters`` is the set of valid characters for the initial
    letter of symbols.

    By default, character ``c`` is recognized as valid for initial
    letter of symbols, if ``c.isalnum() or c in '._@' or ord(c) > 127``.

    ``symletters`` is the set of valid characters for non-initial
    letters of symbols.

    By default, character ``c`` is recognized as valid for non-initial
    letters of symbols, if ``c.isalnum() or c in '-._/@' or ord(c) > 127``.

    Check that @ is a valid unquoted token character (issue3686):
    >>> list(tokenize("@::"))
    [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]

    '''
    if syminitletters is None:
        syminitletters = _syminitletters
    if symletters is None:
        symletters = _symletters

    if program and lookup:
        # attempt to parse old-style ranges first to deal with
        # things like old-tag which contain query metacharacters
        parts = program.split(':', 1)
        if all(lookup(sym) for sym in parts if sym):
            if parts[0]:
                yield ('symbol', parts[0], 0)
            if len(parts) > 1:
                s = len(parts[0])
                yield (':', None, s)
                if parts[1]:
                    yield ('symbol', parts[1], s + 1)
            yield ('end', None, len(program))
            return

    pos, l = 0, len(program)
    while pos < l:
        c = program[pos]
        if c.isspace(): # skip inter-token whitespace
            pass
        elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
            yield ('::', None, pos)
            pos += 1 # skip ahead
        elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
            yield ('..', None, pos)
            pos += 1 # skip ahead
        elif c == '#' and program[pos:pos + 2] == '##': # look ahead carefully
            yield ('##', None, pos)
            pos += 1 # skip ahead
        elif c in "():=,-|&+!~^%": # handle simple operators
            yield (c, None, pos)
        elif (c in '"\'' or c == 'r' and
              program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
            if c == 'r':
                pos += 1
                c = program[pos]
                decode = lambda x: x
            else:
                decode = parser.unescapestr
            pos += 1
            s = pos
            while pos < l: # find closing quote
                d = program[pos]
                if d == '\\': # skip over escaped characters
                    pos += 2
                    continue
                if d == c:
                    yield ('string', decode(program[s:pos]), s)
                    break
                pos += 1
            else:
                raise error.ParseError(_("unterminated string"), s)
        # gather up a symbol/keyword
        elif c in syminitletters:
            s = pos
            pos += 1
            while pos < l: # find end of symbol
                d = program[pos]
                if d not in symletters:
                    break
                if d == '.' and program[pos - 1] == '.': # special case for ..
                    pos -= 1
                    break
                pos += 1
            sym = program[s:pos]
            if sym in keywords: # operator keywords
                yield (sym, None, s)
            elif '-' in sym:
                # some jerk gave us foo-bar-baz, try to check if it's a symbol
                if lookup and lookup(sym):
                    # looks like a real symbol
                    yield ('symbol', sym, s)
                else:
                    # looks like an expression
                    parts = sym.split('-')
                    for p in parts[:-1]:
                        if p: # possible consecutive -
                            yield ('symbol', p, s)
                            s += len(p)
                        yield ('-', None, pos)
                        s += 1
                    if parts[-1]: # possible trailing -
                        yield ('symbol', parts[-1], s)
            else:
                yield ('symbol', sym, s)
            pos -= 1
        else:
            raise error.ParseError(_("syntax error in revset '%s'") %
                                   program, pos)
        pos += 1
    yield ('end', None, pos)

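# Note (illustrative, added for clarity; not part of the original module):
# for a simple range query, the tokenizer above yields, for example:
#   list(tokenize("1:5")) ->
#   [('symbol', '1', 0), (':', None, 1), ('symbol', '5', 2), ('end', None, 3)]
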
# helpers

def getsymbol(x):
    if x and x[0] == 'symbol':
        return x[1]
    raise error.ParseError(_('not a symbol'))

def getstring(x, err):
    if x and (x[0] == 'string' or x[0] == 'symbol'):
        return x[1]
    raise error.ParseError(err)

def getlist(x):
    if not x:
        return []
    if x[0] == 'list':
        return list(x[1:])
    return [x]

def getargs(x, min, max, err):
    l = getlist(x)
    if len(l) < min or (max >= 0 and len(l) > max):
        raise error.ParseError(err)
    return l

def getargsdict(x, funcname, keys):
    return parser.buildargsdict(getlist(x), funcname, keys.split(),
                                keyvaluenode='keyvalue', keynode='symbol')

def getset(repo, subset, x):
    if not x:
        raise error.ParseError(_("missing argument"))
    s = methods[x[0]](repo, subset, *x[1:])
    if util.safehasattr(s, 'isascending'):
        return s
    # else case should not happen, because all non-func are internal,
    # ignoring for now.
    if x[0] == 'func' and x[1][0] == 'symbol' and x[1][1] in symbols:
        repo.ui.deprecwarn('revset "%s" uses list instead of smartset'
                           % x[1][1],
                           '3.9')
    return baseset(s)

def _getrevsource(repo, r):
    extra = repo[r].extra()
    for label in ('source', 'transplant_source', 'rebase_source'):
        if label in extra:
            try:
                return repo[extra[label]].rev()
            except error.RepoLookupError:
                pass
    return None

# operator methods

def stringset(repo, subset, x):
    x = repo[x].rev()
    if (x in subset
        or x == node.nullrev and isinstance(subset, fullreposet)):
        return baseset([x])
    return baseset()

def rangeset(repo, subset, x, y, order):
    m = getset(repo, fullreposet(repo), x)
    n = getset(repo, fullreposet(repo), y)

    if not m or not n:
        return baseset()
    return _makerangeset(repo, subset, m.first(), n.last(), order)

def rangepre(repo, subset, y, order):
    # ':y' can't be rewritten to '0:y' since '0' may be hidden
    n = getset(repo, fullreposet(repo), y)
    if not n:
        return baseset()
    return _makerangeset(repo, subset, 0, n.last(), order)

def _makerangeset(repo, subset, m, n, order):
    if m == n:
        r = baseset([m])
    elif n == node.wdirrev:
        r = spanset(repo, m, len(repo)) + baseset([n])
    elif m == node.wdirrev:
        r = baseset([m]) + spanset(repo, len(repo) - 1, n - 1)
    elif m < n:
        r = spanset(repo, m, n + 1)
    else:
        r = spanset(repo, m, n - 1)

    if order == defineorder:
        return r & subset
    else:
        # carrying the sorting over when possible would be more efficient
        return subset & r

def dagrange(repo, subset, x, y, order):
    r = fullreposet(repo)
    xs = reachableroots(repo, getset(repo, r, x), getset(repo, r, y),
                        includepath=True)
    return subset & xs

def andset(repo, subset, x, y, order):
    return getset(repo, getset(repo, subset, x), y)

def differenceset(repo, subset, x, y, order):
    return getset(repo, subset, x) - getset(repo, subset, y)

def _orsetlist(repo, subset, xs):
    assert xs
    if len(xs) == 1:
        return getset(repo, subset, xs[0])
    p = len(xs) // 2
    a = _orsetlist(repo, subset, xs[:p])
    b = _orsetlist(repo, subset, xs[p:])
    return a + b

def orset(repo, subset, x, order):
    xs = getlist(x)
    if order == followorder:
        # slow path to take the subset order
        return subset & _orsetlist(repo, fullreposet(repo), xs)
    else:
        return _orsetlist(repo, subset, xs)

def notset(repo, subset, x, order):
    return subset - getset(repo, subset, x)

def listset(repo, subset, *xs):
    raise error.ParseError(_("can't use a list in this context"),
                           hint=_('see hg help "revsets.x or y"'))

def keyvaluepair(repo, subset, k, v):
    raise error.ParseError(_("can't use a key-value pair in this context"))

def func(repo, subset, a, b, order):
    f = getsymbol(a)
    if f in symbols:
        func = symbols[f]
        if getattr(func, '_takeorder', False):
            return func(repo, subset, b, order)
        return func(repo, subset, b)

    keep = lambda fn: getattr(fn, '__doc__', None) is not None

    syms = [s for (s, fn) in symbols.items() if keep(fn)]
    raise error.UnknownIdentifier(f, syms)

# functions

# symbols are callables like:
#   fn(repo, subset, x)
# with:
#   repo - current repository instance
#   subset - of revisions to be examined
#   x - argument in tree form
symbols = {}

# symbols which can't be used for a DoS attack for any given input
# (e.g. those which accept regexes as plain strings shouldn't be included)
# functions that just return a lot of changesets (like all) don't count here
safesymbols = set()

predicate = registrar.revsetpredicate()

@predicate('_destupdate')
def _destupdate(repo, subset, x):
    # experimental revset for update destination
    args = getargsdict(x, 'limit', 'clean check')
    return subset & baseset([destutil.destupdate(repo, **args)[0]])

@predicate('_destmerge')
def _destmerge(repo, subset, x):
    # experimental revset for merge destination
    sourceset = None
    if x is not None:
        sourceset = getset(repo, fullreposet(repo), x)
    return subset & baseset([destutil.destmerge(repo, sourceset=sourceset)])

@predicate('adds(pattern)', safe=True)
def adds(repo, subset, x):
    """Changesets that add a file matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "adds" is a keyword
    pat = getstring(x, _("adds requires a pattern"))
    return checkstatus(repo, subset, pat, 1)

@predicate('ancestor(*changeset)', safe=True)
def ancestor(repo, subset, x):
    """A greatest common ancestor of the changesets.

    Accepts 0 or more changesets.
    Will return empty list when passed no args.
    Greatest common ancestor of a single changeset is that changeset.
    """
    # i18n: "ancestor" is a keyword
    l = getlist(x)
    rl = fullreposet(repo)
    anc = None

    # (getset(repo, rl, i) for i in l) generates a list of lists
    for revs in (getset(repo, rl, i) for i in l):
        for r in revs:
            if anc is None:
                anc = repo[r]
            else:
                anc = anc.ancestor(repo[r])

    if anc is not None and anc.rev() in subset:
        return baseset([anc.rev()])
    return baseset()

def _ancestors(repo, subset, x, followfirst=False):
    heads = getset(repo, fullreposet(repo), x)
    if not heads:
        return baseset()
    s = _revancestors(repo, heads, followfirst)
    return subset & s

@predicate('ancestors(set)', safe=True)
def ancestors(repo, subset, x):
    """Changesets that are ancestors of a changeset in set.
    """
    return _ancestors(repo, subset, x)

@predicate('_firstancestors', safe=True)
def _firstancestors(repo, subset, x):
    # ``_firstancestors(set)``
    # Like ``ancestors(set)`` but follows only the first parents.
    return _ancestors(repo, subset, x, followfirst=True)

def ancestorspec(repo, subset, x, n, order):
    """``set~n``
    Changesets that are the Nth ancestor (first parents only) of a changeset
    in set.
    """
    try:
        n = int(n[1])
    except (TypeError, ValueError):
        raise error.ParseError(_("~ expects a number"))
    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        for i in range(n):
            r = cl.parentrevs(r)[0]
        ps.add(r)
    return subset & ps

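# Note (illustrative, added for clarity; not part of the original module):
# ancestorspec() implements the '~' suffix operator, so a query such as
# "tip~3" selects the third first-parent ancestor of tip, e.g.:
#   hg log -r "tip~3"
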
@predicate('author(string)', safe=True)
def author(repo, subset, x):
    """Alias for ``user(string)``.
    """
    # i18n: "author" is a keyword
    n = encoding.lower(getstring(x, _("author requires a string")))
    kind, pattern, matcher = _substringmatcher(n)
    return subset.filter(lambda x: matcher(encoding.lower(repo[x].user())),
                         condrepr=('<user %r>', n))

@predicate('bisect(string)', safe=True)
def bisect(repo, subset, x):
    """Changesets marked in the specified bisect status:

    - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
    - ``goods``, ``bads`` : csets topologically good/bad
    - ``range`` : csets taking part in the bisection
    - ``pruned`` : csets that are goods, bads or skipped
    - ``untested`` : csets whose fate is yet unknown
    - ``ignored`` : csets ignored due to DAG topology
    - ``current`` : the cset currently being bisected
    """
    # i18n: "bisect" is a keyword
    status = getstring(x, _("bisect requires a string")).lower()
    state = set(hbisect.get(repo, status))
    return subset & state

# Backward-compatibility
# - no help entry so that we do not advertise it any more
@predicate('bisected', safe=True)
def bisected(repo, subset, x):
    return bisect(repo, subset, x)

@predicate('bookmark([name])', safe=True)
def bookmark(repo, subset, x):
    """The named bookmark or all bookmarks.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a bookmark that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "bookmark" is a keyword
    args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
    if args:
        bm = getstring(args[0],
                       # i18n: "bookmark" is a keyword
                       _('the argument to bookmark must be a string'))
        kind, pattern, matcher = util.stringmatcher(bm)
        bms = set()
        if kind == 'literal':
            bmrev = repo._bookmarks.get(pattern, None)
            if not bmrev:
                raise error.RepoLookupError(_("bookmark '%s' does not exist")
                                            % pattern)
            bms.add(repo[bmrev].rev())
        else:
            matchrevs = set()
            for name, bmrev in repo._bookmarks.iteritems():
                if matcher(name):
                    matchrevs.add(bmrev)
            if not matchrevs:
                raise error.RepoLookupError(_("no bookmarks exist"
                                              " that match '%s'") % pattern)
            for bmrev in matchrevs:
                bms.add(repo[bmrev].rev())
    else:
        bms = set([repo[r].rev()
                   for r in repo._bookmarks.values()])
    bms -= set([node.nullrev])
    return subset & bms

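# Note (illustrative, added for clarity; not part of the original module):
# typical queries served by the bookmark() predicate above:
#   hg log -r "bookmark()"               # all bookmarked changesets
#   hg log -r "bookmark('re:release-')"  # bookmarks matching a regexp
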
@predicate('branch(string or set)', safe=True)
def branch(repo, subset, x):
    """
    All changesets belonging to the given branch or the branches of the given
    changesets.

    If `string` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a branch that actually starts with `re:`,
    use the prefix `literal:`.
    """
    getbi = repo.revbranchcache().branchinfo

    try:
        b = getstring(x, '')
    except error.ParseError:
        # not a string, but another revspec, e.g. tip()
        pass
    else:
        kind, pattern, matcher = util.stringmatcher(b)
        if kind == 'literal':
            # note: falls through to the revspec case if no branch with
            # this name exists and pattern kind is not specified explicitly
            if pattern in repo.branchmap():
                return subset.filter(lambda r: matcher(getbi(r)[0]),
                                     condrepr=('<branch %r>', b))
            if b.startswith('literal:'):
                raise error.RepoLookupError(_("branch '%s' does not exist")
                                            % pattern)
        else:
            return subset.filter(lambda r: matcher(getbi(r)[0]),
                                 condrepr=('<branch %r>', b))

    s = getset(repo, fullreposet(repo), x)
    b = set()
    for r in s:
        b.add(getbi(r)[0])
    c = s.__contains__
    return subset.filter(lambda r: c(r) or getbi(r)[0] in b,
                         condrepr=lambda: '<branch %r>' % sorted(b))

@predicate('bumped()', safe=True)
def bumped(repo, subset, x):
    """Mutable changesets marked as successors of public changesets.

    Only non-public and non-obsolete changesets can be `bumped`.
    """
    # i18n: "bumped" is a keyword
    getargs(x, 0, 0, _("bumped takes no arguments"))
    bumped = obsmod.getrevs(repo, 'bumped')
    return subset & bumped

@predicate('bundle()', safe=True)
def bundle(repo, subset, x):
    """Changesets in the bundle.

    Bundle must be specified by the -R option."""

    try:
        bundlerevs = repo.changelog.bundlerevs
    except AttributeError:
        raise error.Abort(_("no bundle provided - specify with -R"))
    return subset & bundlerevs

def checkstatus(repo, subset, pat, field):
    hasset = matchmod.patkind(pat) == 'set'

    mcache = [None]
    def matches(x):
        c = repo[x]
        if not mcache[0] or hasset:
            mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
        m = mcache[0]
        fname = None
        if not m.anypats() and len(m.files()) == 1:
            fname = m.files()[0]
        if fname is not None:
            if fname not in c.files():
                return False
        else:
            for f in c.files():
                if m(f):
                    break
            else:
                return False
        files = repo.status(c.p1().node(), c.node())[field]
        if fname is not None:
            if fname in files:
                return True
        else:
            for f in files:
                if m(f):
                    return True

    return subset.filter(matches, condrepr=('<status[%r] %r>', field, pat))

def _children(repo, subset, parentset):
    if not parentset:
        return baseset()
    cs = set()
    pr = repo.changelog.parentrevs
    minrev = parentset.min()
    nullrev = node.nullrev
    for r in subset:
        if r <= minrev:
            continue
        p1, p2 = pr(r)
        if p1 in parentset:
            cs.add(r)
        if p2 != nullrev and p2 in parentset:
            cs.add(r)
    return baseset(cs)

@predicate('children(set)', safe=True)
def children(repo, subset, x):
    """Child changesets of changesets in set.
    """
    s = getset(repo, fullreposet(repo), x)
    cs = _children(repo, subset, s)
    return subset & cs

@predicate('closed()', safe=True)
def closed(repo, subset, x):
    """Changeset is closed.
    """
    # i18n: "closed" is a keyword
    getargs(x, 0, 0, _("closed takes no arguments"))
    return subset.filter(lambda r: repo[r].closesbranch(),
                         condrepr='<branch closed>')

@predicate('contains(pattern)')
def contains(repo, subset, x):
    """The revision's manifest contains a file matching pattern (but might not
    modify it). See :hg:`help patterns` for information about file patterns.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.
    """
    # i18n: "contains" is a keyword
    pat = getstring(x, _("contains requires a pattern"))

    def matches(x):
        if not matchmod.patkind(pat):
            pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
            if pats in repo[x]:
                return True
        else:
            c = repo[x]
            m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
            for f in c.manifest():
                if m(f):
                    return True
        return False

    return subset.filter(matches, condrepr=('<contains %r>', pat))

@predicate('converted([id])', safe=True)
def converted(repo, subset, x):
    """Changesets converted from the given identifier in the old repository if
    present, or all converted changesets if no identifier is specified.
    """

    # There is exactly no chance of resolving the revision, so do a simple
    # string compare and hope for the best

    rev = None
    # i18n: "converted" is a keyword
    l = getargs(x, 0, 1, _('converted takes one or no arguments'))
    if l:
        # i18n: "converted" is a keyword
        rev = getstring(l[0], _('converted requires a revision'))

    def _matchvalue(r):
        source = repo[r].extra().get('convert_revision', None)
        return source is not None and (rev is None or source.startswith(rev))

    return subset.filter(lambda r: _matchvalue(r),
                         condrepr=('<converted %r>', rev))

@predicate('date(interval)', safe=True)
def date(repo, subset, x):
    """Changesets within the interval, see :hg:`help dates`.
    """
    # i18n: "date" is a keyword
    ds = getstring(x, _("date requires a string"))
    dm = util.matchdate(ds)
    return subset.filter(lambda x: dm(repo[x].date()[0]),
                         condrepr=('<date %r>', ds))

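# Note (illustrative, added for clarity; not part of the original module):
# the date() predicate above accepts the interval syntax of :hg:`help dates`,
# for example:
#   hg log -r "date('>2016-01-01')"   # changesets committed after 2016-01-01
#   hg log -r "date('-7')"            # changesets from the last 7 days
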
814 @predicate('desc(string)', safe=True)
814 @predicate('desc(string)', safe=True)
815 def desc(repo, subset, x):
815 def desc(repo, subset, x):
816 """Search commit message for string. The match is case-insensitive.
816 """Search commit message for string. The match is case-insensitive.
817 """
817 """
818 # i18n: "desc" is a keyword
818 # i18n: "desc" is a keyword
819 ds = encoding.lower(getstring(x, _("desc requires a string")))
819 ds = encoding.lower(getstring(x, _("desc requires a string")))
820
820
821 def matches(x):
821 def matches(x):
822 c = repo[x]
822 c = repo[x]
823 return ds in encoding.lower(c.description())
823 return ds in encoding.lower(c.description())
824
824
825 return subset.filter(matches, condrepr=('<desc %r>', ds))
825 return subset.filter(matches, condrepr=('<desc %r>', ds))
826
826
827 def _descendants(repo, subset, x, followfirst=False):
827 def _descendants(repo, subset, x, followfirst=False):
828 roots = getset(repo, fullreposet(repo), x)
828 roots = getset(repo, fullreposet(repo), x)
829 if not roots:
829 if not roots:
830 return baseset()
830 return baseset()
831 s = _revdescendants(repo, roots, followfirst)
831 s = _revdescendants(repo, roots, followfirst)
832
832
833 # Both sets need to be ascending in order to lazily return the union
833 # Both sets need to be ascending in order to lazily return the union
834 # in the correct order.
834 # in the correct order.
835 base = subset & roots
835 base = subset & roots
836 desc = subset & s
836 desc = subset & s
837 result = base + desc
837 result = base + desc
838 if subset.isascending():
838 if subset.isascending():
839 result.sort()
839 result.sort()
840 elif subset.isdescending():
840 elif subset.isdescending():
841 result.sort(reverse=True)
841 result.sort(reverse=True)
842 else:
842 else:
843 result = subset & result
843 result = subset & result
844 return result
844 return result
845
845
846 @predicate('descendants(set)', safe=True)
846 @predicate('descendants(set)', safe=True)
847 def descendants(repo, subset, x):
847 def descendants(repo, subset, x):
848 """Changesets which are descendants of changesets in set.
848 """Changesets which are descendants of changesets in set.
849 """
849 """
850 return _descendants(repo, subset, x)
850 return _descendants(repo, subset, x)
851
851
852 @predicate('_firstdescendants', safe=True)
852 @predicate('_firstdescendants', safe=True)
853 def _firstdescendants(repo, subset, x):
853 def _firstdescendants(repo, subset, x):
854 # ``_firstdescendants(set)``
854 # ``_firstdescendants(set)``
855 # Like ``descendants(set)`` but follows only the first parents.
855 # Like ``descendants(set)`` but follows only the first parents.
856 return _descendants(repo, subset, x, followfirst=True)
856 return _descendants(repo, subset, x, followfirst=True)
857
857
858 @predicate('destination([set])', safe=True)
858 @predicate('destination([set])', safe=True)
859 def destination(repo, subset, x):
859 def destination(repo, subset, x):
860 """Changesets that were created by a graft, transplant or rebase operation,
860 """Changesets that were created by a graft, transplant or rebase operation,
861 with the given revisions specified as the source. Omitting the optional set
861 with the given revisions specified as the source. Omitting the optional set
862 is the same as passing all().
862 is the same as passing all().
863 """
863 """
864 if x is not None:
864 if x is not None:
865 sources = getset(repo, fullreposet(repo), x)
865 sources = getset(repo, fullreposet(repo), x)
866 else:
866 else:
867 sources = fullreposet(repo)
867 sources = fullreposet(repo)
868
868
869 dests = set()
869 dests = set()
870
870
871 # subset contains all of the possible destinations that can be returned, so
871 # subset contains all of the possible destinations that can be returned, so
872 # iterate over them and see if their source(s) were provided in the arg set.
872 # iterate over them and see if their source(s) were provided in the arg set.
873 # Even if the immediate src of r is not in the arg set, src's source (or
873 # Even if the immediate src of r is not in the arg set, src's source (or
874 # further back) may be. Scanning back further than the immediate src allows
874 # further back) may be. Scanning back further than the immediate src allows
875 # transitive transplants and rebases to yield the same results as transitive
875 # transitive transplants and rebases to yield the same results as transitive
876 # grafts.
876 # grafts.
877 for r in subset:
877 for r in subset:
878 src = _getrevsource(repo, r)
878 src = _getrevsource(repo, r)
879 lineage = None
879 lineage = None
880
880
881 while src is not None:
881 while src is not None:
882 if lineage is None:
882 if lineage is None:
883 lineage = list()
883 lineage = list()
884
884
885 lineage.append(r)
885 lineage.append(r)
886
886
887 # The visited lineage is a match if the current source is in the arg
887 # The visited lineage is a match if the current source is in the arg
888 # set. Since every candidate dest is visited by way of iterating
888 # set. Since every candidate dest is visited by way of iterating
889 # subset, any dests further back in the lineage will be tested by a
889 # subset, any dests further back in the lineage will be tested by a
890 # different iteration over subset. Likewise, if the src was already
890 # different iteration over subset. Likewise, if the src was already
891 # selected, the current lineage can be selected without going back
891 # selected, the current lineage can be selected without going back
892 # further.
892 # further.
893 if src in sources or src in dests:
893 if src in sources or src in dests:
894 dests.update(lineage)
894 dests.update(lineage)
895 break
895 break
896
896
897 r = src
897 r = src
898 src = _getrevsource(repo, r)
898 src = _getrevsource(repo, r)
899
899
900 return subset.filter(dests.__contains__,
900 return subset.filter(dests.__contains__,
901 condrepr=lambda: '<destination %r>' % sorted(dests))
901 condrepr=lambda: '<destination %r>' % sorted(dests))
902
902
903 @predicate('divergent()', safe=True)
903 @predicate('divergent()', safe=True)
904 def divergent(repo, subset, x):
904 def divergent(repo, subset, x):
905 """
905 """
906 Final successors of changesets with an alternative set of final successors.
906 Final successors of changesets with an alternative set of final successors.
907 """
907 """
908 # i18n: "divergent" is a keyword
908 # i18n: "divergent" is a keyword
909 getargs(x, 0, 0, _("divergent takes no arguments"))
909 getargs(x, 0, 0, _("divergent takes no arguments"))
910 divergent = obsmod.getrevs(repo, 'divergent')
910 divergent = obsmod.getrevs(repo, 'divergent')
911 return subset & divergent
911 return subset & divergent
912
912
913 @predicate('extinct()', safe=True)
913 @predicate('extinct()', safe=True)
914 def extinct(repo, subset, x):
914 def extinct(repo, subset, x):
915 """Obsolete changesets with obsolete descendants only.
915 """Obsolete changesets with obsolete descendants only.
916 """
916 """
917 # i18n: "extinct" is a keyword
917 # i18n: "extinct" is a keyword
918 getargs(x, 0, 0, _("extinct takes no arguments"))
918 getargs(x, 0, 0, _("extinct takes no arguments"))
919 extincts = obsmod.getrevs(repo, 'extinct')
919 extincts = obsmod.getrevs(repo, 'extinct')
920 return subset & extincts
920 return subset & extincts
921
921
922 @predicate('extra(label, [value])', safe=True)
922 @predicate('extra(label, [value])', safe=True)
923 def extra(repo, subset, x):
923 def extra(repo, subset, x):
924 """Changesets with the given label in the extra metadata, with the given
924 """Changesets with the given label in the extra metadata, with the given
925 optional value.
925 optional value.
926
926
927 If `value` starts with `re:`, the remainder of the value is treated as
927 If `value` starts with `re:`, the remainder of the value is treated as
928 a regular expression. To match a value that actually starts with `re:`,
928 a regular expression. To match a value that actually starts with `re:`,
929 use the prefix `literal:`.
929 use the prefix `literal:`.
930 """
930 """
931 args = getargsdict(x, 'extra', 'label value')
931 args = getargsdict(x, 'extra', 'label value')
932 if 'label' not in args:
932 if 'label' not in args:
933 # i18n: "extra" is a keyword
933 # i18n: "extra" is a keyword
934 raise error.ParseError(_('extra takes at least 1 argument'))
934 raise error.ParseError(_('extra takes at least 1 argument'))
935 # i18n: "extra" is a keyword
935 # i18n: "extra" is a keyword
936 label = getstring(args['label'], _('first argument to extra must be '
936 label = getstring(args['label'], _('first argument to extra must be '
937 'a string'))
937 'a string'))
938 value = None
938 value = None
939
939
940 if 'value' in args:
940 if 'value' in args:
941 # i18n: "extra" is a keyword
941 # i18n: "extra" is a keyword
942 value = getstring(args['value'], _('second argument to extra must be '
942 value = getstring(args['value'], _('second argument to extra must be '
943 'a string'))
943 'a string'))
944 kind, value, matcher = util.stringmatcher(value)
944 kind, value, matcher = util.stringmatcher(value)
945
945
946 def _matchvalue(r):
946 def _matchvalue(r):
947 extra = repo[r].extra()
947 extra = repo[r].extra()
948 return label in extra and (value is None or matcher(extra[label]))
948 return label in extra and (value is None or matcher(extra[label]))
949
949
950 return subset.filter(lambda r: _matchvalue(r),
950 return subset.filter(lambda r: _matchvalue(r),
951 condrepr=('<extra[%r] %r>', label, value))
951 condrepr=('<extra[%r] %r>', label, value))
952
952
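As an illustration of the predicate above (a minimal sketch: the key names and the regular expression are placeholders, and the Python API calls are assumed to behave as in this version of Mercurial):

from mercurial import hg, ui as uimod

repo = hg.repository(uimod.ui(), '.')    # assumption: cwd is a Mercurial repository
grafted = repo.revs("extra('source')")   # changesets carrying a 'source' extra, any value
stable = repo.revs("extra('branch', 're:^stable')")   # value matched as a regular expression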
953 @predicate('filelog(pattern)', safe=True)
953 @predicate('filelog(pattern)', safe=True)
954 def filelog(repo, subset, x):
954 def filelog(repo, subset, x):
955 """Changesets connected to the specified filelog.
955 """Changesets connected to the specified filelog.
956
956
957 For performance reasons, visits only revisions mentioned in the file-level
957 For performance reasons, visits only revisions mentioned in the file-level
958 filelog, rather than filtering through all changesets (much faster, but
958 filelog, rather than filtering through all changesets (much faster, but
959 doesn't include deletes or duplicate changes). For a slower, more accurate
959 doesn't include deletes or duplicate changes). For a slower, more accurate
960 result, use ``file()``.
960 result, use ``file()``.
961
961
962 The pattern without explicit kind like ``glob:`` is expected to be
962 The pattern without explicit kind like ``glob:`` is expected to be
963 relative to the current directory and match against a file exactly
963 relative to the current directory and match against a file exactly
964 for efficiency.
964 for efficiency.
965
965
966 If some linkrev points to revisions filtered by the current repoview, we'll
966 If some linkrev points to revisions filtered by the current repoview, we'll
967 work around it to return a non-filtered value.
967 work around it to return a non-filtered value.
968 """
968 """
969
969
970 # i18n: "filelog" is a keyword
970 # i18n: "filelog" is a keyword
971 pat = getstring(x, _("filelog requires a pattern"))
971 pat = getstring(x, _("filelog requires a pattern"))
972 s = set()
972 s = set()
973 cl = repo.changelog
973 cl = repo.changelog
974
974
975 if not matchmod.patkind(pat):
975 if not matchmod.patkind(pat):
976 f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
976 f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
977 files = [f]
977 files = [f]
978 else:
978 else:
979 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
979 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
980 files = (f for f in repo[None] if m(f))
980 files = (f for f in repo[None] if m(f))
981
981
982 for f in files:
982 for f in files:
983 fl = repo.file(f)
983 fl = repo.file(f)
984 known = {}
984 known = {}
985 scanpos = 0
985 scanpos = 0
986 for fr in list(fl):
986 for fr in list(fl):
987 fn = fl.node(fr)
987 fn = fl.node(fr)
988 if fn in known:
988 if fn in known:
989 s.add(known[fn])
989 s.add(known[fn])
990 continue
990 continue
991
991
992 lr = fl.linkrev(fr)
992 lr = fl.linkrev(fr)
993 if lr in cl:
993 if lr in cl:
994 s.add(lr)
994 s.add(lr)
995 elif scanpos is not None:
995 elif scanpos is not None:
996 # lowest matching changeset is filtered, scan further
996 # lowest matching changeset is filtered, scan further
997 # ahead in changelog
997 # ahead in changelog
998 start = max(lr, scanpos) + 1
998 start = max(lr, scanpos) + 1
999 scanpos = None
999 scanpos = None
1000 for r in cl.revs(start):
1000 for r in cl.revs(start):
1001 # minimize parsing of non-matching entries
1001 # minimize parsing of non-matching entries
1002 if f in cl.revision(r) and f in cl.readfiles(r):
1002 if f in cl.revision(r) and f in cl.readfiles(r):
1003 try:
1003 try:
1004 # try to use manifest delta fastpath
1004 # try to use manifest delta fastpath
1005 n = repo[r].filenode(f)
1005 n = repo[r].filenode(f)
1006 if n not in known:
1006 if n not in known:
1007 if n == fn:
1007 if n == fn:
1008 s.add(r)
1008 s.add(r)
1009 scanpos = r
1009 scanpos = r
1010 break
1010 break
1011 else:
1011 else:
1012 known[n] = r
1012 known[n] = r
1013 except error.ManifestLookupError:
1013 except error.ManifestLookupError:
1014 # deletion in changelog
1014 # deletion in changelog
1015 continue
1015 continue
1016
1016
1017 return subset & s
1017 return subset & s
1018
1018
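A small sketch of the speed/accuracy trade-off described above ('README' is only an example path, and cwd is assumed to be a Mercurial repository):

from mercurial import hg, ui as uimod

repo = hg.repository(uimod.ui(), '.')
fast = set(repo.revs("filelog('README')"))   # filelog-based: quick, may miss deletes/duplicates
slow = set(repo.revs("file('README')"))      # changeset scan: slower but complete
missed = slow - fast                         # e.g. revisions that only delete README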
1019 @predicate('first(set, [n])', safe=True)
1019 @predicate('first(set, [n])', safe=True)
1020 def first(repo, subset, x):
1020 def first(repo, subset, x):
1021 """An alias for limit().
1021 """An alias for limit().
1022 """
1022 """
1023 return limit(repo, subset, x)
1023 return limit(repo, subset, x)
1024
1024
1025 def _follow(repo, subset, x, name, followfirst=False):
1025 def _follow(repo, subset, x, name, followfirst=False):
1026 l = getargs(x, 0, 2, _("%s takes no arguments or a pattern "
1026 l = getargs(x, 0, 2, _("%s takes no arguments or a pattern "
1027 "and an optional revset") % name)
1027 "and an optional revset") % name)
1028 c = repo['.']
1028 c = repo['.']
1029 if l:
1029 if l:
1030 x = getstring(l[0], _("%s expected a pattern") % name)
1030 x = getstring(l[0], _("%s expected a pattern") % name)
1031 rev = None
1031 rev = None
1032 if len(l) >= 2:
1032 if len(l) >= 2:
1033 revs = getset(repo, fullreposet(repo), l[1])
1033 revs = getset(repo, fullreposet(repo), l[1])
1034 if len(revs) != 1:
1034 if len(revs) != 1:
1035 raise error.RepoLookupError(
1035 raise error.RepoLookupError(
1036 _("%s expected one starting revision") % name)
1036 _("%s expected one starting revision") % name)
1037 rev = revs.last()
1037 rev = revs.last()
1038 c = repo[rev]
1038 c = repo[rev]
1039 matcher = matchmod.match(repo.root, repo.getcwd(), [x],
1039 matcher = matchmod.match(repo.root, repo.getcwd(), [x],
1040 ctx=repo[rev], default='path')
1040 ctx=repo[rev], default='path')
1041
1041
1042 files = c.manifest().walk(matcher)
1042 files = c.manifest().walk(matcher)
1043
1043
1044 s = set()
1044 s = set()
1045 for fname in files:
1045 for fname in files:
1046 fctx = c[fname]
1046 fctx = c[fname]
1047 s = s.union(set(c.rev() for c in fctx.ancestors(followfirst)))
1047 s = s.union(set(c.rev() for c in fctx.ancestors(followfirst)))
1048 # include the revision responsible for the most recent version
1048 # include the revision responsible for the most recent version
1049 s.add(fctx.introrev())
1049 s.add(fctx.introrev())
1050 else:
1050 else:
1051 s = _revancestors(repo, baseset([c.rev()]), followfirst)
1051 s = _revancestors(repo, baseset([c.rev()]), followfirst)
1052
1052
1053 return subset & s
1053 return subset & s
1054
1054
1055 @predicate('follow([pattern[, startrev]])', safe=True)
1055 @predicate('follow([pattern[, startrev]])', safe=True)
1056 def follow(repo, subset, x):
1056 def follow(repo, subset, x):
1057 """
1057 """
1058 An alias for ``::.`` (ancestors of the working directory's first parent).
1058 An alias for ``::.`` (ancestors of the working directory's first parent).
1059 If pattern is specified, the histories of files matching the given
1059 If pattern is specified, the histories of files matching the given
1060 pattern in the revision given by startrev are followed, including copies.
1060 pattern in the revision given by startrev are followed, including copies.
1061 """
1061 """
1062 return _follow(repo, subset, x, 'follow')
1062 return _follow(repo, subset, x, 'follow')
1063
1063
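For example, ``follow()`` on its own is just ``::.``, while ``hg log -r "follow('mercurial/revset.py', 30000)"`` (the revision number is purely illustrative) lists the history of that file, including copies and renames, as it stood at revision 30000.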
1064 @predicate('_followfirst', safe=True)
1064 @predicate('_followfirst', safe=True)
1065 def _followfirst(repo, subset, x):
1065 def _followfirst(repo, subset, x):
1066 # ``followfirst([pattern[, startrev]])``
1066 # ``followfirst([pattern[, startrev]])``
1067 # Like ``follow([pattern[, startrev]])`` but follows only the first parent
1067 # Like ``follow([pattern[, startrev]])`` but follows only the first parent
1068 # of every revision or file revision.
1068 # of every revision or file revision.
1069 return _follow(repo, subset, x, '_followfirst', followfirst=True)
1069 return _follow(repo, subset, x, '_followfirst', followfirst=True)
1070
1070
1071 @predicate('followlines(file, fromline, toline[, rev=.])', safe=True)
1072 def followlines(repo, subset, x):
1073 """Changesets modifying `file` in line range ('fromline', 'toline').
1074
1075 The line range corresponds to the content of 'file' at 'rev' and should
1076 hence be consistent with the file's size. If rev is not specified, the
1077 working directory's parent is used.
1078 """
1079 from . import context # avoid circular import issues
1080
1081 args = getargs(x, 3, 4, _("followlines takes at least three arguments"))
1082
1083 rev = '.'
1084 if len(args) == 4:
1085 revarg = getargsdict(args[3], 'followlines', 'rev')
1086 if 'rev' in revarg:
1087 revs = getset(repo, fullreposet(repo), revarg['rev'])
1088 if len(revs) != 1:
1089 raise error.ParseError(
1090 _("followlines expects exactly one revision"))
1091 rev = revs.last()
1092
1093 pat = getstring(args[0], _("followlines requires a pattern"))
1094 if not matchmod.patkind(pat):
1095 fname = pathutil.canonpath(repo.root, repo.getcwd(), pat)
1096 else:
1097 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[rev])
1098 files = [f for f in repo[rev] if m(f)]
1099 if len(files) != 1:
1100 raise error.ParseError(_("followlines expects exactly one file"))
1101 fname = files[0]
1102
1103 try:
1104 fromline, toline = [int(getsymbol(a)) for a in args[1:3]]
1105 except ValueError:
1106 raise error.ParseError(_("line range bounds must be integers"))
1107 if toline - fromline < 0:
1108 raise error.ParseError(_("line range must be positive"))
1109 if fromline < 1:
1110 raise error.ParseError(_("fromline must be strictly positive"))
1111 fromline -= 1
1112
1113 fctx = repo[rev].filectx(fname)
1114 revs = (c.rev() for c in context.blockancestors(fctx, fromline, toline))
1115 return subset & generatorset(revs, iterasc=False)
1116
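A minimal usage sketch for the new predicate (the repository path, file name and line numbers are placeholders, and the Python API calls are assumed to behave as in this version of Mercurial):

from mercurial import hg, ui as uimod

repo = hg.repository(uimod.ui(), '/path/to/repo')    # placeholder path
# Changesets touching lines 10 to 20 of foo.c, as the file appears in the
# working directory's parent (the default when no rev= argument is given):
for r in repo.revs("followlines(foo.c, 10, 20)"):
    print(repo[r].hex())
# The same range, but evaluated against the file's content at revision 42:
#   hg log -r "followlines(foo.c, 10, 20, rev=42)"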
1071 @predicate('all()', safe=True)
1117 @predicate('all()', safe=True)
1072 def getall(repo, subset, x):
1118 def getall(repo, subset, x):
1073 """All changesets, the same as ``0:tip``.
1119 """All changesets, the same as ``0:tip``.
1074 """
1120 """
1075 # i18n: "all" is a keyword
1121 # i18n: "all" is a keyword
1076 getargs(x, 0, 0, _("all takes no arguments"))
1122 getargs(x, 0, 0, _("all takes no arguments"))
1077 return subset & spanset(repo) # drop "null" if any
1123 return subset & spanset(repo) # drop "null" if any
1078
1124
1079 @predicate('grep(regex)')
1125 @predicate('grep(regex)')
1080 def grep(repo, subset, x):
1126 def grep(repo, subset, x):
1081 """Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
1127 """Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
1082 to ensure special escape characters are handled correctly. Unlike
1128 to ensure special escape characters are handled correctly. Unlike
1083 ``keyword(string)``, the match is case-sensitive.
1129 ``keyword(string)``, the match is case-sensitive.
1084 """
1130 """
1085 try:
1131 try:
1086 # i18n: "grep" is a keyword
1132 # i18n: "grep" is a keyword
1087 gr = re.compile(getstring(x, _("grep requires a string")))
1133 gr = re.compile(getstring(x, _("grep requires a string")))
1088 except re.error as e:
1134 except re.error as e:
1089 raise error.ParseError(_('invalid match pattern: %s') % e)
1135 raise error.ParseError(_('invalid match pattern: %s') % e)
1090
1136
1091 def matches(x):
1137 def matches(x):
1092 c = repo[x]
1138 c = repo[x]
1093 for e in c.files() + [c.user(), c.description()]:
1139 for e in c.files() + [c.user(), c.description()]:
1094 if gr.search(e):
1140 if gr.search(e):
1095 return True
1141 return True
1096 return False
1142 return False
1097
1143
1098 return subset.filter(matches, condrepr=('<grep %r>', gr.pattern))
1144 return subset.filter(matches, condrepr=('<grep %r>', gr.pattern))
1099
1145
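For instance, ``keyword(bug)`` matches 'Bug', 'BUG' or 'bug' anywhere in the commit message, user name or changed file names, whereas ``grep('Bug [0-9]+')`` (an arbitrary example pattern) matches only the exact, case-sensitive form.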
1100 @predicate('_matchfiles', safe=True)
1146 @predicate('_matchfiles', safe=True)
1101 def _matchfiles(repo, subset, x):
1147 def _matchfiles(repo, subset, x):
1102 # _matchfiles takes a revset list of prefixed arguments:
1148 # _matchfiles takes a revset list of prefixed arguments:
1103 #
1149 #
1104 # [p:foo, i:bar, x:baz]
1150 # [p:foo, i:bar, x:baz]
1105 #
1151 #
1106 # builds a match object from them and filters subset. Allowed
1152 # builds a match object from them and filters subset. Allowed
1107 # prefixes are 'p:' for regular patterns, 'i:' for include
1153 # prefixes are 'p:' for regular patterns, 'i:' for include
1108 # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
1154 # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
1109 # a revision identifier, or the empty string to reference the
1155 # a revision identifier, or the empty string to reference the
1110 # working directory, from which the match object is
1156 # working directory, from which the match object is
1111 # initialized. Use 'd:' to set the default matching mode, default
1157 # initialized. Use 'd:' to set the default matching mode, default
1112 # to 'glob'. At most one 'r:' and 'd:' argument can be passed.
1158 # to 'glob'. At most one 'r:' and 'd:' argument can be passed.
1113
1159
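# For instance (an illustrative argument list, not taken from a real caller):
#
#   [p:src/main.c, i:src/, x:src/vendor/**, r:1000, d:relpath]
#
# asks for a matcher on the pattern 'src/main.c' (read with the 'relpath'
# default kind), restricted to 'src/', excluding 'src/vendor/**', and built
# against the manifest of revision 1000.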
1114 l = getargs(x, 1, -1, "_matchfiles requires at least one argument")
1160 l = getargs(x, 1, -1, "_matchfiles requires at least one argument")
1115 pats, inc, exc = [], [], []
1161 pats, inc, exc = [], [], []
1116 rev, default = None, None
1162 rev, default = None, None
1117 for arg in l:
1163 for arg in l:
1118 s = getstring(arg, "_matchfiles requires string arguments")
1164 s = getstring(arg, "_matchfiles requires string arguments")
1119 prefix, value = s[:2], s[2:]
1165 prefix, value = s[:2], s[2:]
1120 if prefix == 'p:':
1166 if prefix == 'p:':
1121 pats.append(value)
1167 pats.append(value)
1122 elif prefix == 'i:':
1168 elif prefix == 'i:':
1123 inc.append(value)
1169 inc.append(value)
1124 elif prefix == 'x:':
1170 elif prefix == 'x:':
1125 exc.append(value)
1171 exc.append(value)
1126 elif prefix == 'r:':
1172 elif prefix == 'r:':
1127 if rev is not None:
1173 if rev is not None:
1128 raise error.ParseError('_matchfiles expected at most one '
1174 raise error.ParseError('_matchfiles expected at most one '
1129 'revision')
1175 'revision')
1130 if value != '': # empty means working directory; leave rev as None
1176 if value != '': # empty means working directory; leave rev as None
1131 rev = value
1177 rev = value
1132 elif prefix == 'd:':
1178 elif prefix == 'd:':
1133 if default is not None:
1179 if default is not None:
1134 raise error.ParseError('_matchfiles expected at most one '
1180 raise error.ParseError('_matchfiles expected at most one '
1135 'default mode')
1181 'default mode')
1136 default = value
1182 default = value
1137 else:
1183 else:
1138 raise error.ParseError('invalid _matchfiles prefix: %s' % prefix)
1184 raise error.ParseError('invalid _matchfiles prefix: %s' % prefix)
1139 if not default:
1185 if not default:
1140 default = 'glob'
1186 default = 'glob'
1141
1187
1142 m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
1188 m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
1143 exclude=exc, ctx=repo[rev], default=default)
1189 exclude=exc, ctx=repo[rev], default=default)
1144
1190
1145 # This directly read the changelog data as creating changectx for all
1191 # This directly read the changelog data as creating changectx for all
1146 # revisions is quite expensive.
1192 # revisions is quite expensive.
1147 getfiles = repo.changelog.readfiles
1193 getfiles = repo.changelog.readfiles
1148 wdirrev = node.wdirrev
1194 wdirrev = node.wdirrev
1149 def matches(x):
1195 def matches(x):
1150 if x == wdirrev:
1196 if x == wdirrev:
1151 files = repo[x].files()
1197 files = repo[x].files()
1152 else:
1198 else:
1153 files = getfiles(x)
1199 files = getfiles(x)
1154 for f in files:
1200 for f in files:
1155 if m(f):
1201 if m(f):
1156 return True
1202 return True
1157 return False
1203 return False
1158
1204
1159 return subset.filter(matches,
1205 return subset.filter(matches,
1160 condrepr=('<matchfiles patterns=%r, include=%r '
1206 condrepr=('<matchfiles patterns=%r, include=%r '
1161 'exclude=%r, default=%r, rev=%r>',
1207 'exclude=%r, default=%r, rev=%r>',
1162 pats, inc, exc, default, rev))
1208 pats, inc, exc, default, rev))
1163
1209
1164 @predicate('file(pattern)', safe=True)
1210 @predicate('file(pattern)', safe=True)
1165 def hasfile(repo, subset, x):
1211 def hasfile(repo, subset, x):
1166 """Changesets affecting files matched by pattern.
1212 """Changesets affecting files matched by pattern.
1167
1213
1168 For a faster but less accurate result, consider using ``filelog()``
1214 For a faster but less accurate result, consider using ``filelog()``
1169 instead.
1215 instead.
1170
1216
1171 This predicate uses ``glob:`` as the default kind of pattern.
1217 This predicate uses ``glob:`` as the default kind of pattern.
1172 """
1218 """
1173 # i18n: "file" is a keyword
1219 # i18n: "file" is a keyword
1174 pat = getstring(x, _("file requires a pattern"))
1220 pat = getstring(x, _("file requires a pattern"))
1175 return _matchfiles(repo, subset, ('string', 'p:' + pat))
1221 return _matchfiles(repo, subset, ('string', 'p:' + pat))
1176
1222
1177 @predicate('head()', safe=True)
1223 @predicate('head()', safe=True)
1178 def head(repo, subset, x):
1224 def head(repo, subset, x):
1179 """Changeset is a named branch head.
1225 """Changeset is a named branch head.
1180 """
1226 """
1181 # i18n: "head" is a keyword
1227 # i18n: "head" is a keyword
1182 getargs(x, 0, 0, _("head takes no arguments"))
1228 getargs(x, 0, 0, _("head takes no arguments"))
1183 hs = set()
1229 hs = set()
1184 cl = repo.changelog
1230 cl = repo.changelog
1185 for ls in repo.branchmap().itervalues():
1231 for ls in repo.branchmap().itervalues():
1186 hs.update(cl.rev(h) for h in ls)
1232 hs.update(cl.rev(h) for h in ls)
1187 return subset & baseset(hs)
1233 return subset & baseset(hs)
1188
1234
1189 @predicate('heads(set)', safe=True)
1235 @predicate('heads(set)', safe=True)
1190 def heads(repo, subset, x):
1236 def heads(repo, subset, x):
1191 """Members of set with no children in set.
1237 """Members of set with no children in set.
1192 """
1238 """
1193 s = getset(repo, subset, x)
1239 s = getset(repo, subset, x)
1194 ps = parents(repo, subset, x)
1240 ps = parents(repo, subset, x)
1195 return s - ps
1241 return s - ps
1196
1242
1197 @predicate('hidden()', safe=True)
1243 @predicate('hidden()', safe=True)
1198 def hidden(repo, subset, x):
1244 def hidden(repo, subset, x):
1199 """Hidden changesets.
1245 """Hidden changesets.
1200 """
1246 """
1201 # i18n: "hidden" is a keyword
1247 # i18n: "hidden" is a keyword
1202 getargs(x, 0, 0, _("hidden takes no arguments"))
1248 getargs(x, 0, 0, _("hidden takes no arguments"))
1203 hiddenrevs = repoview.filterrevs(repo, 'visible')
1249 hiddenrevs = repoview.filterrevs(repo, 'visible')
1204 return subset & hiddenrevs
1250 return subset & hiddenrevs
1205
1251
1206 @predicate('keyword(string)', safe=True)
1252 @predicate('keyword(string)', safe=True)
1207 def keyword(repo, subset, x):
1253 def keyword(repo, subset, x):
1208 """Search commit message, user name, and names of changed files for
1254 """Search commit message, user name, and names of changed files for
1209 string. The match is case-insensitive.
1255 string. The match is case-insensitive.
1210 """
1256 """
1211 # i18n: "keyword" is a keyword
1257 # i18n: "keyword" is a keyword
1212 kw = encoding.lower(getstring(x, _("keyword requires a string")))
1258 kw = encoding.lower(getstring(x, _("keyword requires a string")))
1213
1259
1214 def matches(r):
1260 def matches(r):
1215 c = repo[r]
1261 c = repo[r]
1216 return any(kw in encoding.lower(t)
1262 return any(kw in encoding.lower(t)
1217 for t in c.files() + [c.user(), c.description()])
1263 for t in c.files() + [c.user(), c.description()])
1218
1264
1219 return subset.filter(matches, condrepr=('<keyword %r>', kw))
1265 return subset.filter(matches, condrepr=('<keyword %r>', kw))
1220
1266
1221 @predicate('limit(set[, n[, offset]])', safe=True)
1267 @predicate('limit(set[, n[, offset]])', safe=True)
1222 def limit(repo, subset, x):
1268 def limit(repo, subset, x):
1223 """First n members of set, defaulting to 1, starting from offset.
1269 """First n members of set, defaulting to 1, starting from offset.
1224 """
1270 """
1225 args = getargsdict(x, 'limit', 'set n offset')
1271 args = getargsdict(x, 'limit', 'set n offset')
1226 if 'set' not in args:
1272 if 'set' not in args:
1227 # i18n: "limit" is a keyword
1273 # i18n: "limit" is a keyword
1228 raise error.ParseError(_("limit requires one to three arguments"))
1274 raise error.ParseError(_("limit requires one to three arguments"))
1229 try:
1275 try:
1230 lim, ofs = 1, 0
1276 lim, ofs = 1, 0
1231 if 'n' in args:
1277 if 'n' in args:
1232 # i18n: "limit" is a keyword
1278 # i18n: "limit" is a keyword
1233 lim = int(getstring(args['n'], _("limit requires a number")))
1279 lim = int(getstring(args['n'], _("limit requires a number")))
1234 if 'offset' in args:
1280 if 'offset' in args:
1235 # i18n: "limit" is a keyword
1281 # i18n: "limit" is a keyword
1236 ofs = int(getstring(args['offset'], _("limit requires a number")))
1282 ofs = int(getstring(args['offset'], _("limit requires a number")))
1237 if ofs < 0:
1283 if ofs < 0:
1238 raise error.ParseError(_("negative offset"))
1284 raise error.ParseError(_("negative offset"))
1239 except (TypeError, ValueError):
1285 except (TypeError, ValueError):
1240 # i18n: "limit" is a keyword
1286 # i18n: "limit" is a keyword
1241 raise error.ParseError(_("limit expects a number"))
1287 raise error.ParseError(_("limit expects a number"))
1242 os = getset(repo, fullreposet(repo), args['set'])
1288 os = getset(repo, fullreposet(repo), args['set'])
1243 result = []
1289 result = []
1244 it = iter(os)
1290 it = iter(os)
1245 for x in xrange(ofs):
1291 for x in xrange(ofs):
1246 y = next(it, None)
1292 y = next(it, None)
1247 if y is None:
1293 if y is None:
1248 break
1294 break
1249 for x in xrange(lim):
1295 for x in xrange(lim):
1250 y = next(it, None)
1296 y = next(it, None)
1251 if y is None:
1297 if y is None:
1252 break
1298 break
1253 elif y in subset:
1299 elif y in subset:
1254 result.append(y)
1300 result.append(y)
1255 return baseset(result, datarepr=('<limit n=%d, offset=%d, %r, %r>',
1301 return baseset(result, datarepr=('<limit n=%d, offset=%d, %r, %r>',
1256 lim, ofs, subset, os))
1302 lim, ofs, subset, os))
1257
1303
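A small worked example (a minimal sketch; the numbers are arbitrary and assume a repository with enough revisions):

from mercurial import hg, ui as uimod

repo = hg.repository(uimod.ui(), '.')        # assumption: cwd is a Mercurial repository
first_two = repo.revs("limit(all(), 2)")     # revisions 0 and 1
next_two = repo.revs("limit(all(), 2, 2)")   # offset skips 0 and 1, yielding 2 and 3
just_one = repo.revs("limit(all())")         # n defaults to 1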
1258 @predicate('last(set, [n])', safe=True)
1304 @predicate('last(set, [n])', safe=True)
1259 def last(repo, subset, x):
1305 def last(repo, subset, x):
1260 """Last n members of set, defaulting to 1.
1306 """Last n members of set, defaulting to 1.
1261 """
1307 """
1262 # i18n: "last" is a keyword
1308 # i18n: "last" is a keyword
1263 l = getargs(x, 1, 2, _("last requires one or two arguments"))
1309 l = getargs(x, 1, 2, _("last requires one or two arguments"))
1264 try:
1310 try:
1265 lim = 1
1311 lim = 1
1266 if len(l) == 2:
1312 if len(l) == 2:
1267 # i18n: "last" is a keyword
1313 # i18n: "last" is a keyword
1268 lim = int(getstring(l[1], _("last requires a number")))
1314 lim = int(getstring(l[1], _("last requires a number")))
1269 except (TypeError, ValueError):
1315 except (TypeError, ValueError):
1270 # i18n: "last" is a keyword
1316 # i18n: "last" is a keyword
1271 raise error.ParseError(_("last expects a number"))
1317 raise error.ParseError(_("last expects a number"))
1272 os = getset(repo, fullreposet(repo), l[0])
1318 os = getset(repo, fullreposet(repo), l[0])
1273 os.reverse()
1319 os.reverse()
1274 result = []
1320 result = []
1275 it = iter(os)
1321 it = iter(os)
1276 for x in xrange(lim):
1322 for x in xrange(lim):
1277 y = next(it, None)
1323 y = next(it, None)
1278 if y is None:
1324 if y is None:
1279 break
1325 break
1280 elif y in subset:
1326 elif y in subset:
1281 result.append(y)
1327 result.append(y)
1282 return baseset(result, datarepr=('<last n=%d, %r, %r>', lim, subset, os))
1328 return baseset(result, datarepr=('<last n=%d, %r, %r>', lim, subset, os))
1283
1329
1284 @predicate('max(set)', safe=True)
1330 @predicate('max(set)', safe=True)
1285 def maxrev(repo, subset, x):
1331 def maxrev(repo, subset, x):
1286 """Changeset with highest revision number in set.
1332 """Changeset with highest revision number in set.
1287 """
1333 """
1288 os = getset(repo, fullreposet(repo), x)
1334 os = getset(repo, fullreposet(repo), x)
1289 try:
1335 try:
1290 m = os.max()
1336 m = os.max()
1291 if m in subset:
1337 if m in subset:
1292 return baseset([m], datarepr=('<max %r, %r>', subset, os))
1338 return baseset([m], datarepr=('<max %r, %r>', subset, os))
1293 except ValueError:
1339 except ValueError:
1294 # os.max() throws a ValueError when the collection is empty.
1340 # os.max() throws a ValueError when the collection is empty.
1295 # Same as python's max().
1341 # Same as python's max().
1296 pass
1342 pass
1297 return baseset(datarepr=('<max %r, %r>', subset, os))
1343 return baseset(datarepr=('<max %r, %r>', subset, os))
1298
1344
1299 @predicate('merge()', safe=True)
1345 @predicate('merge()', safe=True)
1300 def merge(repo, subset, x):
1346 def merge(repo, subset, x):
1301 """Changeset is a merge changeset.
1347 """Changeset is a merge changeset.
1302 """
1348 """
1303 # i18n: "merge" is a keyword
1349 # i18n: "merge" is a keyword
1304 getargs(x, 0, 0, _("merge takes no arguments"))
1350 getargs(x, 0, 0, _("merge takes no arguments"))
1305 cl = repo.changelog
1351 cl = repo.changelog
1306 return subset.filter(lambda r: cl.parentrevs(r)[1] != -1,
1352 return subset.filter(lambda r: cl.parentrevs(r)[1] != -1,
1307 condrepr='<merge>')
1353 condrepr='<merge>')
1308
1354
1309 @predicate('branchpoint()', safe=True)
1355 @predicate('branchpoint()', safe=True)
1310 def branchpoint(repo, subset, x):
1356 def branchpoint(repo, subset, x):
1311 """Changesets with more than one child.
1357 """Changesets with more than one child.
1312 """
1358 """
1313 # i18n: "branchpoint" is a keyword
1359 # i18n: "branchpoint" is a keyword
1314 getargs(x, 0, 0, _("branchpoint takes no arguments"))
1360 getargs(x, 0, 0, _("branchpoint takes no arguments"))
1315 cl = repo.changelog
1361 cl = repo.changelog
1316 if not subset:
1362 if not subset:
1317 return baseset()
1363 return baseset()
1318 # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
1364 # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
1319 # (and if it is not, it should.)
1365 # (and if it is not, it should.)
1320 baserev = min(subset)
1366 baserev = min(subset)
1321 parentscount = [0]*(len(repo) - baserev)
1367 parentscount = [0]*(len(repo) - baserev)
1322 for r in cl.revs(start=baserev + 1):
1368 for r in cl.revs(start=baserev + 1):
1323 for p in cl.parentrevs(r):
1369 for p in cl.parentrevs(r):
1324 if p >= baserev:
1370 if p >= baserev:
1325 parentscount[p - baserev] += 1
1371 parentscount[p - baserev] += 1
1326 return subset.filter(lambda r: parentscount[r - baserev] > 1,
1372 return subset.filter(lambda r: parentscount[r - baserev] > 1,
1327 condrepr='<branchpoint>')
1373 condrepr='<branchpoint>')
1328
1374
1329 @predicate('min(set)', safe=True)
1375 @predicate('min(set)', safe=True)
1330 def minrev(repo, subset, x):
1376 def minrev(repo, subset, x):
1331 """Changeset with lowest revision number in set.
1377 """Changeset with lowest revision number in set.
1332 """
1378 """
1333 os = getset(repo, fullreposet(repo), x)
1379 os = getset(repo, fullreposet(repo), x)
1334 try:
1380 try:
1335 m = os.min()
1381 m = os.min()
1336 if m in subset:
1382 if m in subset:
1337 return baseset([m], datarepr=('<min %r, %r>', subset, os))
1383 return baseset([m], datarepr=('<min %r, %r>', subset, os))
1338 except ValueError:
1384 except ValueError:
1339 # os.min() throws a ValueError when the collection is empty.
1385 # os.min() throws a ValueError when the collection is empty.
1340 # Same as python's min().
1386 # Same as python's min().
1341 pass
1387 pass
1342 return baseset(datarepr=('<min %r, %r>', subset, os))
1388 return baseset(datarepr=('<min %r, %r>', subset, os))
1343
1389
1344 @predicate('modifies(pattern)', safe=True)
1390 @predicate('modifies(pattern)', safe=True)
1345 def modifies(repo, subset, x):
1391 def modifies(repo, subset, x):
1346 """Changesets modifying files matched by pattern.
1392 """Changesets modifying files matched by pattern.
1347
1393
1348 The pattern without explicit kind like ``glob:`` is expected to be
1394 The pattern without explicit kind like ``glob:`` is expected to be
1349 relative to the current directory and match against a file or a
1395 relative to the current directory and match against a file or a
1350 directory.
1396 directory.
1351 """
1397 """
1352 # i18n: "modifies" is a keyword
1398 # i18n: "modifies" is a keyword
1353 pat = getstring(x, _("modifies requires a pattern"))
1399 pat = getstring(x, _("modifies requires a pattern"))
1354 return checkstatus(repo, subset, pat, 0)
1400 return checkstatus(repo, subset, pat, 0)
1355
1401
1356 @predicate('named(namespace)')
1402 @predicate('named(namespace)')
1357 def named(repo, subset, x):
1403 def named(repo, subset, x):
1358 """The changesets in a given namespace.
1404 """The changesets in a given namespace.
1359
1405
1360 If `namespace` starts with `re:`, the remainder of the string is treated as
1406 If `namespace` starts with `re:`, the remainder of the string is treated as
1361 a regular expression. To match a namespace that actually starts with `re:`,
1407 a regular expression. To match a namespace that actually starts with `re:`,
1362 use the prefix `literal:`.
1408 use the prefix `literal:`.
1363 """
1409 """
1364 # i18n: "named" is a keyword
1410 # i18n: "named" is a keyword
1365 args = getargs(x, 1, 1, _('named requires a namespace argument'))
1411 args = getargs(x, 1, 1, _('named requires a namespace argument'))
1366
1412
1367 ns = getstring(args[0],
1413 ns = getstring(args[0],
1368 # i18n: "named" is a keyword
1414 # i18n: "named" is a keyword
1369 _('the argument to named must be a string'))
1415 _('the argument to named must be a string'))
1370 kind, pattern, matcher = util.stringmatcher(ns)
1416 kind, pattern, matcher = util.stringmatcher(ns)
1371 namespaces = set()
1417 namespaces = set()
1372 if kind == 'literal':
1418 if kind == 'literal':
1373 if pattern not in repo.names:
1419 if pattern not in repo.names:
1374 raise error.RepoLookupError(_("namespace '%s' does not exist")
1420 raise error.RepoLookupError(_("namespace '%s' does not exist")
1375 % ns)
1421 % ns)
1376 namespaces.add(repo.names[pattern])
1422 namespaces.add(repo.names[pattern])
1377 else:
1423 else:
1378 for name, ns in repo.names.iteritems():
1424 for name, ns in repo.names.iteritems():
1379 if matcher(name):
1425 if matcher(name):
1380 namespaces.add(ns)
1426 namespaces.add(ns)
1381 if not namespaces:
1427 if not namespaces:
1382 raise error.RepoLookupError(_("no namespace exists"
1428 raise error.RepoLookupError(_("no namespace exists"
1383 " that match '%s'") % pattern)
1429 " that match '%s'") % pattern)
1384
1430
1385 names = set()
1431 names = set()
1386 for ns in namespaces:
1432 for ns in namespaces:
1387 for name in ns.listnames(repo):
1433 for name in ns.listnames(repo):
1388 if name not in ns.deprecated:
1434 if name not in ns.deprecated:
1389 names.update(repo[n].rev() for n in ns.nodes(repo, name))
1435 names.update(repo[n].rev() for n in ns.nodes(repo, name))
1390
1436
1391 names -= set([node.nullrev])
1437 names -= set([node.nullrev])
1392 return subset & names
1438 return subset & names
1393
1439
1394 @predicate('id(string)', safe=True)
1440 @predicate('id(string)', safe=True)
1395 def node_(repo, subset, x):
1441 def node_(repo, subset, x):
1396 """Revision non-ambiguously specified by the given hex string prefix.
1442 """Revision non-ambiguously specified by the given hex string prefix.
1397 """
1443 """
1398 # i18n: "id" is a keyword
1444 # i18n: "id" is a keyword
1399 l = getargs(x, 1, 1, _("id requires one argument"))
1445 l = getargs(x, 1, 1, _("id requires one argument"))
1400 # i18n: "id" is a keyword
1446 # i18n: "id" is a keyword
1401 n = getstring(l[0], _("id requires a string"))
1447 n = getstring(l[0], _("id requires a string"))
1402 if len(n) == 40:
1448 if len(n) == 40:
1403 try:
1449 try:
1404 rn = repo.changelog.rev(node.bin(n))
1450 rn = repo.changelog.rev(node.bin(n))
1405 except (LookupError, TypeError):
1451 except (LookupError, TypeError):
1406 rn = None
1452 rn = None
1407 else:
1453 else:
1408 rn = None
1454 rn = None
1409 pm = repo.changelog._partialmatch(n)
1455 pm = repo.changelog._partialmatch(n)
1410 if pm is not None:
1456 if pm is not None:
1411 rn = repo.changelog.rev(pm)
1457 rn = repo.changelog.rev(pm)
1412
1458
1413 if rn is None:
1459 if rn is None:
1414 return baseset()
1460 return baseset()
1415 result = baseset([rn])
1461 result = baseset([rn])
1416 return result & subset
1462 return result & subset
1417
1463
1418 @predicate('obsolete()', safe=True)
1464 @predicate('obsolete()', safe=True)
1419 def obsolete(repo, subset, x):
1465 def obsolete(repo, subset, x):
1420 """Mutable changeset with a newer version."""
1466 """Mutable changeset with a newer version."""
1421 # i18n: "obsolete" is a keyword
1467 # i18n: "obsolete" is a keyword
1422 getargs(x, 0, 0, _("obsolete takes no arguments"))
1468 getargs(x, 0, 0, _("obsolete takes no arguments"))
1423 obsoletes = obsmod.getrevs(repo, 'obsolete')
1469 obsoletes = obsmod.getrevs(repo, 'obsolete')
1424 return subset & obsoletes
1470 return subset & obsoletes
1425
1471
1426 @predicate('only(set, [set])', safe=True)
1472 @predicate('only(set, [set])', safe=True)
1427 def only(repo, subset, x):
1473 def only(repo, subset, x):
1428 """Changesets that are ancestors of the first set that are not ancestors
1474 """Changesets that are ancestors of the first set that are not ancestors
1429 of any other head in the repo. If a second set is specified, the result
1475 of any other head in the repo. If a second set is specified, the result
1430 is ancestors of the first set that are not ancestors of the second set
1476 is ancestors of the first set that are not ancestors of the second set
1431 (i.e. ::<set1> - ::<set2>).
1477 (i.e. ::<set1> - ::<set2>).
1432 """
1478 """
1433 cl = repo.changelog
1479 cl = repo.changelog
1434 # i18n: "only" is a keyword
1480 # i18n: "only" is a keyword
1435 args = getargs(x, 1, 2, _('only takes one or two arguments'))
1481 args = getargs(x, 1, 2, _('only takes one or two arguments'))
1436 include = getset(repo, fullreposet(repo), args[0])
1482 include = getset(repo, fullreposet(repo), args[0])
1437 if len(args) == 1:
1483 if len(args) == 1:
1438 if not include:
1484 if not include:
1439 return baseset()
1485 return baseset()
1440
1486
1441 descendants = set(_revdescendants(repo, include, False))
1487 descendants = set(_revdescendants(repo, include, False))
1442 exclude = [rev for rev in cl.headrevs()
1488 exclude = [rev for rev in cl.headrevs()
1443 if not rev in descendants and not rev in include]
1489 if not rev in descendants and not rev in include]
1444 else:
1490 else:
1445 exclude = getset(repo, fullreposet(repo), args[1])
1491 exclude = getset(repo, fullreposet(repo), args[1])
1446
1492
1447 results = set(cl.findmissingrevs(common=exclude, heads=include))
1493 results = set(cl.findmissingrevs(common=exclude, heads=include))
1448 # XXX we should turn this into a baseset instead of a set, smartset may do
1494 # XXX we should turn this into a baseset instead of a set, smartset may do
1449 # some optimizations from the fact this is a baseset.
1495 # some optimizations from the fact this is a baseset.
1450 return subset & results
1496 return subset & results
1451
1497
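For example, ``hg log -r "only('feature', 'default')"`` (branch names chosen for illustration) lists ancestors of the feature branch that are not ancestors of default, i.e. ``::'feature' - ::'default'``; with a single argument, ``only(.)`` selects ancestors of the working directory parent that no other head in the repository can reach.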
1452 @predicate('origin([set])', safe=True)
1498 @predicate('origin([set])', safe=True)
1453 def origin(repo, subset, x):
1499 def origin(repo, subset, x):
1454 """
1500 """
1455 Changesets that were specified as a source for the grafts, transplants or
1501 Changesets that were specified as a source for the grafts, transplants or
1456 rebases that created the given revisions. Omitting the optional set is the
1502 rebases that created the given revisions. Omitting the optional set is the
1457 same as passing all(). If a changeset created by these operations is itself
1503 same as passing all(). If a changeset created by these operations is itself
1458 specified as a source for one of these operations, only the source changeset
1504 specified as a source for one of these operations, only the source changeset
1459 for the first operation is selected.
1505 for the first operation is selected.
1460 """
1506 """
1461 if x is not None:
1507 if x is not None:
1462 dests = getset(repo, fullreposet(repo), x)
1508 dests = getset(repo, fullreposet(repo), x)
1463 else:
1509 else:
1464 dests = fullreposet(repo)
1510 dests = fullreposet(repo)
1465
1511
1466 def _firstsrc(rev):
1512 def _firstsrc(rev):
1467 src = _getrevsource(repo, rev)
1513 src = _getrevsource(repo, rev)
1468 if src is None:
1514 if src is None:
1469 return None
1515 return None
1470
1516
1471 while True:
1517 while True:
1472 prev = _getrevsource(repo, src)
1518 prev = _getrevsource(repo, src)
1473
1519
1474 if prev is None:
1520 if prev is None:
1475 return src
1521 return src
1476 src = prev
1522 src = prev
1477
1523
1478 o = set([_firstsrc(r) for r in dests])
1524 o = set([_firstsrc(r) for r in dests])
1479 o -= set([None])
1525 o -= set([None])
1480 # XXX we should turn this into a baseset instead of a set, smartset may do
1526 # XXX we should turn this into a baseset instead of a set, smartset may do
1481 # some optimizations from the fact this is a baseset.
1527 # some optimizations from the fact this is a baseset.
1482 return subset & o
1528 return subset & o
1483
1529
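For example, if ``tip`` was created by a graft (an assumed scenario), ``origin(tip)`` resolves it back to the changeset it was originally copied from, even across chained grafts; ``destination(origin(tip))``, using the predicate defined earlier in this file, goes the other way and selects every changeset copied from that source.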
1484 @predicate('outgoing([path])', safe=True)
1530 @predicate('outgoing([path])', safe=True)
1485 def outgoing(repo, subset, x):
1531 def outgoing(repo, subset, x):
1486 """Changesets not found in the specified destination repository, or the
1532 """Changesets not found in the specified destination repository, or the
1487 default push location.
1533 default push location.
1488 """
1534 """
1489 # Avoid cycles.
1535 # Avoid cycles.
1490 from . import (
1536 from . import (
1491 discovery,
1537 discovery,
1492 hg,
1538 hg,
1493 )
1539 )
1494 # i18n: "outgoing" is a keyword
1540 # i18n: "outgoing" is a keyword
1495 l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
1541 l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
1496 # i18n: "outgoing" is a keyword
1542 # i18n: "outgoing" is a keyword
1497 dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
1543 dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
1498 dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
1544 dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
1499 dest, branches = hg.parseurl(dest)
1545 dest, branches = hg.parseurl(dest)
1500 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1546 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1501 if revs:
1547 if revs:
1502 revs = [repo.lookup(rev) for rev in revs]
1548 revs = [repo.lookup(rev) for rev in revs]
1503 other = hg.peer(repo, {}, dest)
1549 other = hg.peer(repo, {}, dest)
1504 repo.ui.pushbuffer()
1550 repo.ui.pushbuffer()
1505 outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
1551 outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
1506 repo.ui.popbuffer()
1552 repo.ui.popbuffer()
1507 cl = repo.changelog
1553 cl = repo.changelog
1508 o = set([cl.rev(r) for r in outgoing.missing])
1554 o = set([cl.rev(r) for r in outgoing.missing])
1509 return subset & o
1555 return subset & o
1510
1556
1511 @predicate('p1([set])', safe=True)
1557 @predicate('p1([set])', safe=True)
1512 def p1(repo, subset, x):
1558 def p1(repo, subset, x):
1513 """First parent of changesets in set, or the working directory.
1559 """First parent of changesets in set, or the working directory.
1514 """
1560 """
1515 if x is None:
1561 if x is None:
1516 p = repo[x].p1().rev()
1562 p = repo[x].p1().rev()
1517 if p >= 0:
1563 if p >= 0:
1518 return subset & baseset([p])
1564 return subset & baseset([p])
1519 return baseset()
1565 return baseset()
1520
1566
1521 ps = set()
1567 ps = set()
1522 cl = repo.changelog
1568 cl = repo.changelog
1523 for r in getset(repo, fullreposet(repo), x):
1569 for r in getset(repo, fullreposet(repo), x):
1524 ps.add(cl.parentrevs(r)[0])
1570 ps.add(cl.parentrevs(r)[0])
1525 ps -= set([node.nullrev])
1571 ps -= set([node.nullrev])
1526 # XXX we should turn this into a baseset instead of a set, smartset may do
1572 # XXX we should turn this into a baseset instead of a set, smartset may do
1527 # some optimizations from the fact this is a baseset.
1573 # some optimizations from the fact this is a baseset.
1528 return subset & ps
1574 return subset & ps
1529
1575
1530 @predicate('p2([set])', safe=True)
1576 @predicate('p2([set])', safe=True)
1531 def p2(repo, subset, x):
1577 def p2(repo, subset, x):
1532 """Second parent of changesets in set, or the working directory.
1578 """Second parent of changesets in set, or the working directory.
1533 """
1579 """
1534 if x is None:
1580 if x is None:
1535 ps = repo[x].parents()
1581 ps = repo[x].parents()
1536 try:
1582 try:
1537 p = ps[1].rev()
1583 p = ps[1].rev()
1538 if p >= 0:
1584 if p >= 0:
1539 return subset & baseset([p])
1585 return subset & baseset([p])
1540 return baseset()
1586 return baseset()
1541 except IndexError:
1587 except IndexError:
1542 return baseset()
1588 return baseset()
1543
1589
1544 ps = set()
1590 ps = set()
1545 cl = repo.changelog
1591 cl = repo.changelog
1546 for r in getset(repo, fullreposet(repo), x):
1592 for r in getset(repo, fullreposet(repo), x):
1547 ps.add(cl.parentrevs(r)[1])
1593 ps.add(cl.parentrevs(r)[1])
1548 ps -= set([node.nullrev])
1594 ps -= set([node.nullrev])
1549 # XXX we should turn this into a baseset instead of a set, smartset may do
1595 # XXX we should turn this into a baseset instead of a set, smartset may do
1550 # some optimizations from the fact this is a baseset.
1596 # some optimizations from the fact this is a baseset.
1551 return subset & ps
1597 return subset & ps
1552
1598
1553 def parentpost(repo, subset, x, order):
1599 def parentpost(repo, subset, x, order):
1554 return p1(repo, subset, x)
1600 return p1(repo, subset, x)
1555
1601
1556 @predicate('parents([set])', safe=True)
1602 @predicate('parents([set])', safe=True)
1557 def parents(repo, subset, x):
1603 def parents(repo, subset, x):
1558 """
1604 """
1559 The set of all parents for all changesets in set, or the working directory.
1605 The set of all parents for all changesets in set, or the working directory.
1560 """
1606 """
1561 if x is None:
1607 if x is None:
1562 ps = set(p.rev() for p in repo[x].parents())
1608 ps = set(p.rev() for p in repo[x].parents())
1563 else:
1609 else:
1564 ps = set()
1610 ps = set()
1565 cl = repo.changelog
1611 cl = repo.changelog
1566 up = ps.update
1612 up = ps.update
1567 parentrevs = cl.parentrevs
1613 parentrevs = cl.parentrevs
1568 for r in getset(repo, fullreposet(repo), x):
1614 for r in getset(repo, fullreposet(repo), x):
1569 if r == node.wdirrev:
1615 if r == node.wdirrev:
1570 up(p.rev() for p in repo[r].parents())
1616 up(p.rev() for p in repo[r].parents())
1571 else:
1617 else:
1572 up(parentrevs(r))
1618 up(parentrevs(r))
1573 ps -= set([node.nullrev])
1619 ps -= set([node.nullrev])
1574 return subset & ps
1620 return subset & ps
1575
1621
1576 def _phase(repo, subset, target):
1622 def _phase(repo, subset, target):
1577 """helper to select all rev in phase <target>"""
1623 """helper to select all rev in phase <target>"""
1578 repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
1624 repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
1579 if repo._phasecache._phasesets:
1625 if repo._phasecache._phasesets:
1580 s = repo._phasecache._phasesets[target] - repo.changelog.filteredrevs
1626 s = repo._phasecache._phasesets[target] - repo.changelog.filteredrevs
1581 s = baseset(s)
1627 s = baseset(s)
1582 s.sort() # set are non ordered, so we enforce ascending
1628 s.sort() # set are non ordered, so we enforce ascending
1583 return subset & s
1629 return subset & s
1584 else:
1630 else:
1585 phase = repo._phasecache.phase
1631 phase = repo._phasecache.phase
1586 condition = lambda r: phase(repo, r) == target
1632 condition = lambda r: phase(repo, r) == target
1587 return subset.filter(condition, condrepr=('<phase %r>', target),
1633 return subset.filter(condition, condrepr=('<phase %r>', target),
1588 cache=False)
1634 cache=False)
1589
1635
1590 @predicate('draft()', safe=True)
1636 @predicate('draft()', safe=True)
1591 def draft(repo, subset, x):
1637 def draft(repo, subset, x):
1592 """Changeset in draft phase."""
1638 """Changeset in draft phase."""
1593 # i18n: "draft" is a keyword
1639 # i18n: "draft" is a keyword
1594 getargs(x, 0, 0, _("draft takes no arguments"))
1640 getargs(x, 0, 0, _("draft takes no arguments"))
1595 target = phases.draft
1641 target = phases.draft
1596 return _phase(repo, subset, target)
1642 return _phase(repo, subset, target)
1597
1643
1598 @predicate('secret()', safe=True)
1644 @predicate('secret()', safe=True)
1599 def secret(repo, subset, x):
1645 def secret(repo, subset, x):
1600 """Changeset in secret phase."""
1646 """Changeset in secret phase."""
1601 # i18n: "secret" is a keyword
1647 # i18n: "secret" is a keyword
1602 getargs(x, 0, 0, _("secret takes no arguments"))
1648 getargs(x, 0, 0, _("secret takes no arguments"))
1603 target = phases.secret
1649 target = phases.secret
1604 return _phase(repo, subset, target)
1650 return _phase(repo, subset, target)
1605
1651
1606 def parentspec(repo, subset, x, n, order):
1652 def parentspec(repo, subset, x, n, order):
1607 """``set^0``
1653 """``set^0``
1608 The set.
1654 The set.
1609 ``set^1`` (or ``set^``), ``set^2``
1655 ``set^1`` (or ``set^``), ``set^2``
1610 First or second parent, respectively, of all changesets in set.
1656 First or second parent, respectively, of all changesets in set.
1611 """
1657 """
1612 try:
1658 try:
1613 n = int(n[1])
1659 n = int(n[1])
1614 if n not in (0, 1, 2):
1660 if n not in (0, 1, 2):
1615 raise ValueError
1661 raise ValueError
1616 except (TypeError, ValueError):
1662 except (TypeError, ValueError):
1617 raise error.ParseError(_("^ expects a number 0, 1, or 2"))
1663 raise error.ParseError(_("^ expects a number 0, 1, or 2"))
1618 ps = set()
1664 ps = set()
1619 cl = repo.changelog
1665 cl = repo.changelog
1620 for r in getset(repo, fullreposet(repo), x):
1666 for r in getset(repo, fullreposet(repo), x):
1621 if n == 0:
1667 if n == 0:
1622 ps.add(r)
1668 ps.add(r)
1623 elif n == 1:
1669 elif n == 1:
1624 ps.add(cl.parentrevs(r)[0])
1670 ps.add(cl.parentrevs(r)[0])
1625 elif n == 2:
1671 elif n == 2:
1626 parents = cl.parentrevs(r)
1672 parents = cl.parentrevs(r)
1627 if parents[1] != node.nullrev:
1673 if parents[1] != node.nullrev:
1628 ps.add(parents[1])
1674 ps.add(parents[1])
1629 return subset & ps
1675 return subset & ps
1630
1676
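For instance, ``.^`` (equivalent to ``.^1``) is the first parent of the working directory parent, ``tip^2`` is the second parent of tip and comes back empty when tip is not a merge, and ``tip^0`` is tip itself.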
1631 @predicate('present(set)', safe=True)
1677 @predicate('present(set)', safe=True)
1632 def present(repo, subset, x):
1678 def present(repo, subset, x):
1633 """An empty set, if any revision in set isn't found; otherwise,
1679 """An empty set, if any revision in set isn't found; otherwise,
1634 all revisions in set.
1680 all revisions in set.
1635
1681
1636 If any of specified revisions is not present in the local repository,
1682 If any of specified revisions is not present in the local repository,
1637 the query is normally aborted. But this predicate allows the query
1683 the query is normally aborted. But this predicate allows the query
1638 to continue even in such cases.
1684 to continue even in such cases.
1639 """
1685 """
1640 try:
1686 try:
1641 return getset(repo, subset, x)
1687 return getset(repo, subset, x)
1642 except error.RepoLookupError:
1688 except error.RepoLookupError:
1643 return baseset()
1689 return baseset()
1644
1690
1645 # for internal use
1691 # for internal use
1646 @predicate('_notpublic', safe=True)
1692 @predicate('_notpublic', safe=True)
1647 def _notpublic(repo, subset, x):
1693 def _notpublic(repo, subset, x):
1648 getargs(x, 0, 0, "_notpublic takes no arguments")
1694 getargs(x, 0, 0, "_notpublic takes no arguments")
1649 repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
1695 repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
1650 if repo._phasecache._phasesets:
1696 if repo._phasecache._phasesets:
1651 s = set()
1697 s = set()
1652 for u in repo._phasecache._phasesets[1:]:
1698 for u in repo._phasecache._phasesets[1:]:
1653 s.update(u)
1699 s.update(u)
1654 s = baseset(s - repo.changelog.filteredrevs)
1700 s = baseset(s - repo.changelog.filteredrevs)
1655 s.sort()
1701 s.sort()
1656 return subset & s
1702 return subset & s
1657 else:
1703 else:
1658 phase = repo._phasecache.phase
1704 phase = repo._phasecache.phase
1659 target = phases.public
1705 target = phases.public
1660 condition = lambda r: phase(repo, r) != target
1706 condition = lambda r: phase(repo, r) != target
1661 return subset.filter(condition, condrepr=('<phase %r>', target),
1707 return subset.filter(condition, condrepr=('<phase %r>', target),
1662 cache=False)
1708 cache=False)
1663
1709
1664 @predicate('public()', safe=True)
1710 @predicate('public()', safe=True)
1665 def public(repo, subset, x):
1711 def public(repo, subset, x):
1666 """Changeset in public phase."""
1712 """Changeset in public phase."""
1667 # i18n: "public" is a keyword
1713 # i18n: "public" is a keyword
1668 getargs(x, 0, 0, _("public takes no arguments"))
1714 getargs(x, 0, 0, _("public takes no arguments"))
1669 phase = repo._phasecache.phase
1715 phase = repo._phasecache.phase
1670 target = phases.public
1716 target = phases.public
1671 condition = lambda r: phase(repo, r) == target
1717 condition = lambda r: phase(repo, r) == target
1672 return subset.filter(condition, condrepr=('<phase %r>', target),
1718 return subset.filter(condition, condrepr=('<phase %r>', target),
1673 cache=False)
1719 cache=False)
1674
1720
1675 @predicate('remote([id [,path]])', safe=True)
1721 @predicate('remote([id [,path]])', safe=True)
1676 def remote(repo, subset, x):
1722 def remote(repo, subset, x):
1677 """Local revision that corresponds to the given identifier in a
1723 """Local revision that corresponds to the given identifier in a
1678 remote repository, if present. Here, the '.' identifier is a
1724 remote repository, if present. Here, the '.' identifier is a
1679 synonym for the current local branch.
1725 synonym for the current local branch.
1680 """
1726 """
1681
1727
1682 from . import hg # avoid start-up nasties
1728 from . import hg # avoid start-up nasties
1683 # i18n: "remote" is a keyword
1729 # i18n: "remote" is a keyword
1684 l = getargs(x, 0, 2, _("remote takes zero, one, or two arguments"))
1730 l = getargs(x, 0, 2, _("remote takes zero, one, or two arguments"))
1685
1731
1686 q = '.'
1732 q = '.'
1687 if len(l) > 0:
1733 if len(l) > 0:
1688 # i18n: "remote" is a keyword
1734 # i18n: "remote" is a keyword
1689 q = getstring(l[0], _("remote requires a string id"))
1735 q = getstring(l[0], _("remote requires a string id"))
1690 if q == '.':
1736 if q == '.':
1691 q = repo['.'].branch()
1737 q = repo['.'].branch()
1692
1738
1693 dest = ''
1739 dest = ''
1694 if len(l) > 1:
1740 if len(l) > 1:
1695 # i18n: "remote" is a keyword
1741 # i18n: "remote" is a keyword
1696 dest = getstring(l[1], _("remote requires a repository path"))
1742 dest = getstring(l[1], _("remote requires a repository path"))
1697 dest = repo.ui.expandpath(dest or 'default')
1743 dest = repo.ui.expandpath(dest or 'default')
1698 dest, branches = hg.parseurl(dest)
1744 dest, branches = hg.parseurl(dest)
1699 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1745 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1700 if revs:
1746 if revs:
1701 revs = [repo.lookup(rev) for rev in revs]
1747 revs = [repo.lookup(rev) for rev in revs]
1702 other = hg.peer(repo, {}, dest)
1748 other = hg.peer(repo, {}, dest)
1703 n = other.lookup(q)
1749 n = other.lookup(q)
1704 if n in repo:
1750 if n in repo:
1705 r = repo[n].rev()
1751 r = repo[n].rev()
1706 if r in subset:
1752 if r in subset:
1707 return baseset([r])
1753 return baseset([r])
1708 return baseset()
1754 return baseset()
1709
1755
1710 @predicate('removes(pattern)', safe=True)
1756 @predicate('removes(pattern)', safe=True)
1711 def removes(repo, subset, x):
1757 def removes(repo, subset, x):
1712 """Changesets which remove files matching pattern.
1758 """Changesets which remove files matching pattern.
1713
1759
1714 The pattern without explicit kind like ``glob:`` is expected to be
1760 The pattern without explicit kind like ``glob:`` is expected to be
1715 relative to the current directory and match against a file or a
1761 relative to the current directory and match against a file or a
1716 directory.
1762 directory.
1717 """
1763 """
1718 # i18n: "removes" is a keyword
1764 # i18n: "removes" is a keyword
1719 pat = getstring(x, _("removes requires a pattern"))
1765 pat = getstring(x, _("removes requires a pattern"))
1720 return checkstatus(repo, subset, pat, 2)
1766 return checkstatus(repo, subset, pat, 2)
1721
1767
1722 @predicate('rev(number)', safe=True)
1768 @predicate('rev(number)', safe=True)
1723 def rev(repo, subset, x):
1769 def rev(repo, subset, x):
1724 """Revision with the given numeric identifier.
1770 """Revision with the given numeric identifier.
1725 """
1771 """
1726 # i18n: "rev" is a keyword
1772 # i18n: "rev" is a keyword
1727 l = getargs(x, 1, 1, _("rev requires one argument"))
1773 l = getargs(x, 1, 1, _("rev requires one argument"))
1728 try:
1774 try:
1729 # i18n: "rev" is a keyword
1775 # i18n: "rev" is a keyword
1730 l = int(getstring(l[0], _("rev requires a number")))
1776 l = int(getstring(l[0], _("rev requires a number")))
1731 except (TypeError, ValueError):
1777 except (TypeError, ValueError):
1732 # i18n: "rev" is a keyword
1778 # i18n: "rev" is a keyword
1733 raise error.ParseError(_("rev expects a number"))
1779 raise error.ParseError(_("rev expects a number"))
1734 if l not in repo.changelog and l != node.nullrev:
1780 if l not in repo.changelog and l != node.nullrev:
1735 return baseset()
1781 return baseset()
1736 return subset & baseset([l])
1782 return subset & baseset([l])
1737
1783
1738 @predicate('matching(revision [, field])', safe=True)
1784 @predicate('matching(revision [, field])', safe=True)
1739 def matching(repo, subset, x):
1785 def matching(repo, subset, x):
1740 """Changesets in which a given set of fields match the set of fields in the
1786 """Changesets in which a given set of fields match the set of fields in the
1741 selected revision or set.
1787 selected revision or set.
1742
1788
1743 To match more than one field, pass the list of fields to match separated
1789 To match more than one field, pass the list of fields to match separated
1744 by spaces (e.g. ``author description``).
1790 by spaces (e.g. ``author description``).
1745
1791
1746 Valid fields are most regular revision fields and some special fields.
1792 Valid fields are most regular revision fields and some special fields.
1747
1793
1748 Regular revision fields are ``description``, ``author``, ``branch``,
1794 Regular revision fields are ``description``, ``author``, ``branch``,
1749 ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
1795 ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
1750 and ``diff``.
1796 and ``diff``.
1751 Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
1797 Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
1752 contents of the revision. Two revisions matching their ``diff`` will
1798 contents of the revision. Two revisions matching their ``diff`` will
1753 also match their ``files``.
1799 also match their ``files``.
1754
1800
1755 Special fields are ``summary`` and ``metadata``:
1801 Special fields are ``summary`` and ``metadata``:
1756 ``summary`` matches the first line of the description.
1802 ``summary`` matches the first line of the description.
1757 ``metadata`` is equivalent to matching ``description user date``
1803 ``metadata`` is equivalent to matching ``description user date``
1758 (i.e. it matches the main metadata fields).
1804 (i.e. it matches the main metadata fields).
1759
1805
1760 ``metadata`` is the default field which is used when no fields are
1806 ``metadata`` is the default field which is used when no fields are
1761 specified. You can match more than one field at a time.
1807 specified. You can match more than one field at a time.
1762 """
1808 """
1763 # i18n: "matching" is a keyword
1809 # i18n: "matching" is a keyword
1764 l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))
1810 l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))
1765
1811
1766 revs = getset(repo, fullreposet(repo), l[0])
1812 revs = getset(repo, fullreposet(repo), l[0])
1767
1813
1768 fieldlist = ['metadata']
1814 fieldlist = ['metadata']
1769 if len(l) > 1:
1815 if len(l) > 1:
1770 fieldlist = getstring(l[1],
1816 fieldlist = getstring(l[1],
1771 # i18n: "matching" is a keyword
1817 # i18n: "matching" is a keyword
1772 _("matching requires a string "
1818 _("matching requires a string "
1773 "as its second argument")).split()
1819 "as its second argument")).split()
1774
1820
1775 # Make sure that there are no repeated fields,
1821 # Make sure that there are no repeated fields,
1776 # expand the 'special' 'metadata' field type
1822 # expand the 'special' 'metadata' field type
1777 # and check the 'files' whenever we check the 'diff'
1823 # and check the 'files' whenever we check the 'diff'
1778 fields = []
1824 fields = []
1779 for field in fieldlist:
1825 for field in fieldlist:
1780 if field == 'metadata':
1826 if field == 'metadata':
1781 fields += ['user', 'description', 'date']
1827 fields += ['user', 'description', 'date']
1782 elif field == 'diff':
1828 elif field == 'diff':
1783 # a revision matching the diff must also match the files
1829 # a revision matching the diff must also match the files
1784 # since matching the diff is very costly, make sure to
1830 # since matching the diff is very costly, make sure to
1785 # also match the files first
1831 # also match the files first
1786 fields += ['files', 'diff']
1832 fields += ['files', 'diff']
1787 else:
1833 else:
1788 if field == 'author':
1834 if field == 'author':
1789 field = 'user'
1835 field = 'user'
1790 fields.append(field)
1836 fields.append(field)
1791 fields = set(fields)
1837 fields = set(fields)
1792 if 'summary' in fields and 'description' in fields:
1838 if 'summary' in fields and 'description' in fields:
1793 # If a revision matches its description it also matches its summary
1839 # If a revision matches its description it also matches its summary
1794 fields.discard('summary')
1840 fields.discard('summary')
1795
1841
1796 # We may want to match more than one field
1842 # We may want to match more than one field
1797 # Not all fields take the same amount of time to be matched
1843 # Not all fields take the same amount of time to be matched
1798 # Sort the selected fields in order of increasing matching cost
1844 # Sort the selected fields in order of increasing matching cost
1799 fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
1845 fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
1800 'files', 'description', 'substate', 'diff']
1846 'files', 'description', 'substate', 'diff']
1801 def fieldkeyfunc(f):
1847 def fieldkeyfunc(f):
1802 try:
1848 try:
1803 return fieldorder.index(f)
1849 return fieldorder.index(f)
1804 except ValueError:
1850 except ValueError:
1805 # assume an unknown field is very costly
1851 # assume an unknown field is very costly
1806 return len(fieldorder)
1852 return len(fieldorder)
1807 fields = list(fields)
1853 fields = list(fields)
1808 fields.sort(key=fieldkeyfunc)
1854 fields.sort(key=fieldkeyfunc)
1809
1855
1810 # Each field will be matched with its own "getfield" function
1856 # Each field will be matched with its own "getfield" function
1811 # which will be added to the getfieldfuncs array of functions
1857 # which will be added to the getfieldfuncs array of functions
1812 getfieldfuncs = []
1858 getfieldfuncs = []
1813 _funcs = {
1859 _funcs = {
1814 'user': lambda r: repo[r].user(),
1860 'user': lambda r: repo[r].user(),
1815 'branch': lambda r: repo[r].branch(),
1861 'branch': lambda r: repo[r].branch(),
1816 'date': lambda r: repo[r].date(),
1862 'date': lambda r: repo[r].date(),
1817 'description': lambda r: repo[r].description(),
1863 'description': lambda r: repo[r].description(),
1818 'files': lambda r: repo[r].files(),
1864 'files': lambda r: repo[r].files(),
1819 'parents': lambda r: repo[r].parents(),
1865 'parents': lambda r: repo[r].parents(),
1820 'phase': lambda r: repo[r].phase(),
1866 'phase': lambda r: repo[r].phase(),
1821 'substate': lambda r: repo[r].substate,
1867 'substate': lambda r: repo[r].substate,
1822 'summary': lambda r: repo[r].description().splitlines()[0],
1868 'summary': lambda r: repo[r].description().splitlines()[0],
1823 'diff': lambda r: list(repo[r].diff(git=True)),
1869 'diff': lambda r: list(repo[r].diff(git=True)),
1824 }
1870 }
1825 for info in fields:
1871 for info in fields:
1826 getfield = _funcs.get(info, None)
1872 getfield = _funcs.get(info, None)
1827 if getfield is None:
1873 if getfield is None:
1828 raise error.ParseError(
1874 raise error.ParseError(
1829 # i18n: "matching" is a keyword
1875 # i18n: "matching" is a keyword
1830 _("unexpected field name passed to matching: %s") % info)
1876 _("unexpected field name passed to matching: %s") % info)
1831 getfieldfuncs.append(getfield)
1877 getfieldfuncs.append(getfield)
1832 # convert the getfield array of functions into a "getinfo" function
1878 # convert the getfield array of functions into a "getinfo" function
1833 # which returns an array of field values (or a single value if there
1879 # which returns an array of field values (or a single value if there
1834 # is only one field to match)
1880 # is only one field to match)
1835 getinfo = lambda r: [f(r) for f in getfieldfuncs]
1881 getinfo = lambda r: [f(r) for f in getfieldfuncs]
1836
1882
1837 def matches(x):
1883 def matches(x):
1838 for rev in revs:
1884 for rev in revs:
1839 target = getinfo(rev)
1885 target = getinfo(rev)
1840 match = True
1886 match = True
1841 for n, f in enumerate(getfieldfuncs):
1887 for n, f in enumerate(getfieldfuncs):
1842 if target[n] != f(x):
1888 if target[n] != f(x):
1843 match = False
1889 match = False
1844 if match:
1890 if match:
1845 return True
1891 return True
1846 return False
1892 return False
1847
1893
1848 return subset.filter(matches, condrepr=('<matching%r %r>', fields, revs))
1894 return subset.filter(matches, condrepr=('<matching%r %r>', fields, revs))
1849
1895
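# A minimal, self-contained sketch (not used by the revset machinery) of the
# field normalization performed by matching() above: 'metadata' expands to
# its component fields, 'author' is an alias for 'user', 'diff' drags in
# 'files', and 'summary' is dropped when 'description' is already matched.
def _demonormalizefields(fieldlist):
    fields = []
    for field in fieldlist:
        if field == 'metadata':
            fields += ['user', 'description', 'date']
        elif field == 'diff':
            fields += ['files', 'diff']
        else:
            fields.append('user' if field == 'author' else field)
    fields = set(fields)
    if 'summary' in fields and 'description' in fields:
        fields.discard('summary')
    return sorted(fields)

# _demonormalizefields(['metadata', 'author', 'summary'])
# -> ['date', 'description', 'user']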
1850 @predicate('reverse(set)', safe=True, takeorder=True)
1896 @predicate('reverse(set)', safe=True, takeorder=True)
1851 def reverse(repo, subset, x, order):
1897 def reverse(repo, subset, x, order):
1852 """Reverse order of set.
1898 """Reverse order of set.
1853 """
1899 """
1854 l = getset(repo, subset, x)
1900 l = getset(repo, subset, x)
1855 if order == defineorder:
1901 if order == defineorder:
1856 l.reverse()
1902 l.reverse()
1857 return l
1903 return l
1858
1904
1859 @predicate('roots(set)', safe=True)
1905 @predicate('roots(set)', safe=True)
1860 def roots(repo, subset, x):
1906 def roots(repo, subset, x):
1861 """Changesets in set with no parent changeset in set.
1907 """Changesets in set with no parent changeset in set.
1862 """
1908 """
1863 s = getset(repo, fullreposet(repo), x)
1909 s = getset(repo, fullreposet(repo), x)
1864 parents = repo.changelog.parentrevs
1910 parents = repo.changelog.parentrevs
1865 def filter(r):
1911 def filter(r):
1866 for p in parents(r):
1912 for p in parents(r):
1867 if 0 <= p and p in s:
1913 if 0 <= p and p in s:
1868 return False
1914 return False
1869 return True
1915 return True
1870 return subset & s.filter(filter, condrepr='<roots>')
1916 return subset & s.filter(filter, condrepr='<roots>')
1871
1917
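# The same "no parent inside the set" rule, sketched with a plain parent map
# instead of the changelog (illustration only; _demoroots is hypothetical):
def _demoroots(revs, parentmap):
    revs = set(revs)
    return set(r for r in revs if not any(p in revs for p in parentmap[r]))

# _demoroots([2, 3, 4], {2: [0], 3: [2], 4: [1]}) == set([2, 4])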
1872 _sortkeyfuncs = {
1918 _sortkeyfuncs = {
1873 'rev': lambda c: c.rev(),
1919 'rev': lambda c: c.rev(),
1874 'branch': lambda c: c.branch(),
1920 'branch': lambda c: c.branch(),
1875 'desc': lambda c: c.description(),
1921 'desc': lambda c: c.description(),
1876 'user': lambda c: c.user(),
1922 'user': lambda c: c.user(),
1877 'author': lambda c: c.user(),
1923 'author': lambda c: c.user(),
1878 'date': lambda c: c.date()[0],
1924 'date': lambda c: c.date()[0],
1879 }
1925 }
1880
1926
1881 def _getsortargs(x):
1927 def _getsortargs(x):
1882 """Parse sort options into (set, [(key, reverse)], opts)"""
1928 """Parse sort options into (set, [(key, reverse)], opts)"""
1883 args = getargsdict(x, 'sort', 'set keys topo.firstbranch')
1929 args = getargsdict(x, 'sort', 'set keys topo.firstbranch')
1884 if 'set' not in args:
1930 if 'set' not in args:
1885 # i18n: "sort" is a keyword
1931 # i18n: "sort" is a keyword
1886 raise error.ParseError(_('sort requires one or two arguments'))
1932 raise error.ParseError(_('sort requires one or two arguments'))
1887 keys = "rev"
1933 keys = "rev"
1888 if 'keys' in args:
1934 if 'keys' in args:
1889 # i18n: "sort" is a keyword
1935 # i18n: "sort" is a keyword
1890 keys = getstring(args['keys'], _("sort spec must be a string"))
1936 keys = getstring(args['keys'], _("sort spec must be a string"))
1891
1937
1892 keyflags = []
1938 keyflags = []
1893 for k in keys.split():
1939 for k in keys.split():
1894 fk = k
1940 fk = k
1895 reverse = (k[0] == '-')
1941 reverse = (k[0] == '-')
1896 if reverse:
1942 if reverse:
1897 k = k[1:]
1943 k = k[1:]
1898 if k not in _sortkeyfuncs and k != 'topo':
1944 if k not in _sortkeyfuncs and k != 'topo':
1899 raise error.ParseError(_("unknown sort key %r") % fk)
1945 raise error.ParseError(_("unknown sort key %r") % fk)
1900 keyflags.append((k, reverse))
1946 keyflags.append((k, reverse))
1901
1947
1902 if len(keyflags) > 1 and any(k == 'topo' for k, reverse in keyflags):
1948 if len(keyflags) > 1 and any(k == 'topo' for k, reverse in keyflags):
1903 # i18n: "topo" is a keyword
1949 # i18n: "topo" is a keyword
1904 raise error.ParseError(_('topo sort order cannot be combined '
1950 raise error.ParseError(_('topo sort order cannot be combined '
1905 'with other sort keys'))
1951 'with other sort keys'))
1906
1952
1907 opts = {}
1953 opts = {}
1908 if 'topo.firstbranch' in args:
1954 if 'topo.firstbranch' in args:
1909 if any(k == 'topo' for k, reverse in keyflags):
1955 if any(k == 'topo' for k, reverse in keyflags):
1910 opts['topo.firstbranch'] = args['topo.firstbranch']
1956 opts['topo.firstbranch'] = args['topo.firstbranch']
1911 else:
1957 else:
1912 # i18n: "topo" and "topo.firstbranch" are keywords
1958 # i18n: "topo" and "topo.firstbranch" are keywords
1913 raise error.ParseError(_('topo.firstbranch can only be used '
1959 raise error.ParseError(_('topo.firstbranch can only be used '
1914 'when using the topo sort key'))
1960 'when using the topo sort key'))
1915
1961
1916 return args['set'], keyflags, opts
1962 return args['set'], keyflags, opts
1917
1963
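# Sketch of how a key spec such as "-date user" is split into (key, reverse)
# flags by the loop above (illustration only; no validation of key names):
def _demoparsesortkeys(keys):
    keyflags = []
    for k in keys.split():
        reverse = k.startswith('-')
        if reverse:
            k = k[1:]
        keyflags.append((k, reverse))
    return keyflags

# _demoparsesortkeys('-date user') -> [('date', True), ('user', False)]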
1918 @predicate('sort(set[, [-]key... [, ...]])', safe=True, takeorder=True)
1964 @predicate('sort(set[, [-]key... [, ...]])', safe=True, takeorder=True)
1919 def sort(repo, subset, x, order):
1965 def sort(repo, subset, x, order):
1920 """Sort set by keys. The default sort order is ascending, specify a key
1966 """Sort set by keys. The default sort order is ascending, specify a key
1921 as ``-key`` to sort in descending order.
1967 as ``-key`` to sort in descending order.
1922
1968
1923 The keys can be:
1969 The keys can be:
1924
1970
1925 - ``rev`` for the revision number,
1971 - ``rev`` for the revision number,
1926 - ``branch`` for the branch name,
1972 - ``branch`` for the branch name,
1927 - ``desc`` for the commit message (description),
1973 - ``desc`` for the commit message (description),
1928 - ``user`` for user name (``author`` can be used as an alias),
1974 - ``user`` for user name (``author`` can be used as an alias),
1929 - ``date`` for the commit date,
1975 - ``date`` for the commit date,
1930 - ``topo`` for a reverse topological sort
1976 - ``topo`` for a reverse topological sort
1931
1977
1932 The ``topo`` sort order cannot be combined with other sort keys. This sort
1978 The ``topo`` sort order cannot be combined with other sort keys. This sort
1933 takes one optional argument, ``topo.firstbranch``, which takes a revset that
1979 takes one optional argument, ``topo.firstbranch``, which takes a revset that
1934 specifies what topological branches to prioritize in the sort.
1980 specifies what topological branches to prioritize in the sort.
1935
1981
1936 """
1982 """
1937 s, keyflags, opts = _getsortargs(x)
1983 s, keyflags, opts = _getsortargs(x)
1938 revs = getset(repo, subset, s)
1984 revs = getset(repo, subset, s)
1939
1985
1940 if not keyflags or order != defineorder:
1986 if not keyflags or order != defineorder:
1941 return revs
1987 return revs
1942 if len(keyflags) == 1 and keyflags[0][0] == "rev":
1988 if len(keyflags) == 1 and keyflags[0][0] == "rev":
1943 revs.sort(reverse=keyflags[0][1])
1989 revs.sort(reverse=keyflags[0][1])
1944 return revs
1990 return revs
1945 elif keyflags[0][0] == "topo":
1991 elif keyflags[0][0] == "topo":
1946 firstbranch = ()
1992 firstbranch = ()
1947 if 'topo.firstbranch' in opts:
1993 if 'topo.firstbranch' in opts:
1948 firstbranch = getset(repo, subset, opts['topo.firstbranch'])
1994 firstbranch = getset(repo, subset, opts['topo.firstbranch'])
1949 revs = baseset(_toposort(revs, repo.changelog.parentrevs, firstbranch),
1995 revs = baseset(_toposort(revs, repo.changelog.parentrevs, firstbranch),
1950 istopo=True)
1996 istopo=True)
1951 if keyflags[0][1]:
1997 if keyflags[0][1]:
1952 revs.reverse()
1998 revs.reverse()
1953 return revs
1999 return revs
1954
2000
1955 # sort() is guaranteed to be stable
2001 # sort() is guaranteed to be stable
1956 ctxs = [repo[r] for r in revs]
2002 ctxs = [repo[r] for r in revs]
1957 for k, reverse in reversed(keyflags):
2003 for k, reverse in reversed(keyflags):
1958 ctxs.sort(key=_sortkeyfuncs[k], reverse=reverse)
2004 ctxs.sort(key=_sortkeyfuncs[k], reverse=reverse)
1959 return baseset([c.rev() for c in ctxs])
2005 return baseset([c.rev() for c in ctxs])
1960
2006
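# The multi-key ordering above relies on Python's sort being stable: applying
# the keys from last to first yields the combined ordering. A self-contained
# illustration with plain dicts standing in for changectx objects:
def _demostablesort(items, keyflags):
    items = list(items)
    for key, reverse in reversed(keyflags):
        items.sort(key=lambda d: d[key], reverse=reverse)
    return items

# _demostablesort([{'rev': 2, 'user': 'bob'},
#                  {'rev': 0, 'user': 'alice'},
#                  {'rev': 1, 'user': 'alice'}],
#                 [('user', False), ('rev', True)])
# -> alice's revs 1 and 0 (rev descending), then bob's rev 2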
1961 def _toposort(revs, parentsfunc, firstbranch=()):
2007 def _toposort(revs, parentsfunc, firstbranch=()):
1962 """Yield revisions from heads to roots one (topo) branch at a time.
2008 """Yield revisions from heads to roots one (topo) branch at a time.
1963
2009
1964 This function aims to be used by a graph generator that wishes to minimize
2010 This function aims to be used by a graph generator that wishes to minimize
1965 the number of parallel branches and their interleaving.
2011 the number of parallel branches and their interleaving.
1966
2012
1967 Example iteration order (numbers show the "true" order in a changelog):
2013 Example iteration order (numbers show the "true" order in a changelog):
1968
2014
1969 o 4
2015 o 4
1970 |
2016 |
1971 o 1
2017 o 1
1972 |
2018 |
1973 | o 3
2019 | o 3
1974 | |
2020 | |
1975 | o 2
2021 | o 2
1976 |/
2022 |/
1977 o 0
2023 o 0
1978
2024
1979 Note that the ancestors of merges are understood by the current
2025 Note that the ancestors of merges are understood by the current
1980 algorithm to be on the same branch. This means no reordering will
2026 algorithm to be on the same branch. This means no reordering will
1981 occur behind a merge.
2027 occur behind a merge.
1982 """
2028 """
1983
2029
1984 ### Quick summary of the algorithm
2030 ### Quick summary of the algorithm
1985 #
2031 #
1986 # This function is based around a "retention" principle. We keep revisions
2032 # This function is based around a "retention" principle. We keep revisions
1987 # in memory until we are ready to emit a whole branch that immediately
2033 # in memory until we are ready to emit a whole branch that immediately
1988 # "merges" into an existing one. This reduces the number of parallel
2034 # "merges" into an existing one. This reduces the number of parallel
1989 # branches with interleaved revisions.
2035 # branches with interleaved revisions.
1990 #
2036 #
1991 # During iteration revs are split into two groups:
2037 # During iteration revs are split into two groups:
1992 # A) revisions already emitted
2038 # A) revisions already emitted
1993 # B) revisions in "retention". They are stored as different subgroups.
2039 # B) revisions in "retention". They are stored as different subgroups.
1994 #
2040 #
1995 # for each REV, we do the following logic:
2041 # for each REV, we do the following logic:
1996 #
2042 #
1997 # 1) if REV is a parent of (A), we will emit it. If there is a
2043 # 1) if REV is a parent of (A), we will emit it. If there is a
1998 # retention group ((B) above) that is blocked on REV being
2044 # retention group ((B) above) that is blocked on REV being
1999 # available, we emit all the revisions out of that retention
2045 # available, we emit all the revisions out of that retention
2000 # group first.
2046 # group first.
2001 #
2047 #
2002 # 2) else, we'll search for a subgroup in (B) waiting for REV to be
2048 # 2) else, we'll search for a subgroup in (B) waiting for REV to be
2003 # available; if such a subgroup exists, we add REV to it and the subgroup is
2049 # available; if such a subgroup exists, we add REV to it and the subgroup is
2004 # now waiting for REV.parents() to be available.
2050 # now waiting for REV.parents() to be available.
2005 #
2051 #
2006 # 3) finally if no such group existed in (B), we create a new subgroup.
2052 # 3) finally if no such group existed in (B), we create a new subgroup.
2007 #
2053 #
2008 #
2054 #
2009 # To bootstrap the algorithm, we emit the tipmost revision (which
2055 # To bootstrap the algorithm, we emit the tipmost revision (which
2010 # puts it in group (A) from above).
2056 # puts it in group (A) from above).
2011
2057
2012 revs.sort(reverse=True)
2058 revs.sort(reverse=True)
2013
2059
2014 # Set of parents of revision that have been emitted. They can be considered
2060 # Set of parents of revision that have been emitted. They can be considered
2015 # unblocked as the graph generator is already aware of them so there is no
2061 # unblocked as the graph generator is already aware of them so there is no
2016 # need to delay the revisions that reference them.
2062 # need to delay the revisions that reference them.
2017 #
2063 #
2018 # If someone wants to prioritize a branch over the others, pre-filling this
2064 # If someone wants to prioritize a branch over the others, pre-filling this
2019 # set will force all other branches to wait until this branch is ready to be
2065 # set will force all other branches to wait until this branch is ready to be
2020 # emitted.
2066 # emitted.
2021 unblocked = set(firstbranch)
2067 unblocked = set(firstbranch)
2022
2068
2023 # list of groups waiting to be displayed, each group is defined by:
2069 # list of groups waiting to be displayed, each group is defined by:
2024 #
2070 #
2025 # (revs: lists of revs waiting to be displayed,
2071 # (revs: lists of revs waiting to be displayed,
2026 # blocked: set of revs that cannot be displayed before those in 'revs')
2072 # blocked: set of revs that cannot be displayed before those in 'revs')
2027 #
2073 #
2028 # The second value ('blocked') corresponds to parents of any revision in the
2074 # The second value ('blocked') corresponds to parents of any revision in the
2029 # group ('revs') that is not itself contained in the group. The main idea
2075 # group ('revs') that is not itself contained in the group. The main idea
2030 # of this algorithm is to delay as much as possible the emission of any
2076 # of this algorithm is to delay as much as possible the emission of any
2031 # revision. This means waiting for the moment we are about to display
2077 # revision. This means waiting for the moment we are about to display
2032 # these parents to display the revs in a group.
2078 # these parents to display the revs in a group.
2033 #
2079 #
2034 # This first implementation is smart until it encounters a merge: it will
2080 # This first implementation is smart until it encounters a merge: it will
2035 # emit revs as soon as any parent is about to be emitted and can grow an
2081 # emit revs as soon as any parent is about to be emitted and can grow an
2036 # arbitrary number of revs in 'blocked'. In practice this means we properly
2082 # arbitrary number of revs in 'blocked'. In practice this means we properly
2037 # retain new branches but give up on any special ordering for ancestors
2083 # retain new branches but give up on any special ordering for ancestors
2038 # of merges. The implementation can be improved to handle this better.
2084 # of merges. The implementation can be improved to handle this better.
2039 #
2085 #
2040 # The first subgroup is special. It corresponds to all the revisions that
2086 # The first subgroup is special. It corresponds to all the revisions that
2041 # were already emitted. The 'revs' list is expected to be empty and the
2087 # were already emitted. The 'revs' list is expected to be empty and the
2042 # 'blocked' set contains the parent revisions of already emitted revisions.
2088 # 'blocked' set contains the parent revisions of already emitted revisions.
2043 #
2089 #
2044 # You could pre-seed the <parents> set of groups[0] with specific
2090 # You could pre-seed the <parents> set of groups[0] with specific
2045 # changesets to select what the first emitted branch should be.
2091 # changesets to select what the first emitted branch should be.
2046 groups = [([], unblocked)]
2092 groups = [([], unblocked)]
2047 pendingheap = []
2093 pendingheap = []
2048 pendingset = set()
2094 pendingset = set()
2049
2095
2050 heapq.heapify(pendingheap)
2096 heapq.heapify(pendingheap)
2051 heappop = heapq.heappop
2097 heappop = heapq.heappop
2052 heappush = heapq.heappush
2098 heappush = heapq.heappush
2053 for currentrev in revs:
2099 for currentrev in revs:
2054 # Heap works with smallest element, we want highest so we invert
2100 # Heap works with smallest element, we want highest so we invert
2055 if currentrev not in pendingset:
2101 if currentrev not in pendingset:
2056 heappush(pendingheap, -currentrev)
2102 heappush(pendingheap, -currentrev)
2057 pendingset.add(currentrev)
2103 pendingset.add(currentrev)
2058 # iterate on pending revs until the current rev has been
2104 # iterate on pending revs until the current rev has been
2059 # processed.
2105 # processed.
2060 rev = None
2106 rev = None
2061 while rev != currentrev:
2107 while rev != currentrev:
2062 rev = -heappop(pendingheap)
2108 rev = -heappop(pendingheap)
2063 pendingset.remove(rev)
2109 pendingset.remove(rev)
2064
2110
2065 # Look for a subgroup that is blocked, waiting for the current revision.
2111 # Look for a subgroup that is blocked, waiting for the current revision.
2066 matching = [i for i, g in enumerate(groups) if rev in g[1]]
2112 matching = [i for i, g in enumerate(groups) if rev in g[1]]
2067
2113
2068 if matching:
2114 if matching:
2069 # The main idea is to gather together all sets that are blocked
2115 # The main idea is to gather together all sets that are blocked
2070 # on the same revision.
2116 # on the same revision.
2071 #
2117 #
2072 # Groups are merged when a common blocking ancestor is
2118 # Groups are merged when a common blocking ancestor is
2073 # observed. For example, given two groups:
2119 # observed. For example, given two groups:
2074 #
2120 #
2075 # revs [5, 4] waiting for 1
2121 # revs [5, 4] waiting for 1
2076 # revs [3, 2] waiting for 1
2122 # revs [3, 2] waiting for 1
2077 #
2123 #
2078 # These two groups will be merged when we process
2124 # These two groups will be merged when we process
2079 # 1. In theory, we could have merged the groups when
2125 # 1. In theory, we could have merged the groups when
2080 # we added 2 to the group it is now in (we could have
2126 # we added 2 to the group it is now in (we could have
2081 # noticed the groups were both blocked on 1 then), but
2127 # noticed the groups were both blocked on 1 then), but
2082 # the way it works now makes the algorithm simpler.
2128 # the way it works now makes the algorithm simpler.
2083 #
2129 #
2084 # We also always keep the oldest subgroup first. We can
2130 # We also always keep the oldest subgroup first. We can
2085 # probably improve the behavior by having the longest set
2131 # probably improve the behavior by having the longest set
2086 # first. That way, graph algorithms could minimise the length
2132 # first. That way, graph algorithms could minimise the length
2087 # of parallel lines in their drawing. This is currently not done.
2133 # of parallel lines in their drawing. This is currently not done.
2088 targetidx = matching.pop(0)
2134 targetidx = matching.pop(0)
2089 trevs, tparents = groups[targetidx]
2135 trevs, tparents = groups[targetidx]
2090 for i in matching:
2136 for i in matching:
2091 gr = groups[i]
2137 gr = groups[i]
2092 trevs.extend(gr[0])
2138 trevs.extend(gr[0])
2093 tparents |= gr[1]
2139 tparents |= gr[1]
2094 # delete all merged subgroups (except the one we kept)
2140 # delete all merged subgroups (except the one we kept)
2095 # (starting from the last subgroup for performance and
2141 # (starting from the last subgroup for performance and
2096 # sanity reasons)
2142 # sanity reasons)
2097 for i in reversed(matching):
2143 for i in reversed(matching):
2098 del groups[i]
2144 del groups[i]
2099 else:
2145 else:
2100 # This is a new head. We create a new subgroup for it.
2146 # This is a new head. We create a new subgroup for it.
2101 targetidx = len(groups)
2147 targetidx = len(groups)
2102 groups.append(([], set([rev])))
2148 groups.append(([], set([rev])))
2103
2149
2104 gr = groups[targetidx]
2150 gr = groups[targetidx]
2105
2151
2106 # We now add the current nodes to this subgroup. This is done
2152 # We now add the current nodes to this subgroup. This is done
2107 # after the subgroup merging because all elements from a subgroup
2153 # after the subgroup merging because all elements from a subgroup
2108 # that relied on this rev must precede it.
2154 # that relied on this rev must precede it.
2109 #
2155 #
2110 # we also update the <parents> set to include the parents of the
2156 # we also update the <parents> set to include the parents of the
2111 # new nodes.
2157 # new nodes.
2112 if rev == currentrev: # only display stuff in rev
2158 if rev == currentrev: # only display stuff in rev
2113 gr[0].append(rev)
2159 gr[0].append(rev)
2114 gr[1].remove(rev)
2160 gr[1].remove(rev)
2115 parents = [p for p in parentsfunc(rev) if p > node.nullrev]
2161 parents = [p for p in parentsfunc(rev) if p > node.nullrev]
2116 gr[1].update(parents)
2162 gr[1].update(parents)
2117 for p in parents:
2163 for p in parents:
2118 if p not in pendingset:
2164 if p not in pendingset:
2119 pendingset.add(p)
2165 pendingset.add(p)
2120 heappush(pendingheap, -p)
2166 heappush(pendingheap, -p)
2121
2167
2122 # Look for a subgroup to display
2168 # Look for a subgroup to display
2123 #
2169 #
2124 # When unblocked is empty (if clause), we were not waiting for any
2170 # When unblocked is empty (if clause), we were not waiting for any
2125 # revisions during the first iteration (if no priority was given) or
2171 # revisions during the first iteration (if no priority was given) or
2126 # if we emitted a whole disconnected set of the graph (reached a
2172 # if we emitted a whole disconnected set of the graph (reached a
2127 # root). In that case we arbitrarily take the oldest known
2173 # root). In that case we arbitrarily take the oldest known
2128 # subgroup. The heuristic could probably be better.
2174 # subgroup. The heuristic could probably be better.
2129 #
2175 #
2130 # Otherwise (elif clause) if the subgroup is blocked on
2176 # Otherwise (elif clause) if the subgroup is blocked on
2131 # a revision we just emitted, we can safely emit it as
2177 # a revision we just emitted, we can safely emit it as
2132 # well.
2178 # well.
2133 if not unblocked:
2179 if not unblocked:
2134 if len(groups) > 1: # display other subset
2180 if len(groups) > 1: # display other subset
2135 targetidx = 1
2181 targetidx = 1
2136 gr = groups[1]
2182 gr = groups[1]
2137 elif not gr[1] & unblocked:
2183 elif not gr[1] & unblocked:
2138 gr = None
2184 gr = None
2139
2185
2140 if gr is not None:
2186 if gr is not None:
2141 # update the set of awaited revisions with the one from the
2187 # update the set of awaited revisions with the one from the
2142 # subgroup
2188 # subgroup
2143 unblocked |= gr[1]
2189 unblocked |= gr[1]
2144 # output all revisions in the subgroup
2190 # output all revisions in the subgroup
2145 for r in gr[0]:
2191 for r in gr[0]:
2146 yield r
2192 yield r
2147 # delete the subgroup that you just output
2193 # delete the subgroup that you just output
2148 # unless it is groups[0] in which case you just empty it.
2194 # unless it is groups[0] in which case you just empty it.
2149 if targetidx:
2195 if targetidx:
2150 del groups[targetidx]
2196 del groups[targetidx]
2151 else:
2197 else:
2152 gr[0][:] = []
2198 gr[0][:] = []
2153 # Check if we have some subgroup waiting for revisions we are not going to
2199 # Check if we have some subgroup waiting for revisions we are not going to
2154 # iterate over
2200 # iterate over
2155 for g in groups:
2201 for g in groups:
2156 for r in g[0]:
2202 for r in g[0]:
2157 yield r
2203 yield r
2158
2204
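# Quick demonstration of _toposort() above on the graph from its docstring
# (4 on top of 1 on top of 0, and 3 on top of 2 on top of 0), with parents
# looked up in a plain dict (illustration only; _demoparents is hypothetical):
_demoparents = {0: [], 1: [0], 2: [0], 3: [2], 4: [1]}
# list(_toposort([0, 1, 2, 3, 4], _demoparents.get)) -> [4, 1, 3, 2, 0]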
2159 @predicate('subrepo([pattern])')
2205 @predicate('subrepo([pattern])')
2160 def subrepo(repo, subset, x):
2206 def subrepo(repo, subset, x):
2161 """Changesets that add, modify or remove the given subrepo. If no subrepo
2207 """Changesets that add, modify or remove the given subrepo. If no subrepo
2162 pattern is named, any subrepo changes are returned.
2208 pattern is named, any subrepo changes are returned.
2163 """
2209 """
2164 # i18n: "subrepo" is a keyword
2210 # i18n: "subrepo" is a keyword
2165 args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
2211 args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
2166 pat = None
2212 pat = None
2167 if len(args) != 0:
2213 if len(args) != 0:
2168 pat = getstring(args[0], _("subrepo requires a pattern"))
2214 pat = getstring(args[0], _("subrepo requires a pattern"))
2169
2215
2170 m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])
2216 m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])
2171
2217
2172 def submatches(names):
2218 def submatches(names):
2173 k, p, m = util.stringmatcher(pat)
2219 k, p, m = util.stringmatcher(pat)
2174 for name in names:
2220 for name in names:
2175 if m(name):
2221 if m(name):
2176 yield name
2222 yield name
2177
2223
2178 def matches(x):
2224 def matches(x):
2179 c = repo[x]
2225 c = repo[x]
2180 s = repo.status(c.p1().node(), c.node(), match=m)
2226 s = repo.status(c.p1().node(), c.node(), match=m)
2181
2227
2182 if pat is None:
2228 if pat is None:
2183 return s.added or s.modified or s.removed
2229 return s.added or s.modified or s.removed
2184
2230
2185 if s.added:
2231 if s.added:
2186 return any(submatches(c.substate.keys()))
2232 return any(submatches(c.substate.keys()))
2187
2233
2188 if s.modified:
2234 if s.modified:
2189 subs = set(c.p1().substate.keys())
2235 subs = set(c.p1().substate.keys())
2190 subs.update(c.substate.keys())
2236 subs.update(c.substate.keys())
2191
2237
2192 for path in submatches(subs):
2238 for path in submatches(subs):
2193 if c.p1().substate.get(path) != c.substate.get(path):
2239 if c.p1().substate.get(path) != c.substate.get(path):
2194 return True
2240 return True
2195
2241
2196 if s.removed:
2242 if s.removed:
2197 return any(submatches(c.p1().substate.keys()))
2243 return any(submatches(c.p1().substate.keys()))
2198
2244
2199 return False
2245 return False
2200
2246
2201 return subset.filter(matches, condrepr=('<subrepo %r>', pat))
2247 return subset.filter(matches, condrepr=('<subrepo %r>', pat))
2202
2248
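# Illustrative revset expressions using this predicate (assuming a repository
# that actually contains subrepositories at these paths):
#   subrepo()            changesets touching any subrepository
#   subrepo('vendor')    changesets touching the 'vendor' subrepository
#   subrepo('re:^lib/')  changesets touching subrepositories under 'lib/'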
2203 def _substringmatcher(pattern):
2249 def _substringmatcher(pattern):
2204 kind, pattern, matcher = util.stringmatcher(pattern)
2250 kind, pattern, matcher = util.stringmatcher(pattern)
2205 if kind == 'literal':
2251 if kind == 'literal':
2206 matcher = lambda s: pattern in s
2252 matcher = lambda s: pattern in s
2207 return kind, pattern, matcher
2253 return kind, pattern, matcher
2208
2254
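# Rough stand-alone approximation of the 'literal:'/'re:' pattern convention
# handled by util.stringmatcher() (illustration only, reusing the module-level
# 're' import; the real helper covers more cases than shown here):
def _demostringmatcher(pattern):
    if pattern.startswith('re:'):
        regex = re.compile(pattern[3:])
        return 're', pattern[3:], lambda s: regex.search(s) is not None
    if pattern.startswith('literal:'):
        pattern = pattern[len('literal:'):]
    return 'literal', pattern, lambda s: s == pattern

# _demostringmatcher('re:^1\.')[2]('1.9.2') -> True
# _demostringmatcher('literal:re:x')[2]('re:x') -> True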
2209 @predicate('tag([name])', safe=True)
2255 @predicate('tag([name])', safe=True)
2210 def tag(repo, subset, x):
2256 def tag(repo, subset, x):
2211 """The specified tag by name, or all tagged revisions if no name is given.
2257 """The specified tag by name, or all tagged revisions if no name is given.
2212
2258
2213 If `name` starts with `re:`, the remainder of the name is treated as
2259 If `name` starts with `re:`, the remainder of the name is treated as
2214 a regular expression. To match a tag that actually starts with `re:`,
2260 a regular expression. To match a tag that actually starts with `re:`,
2215 use the prefix `literal:`.
2261 use the prefix `literal:`.
2216 """
2262 """
2217 # i18n: "tag" is a keyword
2263 # i18n: "tag" is a keyword
2218 args = getargs(x, 0, 1, _("tag takes one or no arguments"))
2264 args = getargs(x, 0, 1, _("tag takes one or no arguments"))
2219 cl = repo.changelog
2265 cl = repo.changelog
2220 if args:
2266 if args:
2221 pattern = getstring(args[0],
2267 pattern = getstring(args[0],
2222 # i18n: "tag" is a keyword
2268 # i18n: "tag" is a keyword
2223 _('the argument to tag must be a string'))
2269 _('the argument to tag must be a string'))
2224 kind, pattern, matcher = util.stringmatcher(pattern)
2270 kind, pattern, matcher = util.stringmatcher(pattern)
2225 if kind == 'literal':
2271 if kind == 'literal':
2226 # avoid resolving all tags
2272 # avoid resolving all tags
2227 tn = repo._tagscache.tags.get(pattern, None)
2273 tn = repo._tagscache.tags.get(pattern, None)
2228 if tn is None:
2274 if tn is None:
2229 raise error.RepoLookupError(_("tag '%s' does not exist")
2275 raise error.RepoLookupError(_("tag '%s' does not exist")
2230 % pattern)
2276 % pattern)
2231 s = set([repo[tn].rev()])
2277 s = set([repo[tn].rev()])
2232 else:
2278 else:
2233 s = set([cl.rev(n) for t, n in repo.tagslist() if matcher(t)])
2279 s = set([cl.rev(n) for t, n in repo.tagslist() if matcher(t)])
2234 else:
2280 else:
2235 s = set([cl.rev(n) for t, n in repo.tagslist() if t != 'tip'])
2281 s = set([cl.rev(n) for t, n in repo.tagslist() if t != 'tip'])
2236 return subset & s
2282 return subset & s
2237
2283
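# Typical uses of this predicate in a revset expression (illustrative):
#   tag()           all tagged revisions (the implicit 'tip' tag is excluded)
#   tag('1.9')      the revision carrying the tag '1.9'
#   tag('re:^1\.')  revisions whose tag matches the regular expression '^1\.'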
2238 @predicate('tagged', safe=True)
2284 @predicate('tagged', safe=True)
2239 def tagged(repo, subset, x):
2285 def tagged(repo, subset, x):
2240 return tag(repo, subset, x)
2286 return tag(repo, subset, x)
2241
2287
2242 @predicate('unstable()', safe=True)
2288 @predicate('unstable()', safe=True)
2243 def unstable(repo, subset, x):
2289 def unstable(repo, subset, x):
2244 """Non-obsolete changesets with obsolete ancestors.
2290 """Non-obsolete changesets with obsolete ancestors.
2245 """
2291 """
2246 # i18n: "unstable" is a keyword
2292 # i18n: "unstable" is a keyword
2247 getargs(x, 0, 0, _("unstable takes no arguments"))
2293 getargs(x, 0, 0, _("unstable takes no arguments"))
2248 unstables = obsmod.getrevs(repo, 'unstable')
2294 unstables = obsmod.getrevs(repo, 'unstable')
2249 return subset & unstables
2295 return subset & unstables
2250
2296
2251
2297
2252 @predicate('user(string)', safe=True)
2298 @predicate('user(string)', safe=True)
2253 def user(repo, subset, x):
2299 def user(repo, subset, x):
2254 """User name contains string. The match is case-insensitive.
2300 """User name contains string. The match is case-insensitive.
2255
2301
2256 If `string` starts with `re:`, the remainder of the string is treated as
2302 If `string` starts with `re:`, the remainder of the string is treated as
2257 a regular expression. To match a user that actually contains `re:`, use
2303 a regular expression. To match a user that actually contains `re:`, use
2258 the prefix `literal:`.
2304 the prefix `literal:`.
2259 """
2305 """
2260 return author(repo, subset, x)
2306 return author(repo, subset, x)
2261
2307
2262 @predicate('wdir', safe=True)
2308 @predicate('wdir', safe=True)
2263 def wdir(repo, subset, x):
2309 def wdir(repo, subset, x):
2264 """Working directory. (EXPERIMENTAL)"""
2310 """Working directory. (EXPERIMENTAL)"""
2265 # i18n: "wdir" is a keyword
2311 # i18n: "wdir" is a keyword
2266 getargs(x, 0, 0, _("wdir takes no arguments"))
2312 getargs(x, 0, 0, _("wdir takes no arguments"))
2267 if node.wdirrev in subset or isinstance(subset, fullreposet):
2313 if node.wdirrev in subset or isinstance(subset, fullreposet):
2268 return baseset([node.wdirrev])
2314 return baseset([node.wdirrev])
2269 return baseset()
2315 return baseset()
2270
2316
2271 def _orderedlist(repo, subset, x):
2317 def _orderedlist(repo, subset, x):
2272 s = getstring(x, "internal error")
2318 s = getstring(x, "internal error")
2273 if not s:
2319 if not s:
2274 return baseset()
2320 return baseset()
2275 # remove duplicates here. It's difficult for the caller to deduplicate sets
2321 # remove duplicates here. It's difficult for the caller to deduplicate sets
2276 # because different symbols can point to the same rev.
2322 # because different symbols can point to the same rev.
2277 cl = repo.changelog
2323 cl = repo.changelog
2278 ls = []
2324 ls = []
2279 seen = set()
2325 seen = set()
2280 for t in s.split('\0'):
2326 for t in s.split('\0'):
2281 try:
2327 try:
2282 # fast path for integer revision
2328 # fast path for integer revision
2283 r = int(t)
2329 r = int(t)
2284 if str(r) != t or r not in cl:
2330 if str(r) != t or r not in cl:
2285 raise ValueError
2331 raise ValueError
2286 revs = [r]
2332 revs = [r]
2287 except ValueError:
2333 except ValueError:
2288 revs = stringset(repo, subset, t)
2334 revs = stringset(repo, subset, t)
2289
2335
2290 for r in revs:
2336 for r in revs:
2291 if r in seen:
2337 if r in seen:
2292 continue
2338 continue
2293 if (r in subset
2339 if (r in subset
2294 or r == node.nullrev and isinstance(subset, fullreposet)):
2340 or r == node.nullrev and isinstance(subset, fullreposet)):
2295 ls.append(r)
2341 ls.append(r)
2296 seen.add(r)
2342 seen.add(r)
2297 return baseset(ls)
2343 return baseset(ls)
2298
2344
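# The order-preserving deduplication used above, reduced to plain Python
# (illustration only; the real code also resolves each entry to a revision):
def _demodedup(items):
    seen = set()
    result = []
    for item in items:
        if item not in seen:
            seen.add(item)
            result.append(item)
    return result

# _demodedup('3\x001\x003\x002'.split('\x00')) -> ['3', '1', '2']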
2299 # for internal use
2345 # for internal use
2300 @predicate('_list', safe=True, takeorder=True)
2346 @predicate('_list', safe=True, takeorder=True)
2301 def _list(repo, subset, x, order):
2347 def _list(repo, subset, x, order):
2302 if order == followorder:
2348 if order == followorder:
2303 # slow path to take the subset order
2349 # slow path to take the subset order
2304 return subset & _orderedlist(repo, fullreposet(repo), x)
2350 return subset & _orderedlist(repo, fullreposet(repo), x)
2305 else:
2351 else:
2306 return _orderedlist(repo, subset, x)
2352 return _orderedlist(repo, subset, x)
2307
2353
2308 def _orderedintlist(repo, subset, x):
2354 def _orderedintlist(repo, subset, x):
2309 s = getstring(x, "internal error")
2355 s = getstring(x, "internal error")
2310 if not s:
2356 if not s:
2311 return baseset()
2357 return baseset()
2312 ls = [int(r) for r in s.split('\0')]
2358 ls = [int(r) for r in s.split('\0')]
2313 s = subset
2359 s = subset
2314 return baseset([r for r in ls if r in s])
2360 return baseset([r for r in ls if r in s])
2315
2361
2316 # for internal use
2362 # for internal use
2317 @predicate('_intlist', safe=True, takeorder=True)
2363 @predicate('_intlist', safe=True, takeorder=True)
2318 def _intlist(repo, subset, x, order):
2364 def _intlist(repo, subset, x, order):
2319 if order == followorder:
2365 if order == followorder:
2320 # slow path to take the subset order
2366 # slow path to take the subset order
2321 return subset & _orderedintlist(repo, fullreposet(repo), x)
2367 return subset & _orderedintlist(repo, fullreposet(repo), x)
2322 else:
2368 else:
2323 return _orderedintlist(repo, subset, x)
2369 return _orderedintlist(repo, subset, x)
2324
2370
2325 def _orderedhexlist(repo, subset, x):
2371 def _orderedhexlist(repo, subset, x):
2326 s = getstring(x, "internal error")
2372 s = getstring(x, "internal error")
2327 if not s:
2373 if not s:
2328 return baseset()
2374 return baseset()
2329 cl = repo.changelog
2375 cl = repo.changelog
2330 ls = [cl.rev(node.bin(r)) for r in s.split('\0')]
2376 ls = [cl.rev(node.bin(r)) for r in s.split('\0')]
2331 s = subset
2377 s = subset
2332 return baseset([r for r in ls if r in s])
2378 return baseset([r for r in ls if r in s])
2333
2379
2334 # for internal use
2380 # for internal use
2335 @predicate('_hexlist', safe=True, takeorder=True)
2381 @predicate('_hexlist', safe=True, takeorder=True)
2336 def _hexlist(repo, subset, x, order):
2382 def _hexlist(repo, subset, x, order):
2337 if order == followorder:
2383 if order == followorder:
2338 # slow path to take the subset order
2384 # slow path to take the subset order
2339 return subset & _orderedhexlist(repo, fullreposet(repo), x)
2385 return subset & _orderedhexlist(repo, fullreposet(repo), x)
2340 else:
2386 else:
2341 return _orderedhexlist(repo, subset, x)
2387 return _orderedhexlist(repo, subset, x)
2342
2388
2343 methods = {
2389 methods = {
2344 "range": rangeset,
2390 "range": rangeset,
2345 "rangepre": rangepre,
2391 "rangepre": rangepre,
2346 "dagrange": dagrange,
2392 "dagrange": dagrange,
2347 "string": stringset,
2393 "string": stringset,
2348 "symbol": stringset,
2394 "symbol": stringset,
2349 "and": andset,
2395 "and": andset,
2350 "or": orset,
2396 "or": orset,
2351 "not": notset,
2397 "not": notset,
2352 "difference": differenceset,
2398 "difference": differenceset,
2353 "list": listset,
2399 "list": listset,
2354 "keyvalue": keyvaluepair,
2400 "keyvalue": keyvaluepair,
2355 "func": func,
2401 "func": func,
2356 "ancestor": ancestorspec,
2402 "ancestor": ancestorspec,
2357 "parent": parentspec,
2403 "parent": parentspec,
2358 "parentpost": parentpost,
2404 "parentpost": parentpost,
2359 }
2405 }
2360
2406
2361 # Constants for ordering requirement, used in _analyze():
2407 # Constants for ordering requirement, used in _analyze():
2362 #
2408 #
2363 # If 'define', any nested functions and operations can change the ordering of
2409 # If 'define', any nested functions and operations can change the ordering of
2364 # the entries in the set. If 'follow', any nested functions and operations
2410 # the entries in the set. If 'follow', any nested functions and operations
2365 # should take the ordering specified by the first operand to the '&' operator.
2411 # should take the ordering specified by the first operand to the '&' operator.
2366 #
2412 #
2367 # For instance,
2413 # For instance,
2368 #
2414 #
2369 # X & (Y | Z)
2415 # X & (Y | Z)
2370 # ^ ^^^^^^^
2416 # ^ ^^^^^^^
2371 # | follow
2417 # | follow
2372 # define
2418 # define
2373 #
2419 #
2374 # will be evaluated as 'or(y(x()), z(x()))', where 'x()' can change the order
2420 # will be evaluated as 'or(y(x()), z(x()))', where 'x()' can change the order
2375 # of the entries in the set, but 'y()', 'z()' and 'or()' shouldn't.
2421 # of the entries in the set, but 'y()', 'z()' and 'or()' shouldn't.
2376 #
2422 #
2377 # 'any' means the order doesn't matter. For instance,
2423 # 'any' means the order doesn't matter. For instance,
2378 #
2424 #
2379 # X & !Y
2425 # X & !Y
2380 # ^
2426 # ^
2381 # any
2427 # any
2382 #
2428 #
2383 # 'y()' can either enforce its ordering requirement or take the ordering
2429 # 'y()' can either enforce its ordering requirement or take the ordering
2384 # specified by 'x()' because 'not()' doesn't care about the order.
2430 # specified by 'x()' because 'not()' doesn't care about the order.
2385 #
2431 #
2386 # Transition of ordering requirement:
2432 # Transition of ordering requirement:
2387 #
2433 #
2388 # 1. starts with 'define'
2434 # 1. starts with 'define'
2389 # 2. shifts to 'follow' by 'x & y'
2435 # 2. shifts to 'follow' by 'x & y'
2390 # 3. changes back to 'define' on function call 'f(x)' or function-like
2436 # 3. changes back to 'define' on function call 'f(x)' or function-like
2391 # operation 'x (f) y' because 'f' may have its own ordering requirement
2437 # operation 'x (f) y' because 'f' may have its own ordering requirement
2392 # for 'x' and 'y' (e.g. 'first(x)')
2438 # for 'x' and 'y' (e.g. 'first(x)')
2393 #
2439 #
2394 anyorder = 'any' # doesn't care about the order
2440 anyorder = 'any' # doesn't care about the order
2395 defineorder = 'define' # should define the order
2441 defineorder = 'define' # should define the order
2396 followorder = 'follow' # must follow the current order
2442 followorder = 'follow' # must follow the current order
2397
2443
2398 # transition table for 'x & y', from the current expression 'x' to 'y'
2444 # transition table for 'x & y', from the current expression 'x' to 'y'
2399 _tofolloworder = {
2445 _tofolloworder = {
2400 anyorder: anyorder,
2446 anyorder: anyorder,
2401 defineorder: followorder,
2447 defineorder: followorder,
2402 followorder: followorder,
2448 followorder: followorder,
2403 }
2449 }
2404
2450
2405 def _matchonly(revs, bases):
2451 def _matchonly(revs, bases):
2406 """
2452 """
2407 >>> f = lambda *args: _matchonly(*map(parse, args))
2453 >>> f = lambda *args: _matchonly(*map(parse, args))
2408 >>> f('ancestors(A)', 'not ancestors(B)')
2454 >>> f('ancestors(A)', 'not ancestors(B)')
2409 ('list', ('symbol', 'A'), ('symbol', 'B'))
2455 ('list', ('symbol', 'A'), ('symbol', 'B'))
2410 """
2456 """
2411 if (revs is not None
2457 if (revs is not None
2412 and revs[0] == 'func'
2458 and revs[0] == 'func'
2413 and getsymbol(revs[1]) == 'ancestors'
2459 and getsymbol(revs[1]) == 'ancestors'
2414 and bases is not None
2460 and bases is not None
2415 and bases[0] == 'not'
2461 and bases[0] == 'not'
2416 and bases[1][0] == 'func'
2462 and bases[1][0] == 'func'
2417 and getsymbol(bases[1][1]) == 'ancestors'):
2463 and getsymbol(bases[1][1]) == 'ancestors'):
2418 return ('list', revs[2], bases[1][2])
2464 return ('list', revs[2], bases[1][2])
2419
2465
2420 def _fixops(x):
2466 def _fixops(x):
2421 """Rewrite raw parsed tree to resolve ambiguous syntax which cannot be
2467 """Rewrite raw parsed tree to resolve ambiguous syntax which cannot be
2422 handled well by our simple top-down parser"""
2468 handled well by our simple top-down parser"""
2423 if not isinstance(x, tuple):
2469 if not isinstance(x, tuple):
2424 return x
2470 return x
2425
2471
2426 op = x[0]
2472 op = x[0]
2427 if op == 'parent':
2473 if op == 'parent':
2428 # x^:y means (x^) : y, not x ^ (:y)
2474 # x^:y means (x^) : y, not x ^ (:y)
2429 # x^: means (x^) :, not x ^ (:)
2475 # x^: means (x^) :, not x ^ (:)
2430 post = ('parentpost', x[1])
2476 post = ('parentpost', x[1])
2431 if x[2][0] == 'dagrangepre':
2477 if x[2][0] == 'dagrangepre':
2432 return _fixops(('dagrange', post, x[2][1]))
2478 return _fixops(('dagrange', post, x[2][1]))
2433 elif x[2][0] == 'rangepre':
2479 elif x[2][0] == 'rangepre':
2434 return _fixops(('range', post, x[2][1]))
2480 return _fixops(('range', post, x[2][1]))
2435 elif x[2][0] == 'rangeall':
2481 elif x[2][0] == 'rangeall':
2436 return _fixops(('rangepost', post))
2482 return _fixops(('rangepost', post))
2437 elif op == 'or':
2483 elif op == 'or':
2438 # make number of arguments deterministic:
2484 # make number of arguments deterministic:
2439 # x + y + z -> (or x y z) -> (or (list x y z))
2485 # x + y + z -> (or x y z) -> (or (list x y z))
2440 return (op, _fixops(('list',) + x[1:]))
2486 return (op, _fixops(('list',) + x[1:]))
2441
2487
2442 return (op,) + tuple(_fixops(y) for y in x[1:])
2488 return (op,) + tuple(_fixops(y) for y in x[1:])
2443
2489
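# Concrete effect of the 'or' flattening above (a sketch; symbols pass through
# untouched and the variadic 'or' becomes an 'or' of a single 'list' node):
#
#   _fixops(('or', ('symbol', 'a'), ('symbol', 'b'), ('symbol', 'c')))
#   -> ('or', ('list', ('symbol', 'a'), ('symbol', 'b'), ('symbol', 'c')))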
2444 def _analyze(x, order):
2490 def _analyze(x, order):
2445 if x is None:
2491 if x is None:
2446 return x
2492 return x
2447
2493
2448 op = x[0]
2494 op = x[0]
2449 if op == 'minus':
2495 if op == 'minus':
2450 return _analyze(('and', x[1], ('not', x[2])), order)
2496 return _analyze(('and', x[1], ('not', x[2])), order)
2451 elif op == 'only':
2497 elif op == 'only':
2452 t = ('func', ('symbol', 'only'), ('list', x[1], x[2]))
2498 t = ('func', ('symbol', 'only'), ('list', x[1], x[2]))
2453 return _analyze(t, order)
2499 return _analyze(t, order)
2454 elif op == 'onlypost':
2500 elif op == 'onlypost':
2455 return _analyze(('func', ('symbol', 'only'), x[1]), order)
2501 return _analyze(('func', ('symbol', 'only'), x[1]), order)
2456 elif op == 'dagrangepre':
2502 elif op == 'dagrangepre':
2457 return _analyze(('func', ('symbol', 'ancestors'), x[1]), order)
2503 return _analyze(('func', ('symbol', 'ancestors'), x[1]), order)
2458 elif op == 'dagrangepost':
2504 elif op == 'dagrangepost':
2459 return _analyze(('func', ('symbol', 'descendants'), x[1]), order)
2505 return _analyze(('func', ('symbol', 'descendants'), x[1]), order)
2460 elif op == 'rangeall':
2506 elif op == 'rangeall':
2461 return _analyze(('rangepre', ('string', 'tip')), order)
2507 return _analyze(('rangepre', ('string', 'tip')), order)
2462 elif op == 'rangepost':
2508 elif op == 'rangepost':
2463 return _analyze(('range', x[1], ('string', 'tip')), order)
2509 return _analyze(('range', x[1], ('string', 'tip')), order)
2464 elif op == 'negate':
2510 elif op == 'negate':
2465 s = getstring(x[1], _("can't negate that"))
2511 s = getstring(x[1], _("can't negate that"))
2466 return _analyze(('string', '-' + s), order)
2512 return _analyze(('string', '-' + s), order)
2467 elif op in ('string', 'symbol'):
2513 elif op in ('string', 'symbol'):
2468 return x
2514 return x
2469 elif op == 'and':
2515 elif op == 'and':
2470 ta = _analyze(x[1], order)
2516 ta = _analyze(x[1], order)
2471 tb = _analyze(x[2], _tofolloworder[order])
2517 tb = _analyze(x[2], _tofolloworder[order])
2472 return (op, ta, tb, order)
2518 return (op, ta, tb, order)
2473 elif op == 'or':
2519 elif op == 'or':
2474 return (op, _analyze(x[1], order), order)
2520 return (op, _analyze(x[1], order), order)
2475 elif op == 'not':
2521 elif op == 'not':
2476 return (op, _analyze(x[1], anyorder), order)
2522 return (op, _analyze(x[1], anyorder), order)
2477 elif op in ('rangepre', 'parentpost'):
2523 elif op in ('rangepre', 'parentpost'):
2478 return (op, _analyze(x[1], defineorder), order)
2524 return (op, _analyze(x[1], defineorder), order)
2479 elif op == 'group':
2525 elif op == 'group':
2480 return _analyze(x[1], order)
2526 return _analyze(x[1], order)
2481 elif op in ('dagrange', 'range', 'parent', 'ancestor'):
2527 elif op in ('dagrange', 'range', 'parent', 'ancestor'):
2482 ta = _analyze(x[1], defineorder)
2528 ta = _analyze(x[1], defineorder)
2483 tb = _analyze(x[2], defineorder)
2529 tb = _analyze(x[2], defineorder)
2484 return (op, ta, tb, order)
2530 return (op, ta, tb, order)
2485 elif op == 'list':
2531 elif op == 'list':
2486 return (op,) + tuple(_analyze(y, order) for y in x[1:])
2532 return (op,) + tuple(_analyze(y, order) for y in x[1:])
2487 elif op == 'keyvalue':
2533 elif op == 'keyvalue':
2488 return (op, x[1], _analyze(x[2], order))
2534 return (op, x[1], _analyze(x[2], order))
2489 elif op == 'func':
2535 elif op == 'func':
2490 f = getsymbol(x[1])
2536 f = getsymbol(x[1])
2491 d = defineorder
2537 d = defineorder
2492 if f == 'present':
2538 if f == 'present':
2493 # 'present(set)' is known to return the argument set with no
2539 # 'present(set)' is known to return the argument set with no
2494 # modification, so forward the current order to its argument
2540 # modification, so forward the current order to its argument
2495 d = order
2541 d = order
2496 return (op, x[1], _analyze(x[2], d), order)
2542 return (op, x[1], _analyze(x[2], d), order)
2497 raise ValueError('invalid operator %r' % op)
2543 raise ValueError('invalid operator %r' % op)
2498
2544
2499 def analyze(x, order=defineorder):
2545 def analyze(x, order=defineorder):
2500 """Transform raw parsed tree to evaluatable tree which can be fed to
2546 """Transform raw parsed tree to evaluatable tree which can be fed to
2501 optimize() or getset()
2547 optimize() or getset()
2502
2548
2503 All pseudo operations should be mapped to real operations or functions
2549 All pseudo operations should be mapped to real operations or functions
2504 defined in methods or symbols table respectively.
2550 defined in methods or symbols table respectively.
2505
2551
2506 'order' specifies how the current expression 'x' is ordered (see the
2552 'order' specifies how the current expression 'x' is ordered (see the
2507 constants defined above.)
2553 constants defined above.)
2508 """
2554 """
2509 return _analyze(x, order)
2555 return _analyze(x, order)
2510
2556
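# Small concrete case (sketch): plain symbols pass through unchanged, while
# the 'and' node records the ordering requirement it was analyzed under; its
# right-hand operand was visited with _tofolloworder applied to that order.
#
#   analyze(('and', ('symbol', 'X'), ('symbol', 'Y')))
#   -> ('and', ('symbol', 'X'), ('symbol', 'Y'), 'define')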
2511 def _optimize(x, small):
2557 def _optimize(x, small):
2512 if x is None:
2558 if x is None:
2513 return 0, x
2559 return 0, x
2514
2560
2515 smallbonus = 1
2561 smallbonus = 1
2516 if small:
2562 if small:
2517 smallbonus = .5
2563 smallbonus = .5
2518
2564
2519 op = x[0]
2565 op = x[0]
2520 if op in ('string', 'symbol'):
2566 if op in ('string', 'symbol'):
2521 return smallbonus, x # single revisions are small
2567 return smallbonus, x # single revisions are small
2522 elif op == 'and':
2568 elif op == 'and':
2523 wa, ta = _optimize(x[1], True)
2569 wa, ta = _optimize(x[1], True)
2524 wb, tb = _optimize(x[2], True)
2570 wb, tb = _optimize(x[2], True)
2525 order = x[3]
2571 order = x[3]
2526 w = min(wa, wb)
2572 w = min(wa, wb)
2527
2573
2528 # (::x and not ::y)/(not ::y and ::x) have a fast path
2574 # (::x and not ::y)/(not ::y and ::x) have a fast path
2529 tm = _matchonly(ta, tb) or _matchonly(tb, ta)
2575 tm = _matchonly(ta, tb) or _matchonly(tb, ta)
2530 if tm:
2576 if tm:
2531 return w, ('func', ('symbol', 'only'), tm, order)
2577 return w, ('func', ('symbol', 'only'), tm, order)
2532
2578
2533 if tb is not None and tb[0] == 'not':
2579 if tb is not None and tb[0] == 'not':
2534 return wa, ('difference', ta, tb[1], order)
2580 return wa, ('difference', ta, tb[1], order)
2535
2581
2536 if wa > wb:
2582 if wa > wb:
2537 return w, (op, tb, ta, order)
2583 return w, (op, tb, ta, order)
2538 return w, (op, ta, tb, order)
2584 return w, (op, ta, tb, order)
2539 elif op == 'or':
2585 elif op == 'or':
2540 # fast path for machine-generated expressions, which are likely to have
2586 # fast path for machine-generated expressions, which are likely to have
2541 # lots of trivial revisions: 'a + b + c()' to '_list(a b) + c()'
2587 # lots of trivial revisions: 'a + b + c()' to '_list(a b) + c()'
2542 order = x[2]
2588 order = x[2]
2543 ws, ts, ss = [], [], []
2589 ws, ts, ss = [], [], []
2544 def flushss():
2590 def flushss():
2545 if not ss:
2591 if not ss:
2546 return
2592 return
2547 if len(ss) == 1:
2593 if len(ss) == 1:
2548 w, t = ss[0]
2594 w, t = ss[0]
2549 else:
2595 else:
2550 s = '\0'.join(t[1] for w, t in ss)
2596 s = '\0'.join(t[1] for w, t in ss)
2551 y = ('func', ('symbol', '_list'), ('string', s), order)
2597 y = ('func', ('symbol', '_list'), ('string', s), order)
2552 w, t = _optimize(y, False)
2598 w, t = _optimize(y, False)
2553 ws.append(w)
2599 ws.append(w)
2554 ts.append(t)
2600 ts.append(t)
2555 del ss[:]
2601 del ss[:]
2556 for y in getlist(x[1]):
2602 for y in getlist(x[1]):
2557 w, t = _optimize(y, False)
2603 w, t = _optimize(y, False)
2558 if t is not None and (t[0] == 'string' or t[0] == 'symbol'):
2604 if t is not None and (t[0] == 'string' or t[0] == 'symbol'):
2559 ss.append((w, t))
2605 ss.append((w, t))
2560 continue
2606 continue
2561 flushss()
2607 flushss()
2562 ws.append(w)
2608 ws.append(w)
2563 ts.append(t)
2609 ts.append(t)
2564 flushss()
2610 flushss()
2565 if len(ts) == 1:
2611 if len(ts) == 1:
2566 return ws[0], ts[0] # 'or' operation is fully optimized out
2612 return ws[0], ts[0] # 'or' operation is fully optimized out
2567 # we can't reorder trees by weight because it would change the order.
2613 # we can't reorder trees by weight because it would change the order.
2568 # ("sort(a + b)" == "sort(b + a)", but "a + b" != "b + a")
2614 # ("sort(a + b)" == "sort(b + a)", but "a + b" != "b + a")
2569 # ts = tuple(t for w, t in sorted(zip(ws, ts), key=lambda wt: wt[0]))
2615 # ts = tuple(t for w, t in sorted(zip(ws, ts), key=lambda wt: wt[0]))
2570 return max(ws), (op, ('list',) + tuple(ts), order)
2616 return max(ws), (op, ('list',) + tuple(ts), order)
2571 elif op == 'not':
2617 elif op == 'not':
2572 # Optimize not public() to _notpublic() because we have a fast version
2618 # Optimize not public() to _notpublic() because we have a fast version
2573 if x[1][:3] == ('func', ('symbol', 'public'), None):
2619 if x[1][:3] == ('func', ('symbol', 'public'), None):
2574 order = x[1][3]
2620 order = x[1][3]
2575 newsym = ('func', ('symbol', '_notpublic'), None, order)
2621 newsym = ('func', ('symbol', '_notpublic'), None, order)
2576 o = _optimize(newsym, not small)
2622 o = _optimize(newsym, not small)
2577 return o[0], o[1]
2623 return o[0], o[1]
2578 else:
2624 else:
2579 o = _optimize(x[1], not small)
2625 o = _optimize(x[1], not small)
2580 order = x[2]
2626 order = x[2]
2581 return o[0], (op, o[1], order)
2627 return o[0], (op, o[1], order)
2582 elif op in ('rangepre', 'parentpost'):
2628 elif op in ('rangepre', 'parentpost'):
2583 o = _optimize(x[1], small)
2629 o = _optimize(x[1], small)
2584 order = x[2]
2630 order = x[2]
2585 return o[0], (op, o[1], order)
2631 return o[0], (op, o[1], order)
2586 elif op in ('dagrange', 'range', 'parent', 'ancestor'):
2632 elif op in ('dagrange', 'range', 'parent', 'ancestor'):
2587 wa, ta = _optimize(x[1], small)
2633 wa, ta = _optimize(x[1], small)
2588 wb, tb = _optimize(x[2], small)
2634 wb, tb = _optimize(x[2], small)
2589 order = x[3]
2635 order = x[3]
2590 return wa + wb, (op, ta, tb, order)
2636 return wa + wb, (op, ta, tb, order)
2591 elif op == 'list':
2637 elif op == 'list':
2592 ws, ts = zip(*(_optimize(y, small) for y in x[1:]))
2638 ws, ts = zip(*(_optimize(y, small) for y in x[1:]))
2593 return sum(ws), (op,) + ts
2639 return sum(ws), (op,) + ts
2594 elif op == 'keyvalue':
2640 elif op == 'keyvalue':
2595 w, t = _optimize(x[2], small)
2641 w, t = _optimize(x[2], small)
2596 return w, (op, x[1], t)
2642 return w, (op, x[1], t)
2597 elif op == 'func':
2643 elif op == 'func':
2598 f = getsymbol(x[1])
2644 f = getsymbol(x[1])
2599 wa, ta = _optimize(x[2], small)
2645 wa, ta = _optimize(x[2], small)
2600 if f in ('author', 'branch', 'closed', 'date', 'desc', 'file', 'grep',
2646 if f in ('author', 'branch', 'closed', 'date', 'desc', 'file', 'grep',
2601 'keyword', 'outgoing', 'user', 'destination'):
2647 'keyword', 'outgoing', 'user', 'destination'):
2602 w = 10 # slow
2648 w = 10 # slow
2603 elif f in ('modifies', 'adds', 'removes'):
2649 elif f in ('modifies', 'adds', 'removes'):
2604 w = 30 # slower
2650 w = 30 # slower
2605 elif f == "contains":
2651 elif f == "contains":
2606 w = 100 # very slow
2652 w = 100 # very slow
2607 elif f == "ancestor":
2653 elif f == "ancestor":
2608 w = 1 * smallbonus
2654 w = 1 * smallbonus
2609 elif f in ('reverse', 'limit', 'first', 'wdir', '_intlist'):
2655 elif f in ('reverse', 'limit', 'first', 'wdir', '_intlist'):
2610 w = 0
2656 w = 0
2611 elif f == "sort":
2657 elif f == "sort":
2612 w = 10 # assume most sorts look at changelog
2658 w = 10 # assume most sorts look at changelog
2613 else:
2659 else:
2614 w = 1
2660 w = 1
2615 order = x[3]
2661 order = x[3]
2616 return w + wa, (op, x[1], ta, order)
2662 return w + wa, (op, x[1], ta, order)
2617 raise ValueError('invalid operator %r' % op)
2663 raise ValueError('invalid operator %r' % op)
2618
2664
2619 def optimize(tree):
2665 def optimize(tree):
2620 """Optimize evaluatable tree
2666 """Optimize evaluatable tree
2621
2667
2622 All pseudo operations should be transformed beforehand.
2668 All pseudo operations should be transformed beforehand.
2623 """
2669 """
2624 _weight, newtree = _optimize(tree, small=True)
2670 _weight, newtree = _optimize(tree, small=True)
2625 return newtree
2671 return newtree
2626
2672
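# Editor's note (illustrative sketch, not part of the original module): the
# usual pipeline for a hand-built spec is parse -> analyze -> optimize, e.g.
#
#   tree = parse('author(bob) and 1:10')
#   tree = analyze(tree)
#   tree = optimize(tree)
#
# Because _optimize() weighs both operands of an 'and' and evaluates the
# cheaper one first, the revision range above would be computed before the
# expensive author() filter is applied to it.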
2627 # the set of valid characters for the initial letter of symbols in
2673 # the set of valid characters for the initial letter of symbols in
2628 # alias declarations and definitions
2674 # alias declarations and definitions
2629 _aliassyminitletters = _syminitletters | set(pycompat.sysstr('$'))
2675 _aliassyminitletters = _syminitletters | set(pycompat.sysstr('$'))
2630
2676
2631 def _parsewith(spec, lookup=None, syminitletters=None):
2677 def _parsewith(spec, lookup=None, syminitletters=None):
2632 """Generate a parse tree of given spec with given tokenizing options
2678 """Generate a parse tree of given spec with given tokenizing options
2633
2679
2634 >>> _parsewith('foo($1)', syminitletters=_aliassyminitletters)
2680 >>> _parsewith('foo($1)', syminitletters=_aliassyminitletters)
2635 ('func', ('symbol', 'foo'), ('symbol', '$1'))
2681 ('func', ('symbol', 'foo'), ('symbol', '$1'))
2636 >>> _parsewith('$1')
2682 >>> _parsewith('$1')
2637 Traceback (most recent call last):
2683 Traceback (most recent call last):
2638 ...
2684 ...
2639 ParseError: ("syntax error in revset '$1'", 0)
2685 ParseError: ("syntax error in revset '$1'", 0)
2640 >>> _parsewith('foo bar')
2686 >>> _parsewith('foo bar')
2641 Traceback (most recent call last):
2687 Traceback (most recent call last):
2642 ...
2688 ...
2643 ParseError: ('invalid token', 4)
2689 ParseError: ('invalid token', 4)
2644 """
2690 """
2645 p = parser.parser(elements)
2691 p = parser.parser(elements)
2646 tree, pos = p.parse(tokenize(spec, lookup=lookup,
2692 tree, pos = p.parse(tokenize(spec, lookup=lookup,
2647 syminitletters=syminitletters))
2693 syminitletters=syminitletters))
2648 if pos != len(spec):
2694 if pos != len(spec):
2649 raise error.ParseError(_('invalid token'), pos)
2695 raise error.ParseError(_('invalid token'), pos)
2650 return _fixops(parser.simplifyinfixops(tree, ('list', 'or')))
2696 return _fixops(parser.simplifyinfixops(tree, ('list', 'or')))
2651
2697
2652 class _aliasrules(parser.basealiasrules):
2698 class _aliasrules(parser.basealiasrules):
2653 """Parsing and expansion rule set of revset aliases"""
2699 """Parsing and expansion rule set of revset aliases"""
2654 _section = _('revset alias')
2700 _section = _('revset alias')
2655
2701
2656 @staticmethod
2702 @staticmethod
2657 def _parse(spec):
2703 def _parse(spec):
2658 """Parse alias declaration/definition ``spec``
2704 """Parse alias declaration/definition ``spec``
2659
2705
2660 This also allows symbol names to use ``$`` as an initial letter
2706 This also allows symbol names to use ``$`` as an initial letter
2661 (for backward compatibility), and callers of this function should
2707 (for backward compatibility), and callers of this function should
2662 examine whether ``$`` is also used for unexpected symbols.
2708 examine whether ``$`` is also used for unexpected symbols.
2663 """
2709 """
2664 return _parsewith(spec, syminitletters=_aliassyminitletters)
2710 return _parsewith(spec, syminitletters=_aliassyminitletters)
2665
2711
2666 @staticmethod
2712 @staticmethod
2667 def _trygetfunc(tree):
2713 def _trygetfunc(tree):
2668 if tree[0] == 'func' and tree[1][0] == 'symbol':
2714 if tree[0] == 'func' and tree[1][0] == 'symbol':
2669 return tree[1][1], getlist(tree[2])
2715 return tree[1][1], getlist(tree[2])
2670
2716
2671 def expandaliases(ui, tree):
2717 def expandaliases(ui, tree):
2672 aliases = _aliasrules.buildmap(ui.configitems('revsetalias'))
2718 aliases = _aliasrules.buildmap(ui.configitems('revsetalias'))
2673 tree = _aliasrules.expand(aliases, tree)
2719 tree = _aliasrules.expand(aliases, tree)
2674 # warn about problematic (but not referred) aliases
2720 # warn about problematic (but not referred) aliases
2675 for name, alias in sorted(aliases.iteritems()):
2721 for name, alias in sorted(aliases.iteritems()):
2676 if alias.error and not alias.warned:
2722 if alias.error and not alias.warned:
2677 ui.warn(_('warning: %s\n') % (alias.error))
2723 ui.warn(_('warning: %s\n') % (alias.error))
2678 alias.warned = True
2724 alias.warned = True
2679 return tree
2725 return tree
2680
2726
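# Editor's note (illustrative, not part of the original module): the aliases
# expanded here come from the user's [revsetalias] config section, e.g. in an
# hgrc:
#
#   [revsetalias]
#   h = heads()
#   d(s) = sort(s, date)
#
# expandaliases() rewrites every reference to such an alias in the parsed
# tree before analyze() and optimize() run.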
2681 def foldconcat(tree):
2727 def foldconcat(tree):
2682 """Fold elements to be concatenated by `##`
2728 """Fold elements to be concatenated by `##`
2683 """
2729 """
2684 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2730 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2685 return tree
2731 return tree
2686 if tree[0] == '_concat':
2732 if tree[0] == '_concat':
2687 pending = [tree]
2733 pending = [tree]
2688 l = []
2734 l = []
2689 while pending:
2735 while pending:
2690 e = pending.pop()
2736 e = pending.pop()
2691 if e[0] == '_concat':
2737 if e[0] == '_concat':
2692 pending.extend(reversed(e[1:]))
2738 pending.extend(reversed(e[1:]))
2693 elif e[0] in ('string', 'symbol'):
2739 elif e[0] in ('string', 'symbol'):
2694 l.append(e[1])
2740 l.append(e[1])
2695 else:
2741 else:
2696 msg = _("\"##\" can't concatenate \"%s\" element") % (e[0])
2742 msg = _("\"##\" can't concatenate \"%s\" element") % (e[0])
2697 raise error.ParseError(msg)
2743 raise error.ParseError(msg)
2698 return ('string', ''.join(l))
2744 return ('string', ''.join(l))
2699 else:
2745 else:
2700 return tuple(foldconcat(t) for t in tree)
2746 return tuple(foldconcat(t) for t in tree)
2701
2747
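# Editor's sketch (not in the original source): foldconcat() collapses the
# '_concat' nodes produced by the '##' operator into a single string node:
#
#   >>> foldconcat(('_concat', ('string', 'release_'), ('symbol', '1.9')))
#   ('string', 'release_1.9')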
2702 def parse(spec, lookup=None):
2748 def parse(spec, lookup=None):
2703 return _parsewith(spec, lookup=lookup)
2749 return _parsewith(spec, lookup=lookup)
2704
2750
2705 def posttreebuilthook(tree, repo):
2751 def posttreebuilthook(tree, repo):
2706 # hook for extensions to execute code on the optimized tree
2752 # hook for extensions to execute code on the optimized tree
2707 pass
2753 pass
2708
2754
2709 def match(ui, spec, repo=None, order=defineorder):
2755 def match(ui, spec, repo=None, order=defineorder):
2710 """Create a matcher for a single revision spec
2756 """Create a matcher for a single revision spec
2711
2757
2712 If order=followorder, a matcher takes the ordering specified by the input
2758 If order=followorder, a matcher takes the ordering specified by the input
2713 set.
2759 set.
2714 """
2760 """
2715 return matchany(ui, [spec], repo=repo, order=order)
2761 return matchany(ui, [spec], repo=repo, order=order)
2716
2762
2717 def matchany(ui, specs, repo=None, order=defineorder):
2763 def matchany(ui, specs, repo=None, order=defineorder):
2718 """Create a matcher that will include any revisions matching one of the
2764 """Create a matcher that will include any revisions matching one of the
2719 given specs
2765 given specs
2720
2766
2721 If order=followorder, a matcher takes the ordering specified by the input
2767 If order=followorder, a matcher takes the ordering specified by the input
2722 set.
2768 set.
2723 """
2769 """
2724 if not specs:
2770 if not specs:
2725 def mfunc(repo, subset=None):
2771 def mfunc(repo, subset=None):
2726 return baseset()
2772 return baseset()
2727 return mfunc
2773 return mfunc
2728 if not all(specs):
2774 if not all(specs):
2729 raise error.ParseError(_("empty query"))
2775 raise error.ParseError(_("empty query"))
2730 lookup = None
2776 lookup = None
2731 if repo:
2777 if repo:
2732 lookup = repo.__contains__
2778 lookup = repo.__contains__
2733 if len(specs) == 1:
2779 if len(specs) == 1:
2734 tree = parse(specs[0], lookup)
2780 tree = parse(specs[0], lookup)
2735 else:
2781 else:
2736 tree = ('or', ('list',) + tuple(parse(s, lookup) for s in specs))
2782 tree = ('or', ('list',) + tuple(parse(s, lookup) for s in specs))
2737
2783
2738 if ui:
2784 if ui:
2739 tree = expandaliases(ui, tree)
2785 tree = expandaliases(ui, tree)
2740 tree = foldconcat(tree)
2786 tree = foldconcat(tree)
2741 tree = analyze(tree, order)
2787 tree = analyze(tree, order)
2742 tree = optimize(tree)
2788 tree = optimize(tree)
2743 posttreebuilthook(tree, repo)
2789 posttreebuilthook(tree, repo)
2744 return makematcher(tree)
2790 return makematcher(tree)
2745
2791
2746 def makematcher(tree):
2792 def makematcher(tree):
2747 """Create a matcher from an evaluatable tree"""
2793 """Create a matcher from an evaluatable tree"""
2748 def mfunc(repo, subset=None):
2794 def mfunc(repo, subset=None):
2749 if subset is None:
2795 if subset is None:
2750 subset = fullreposet(repo)
2796 subset = fullreposet(repo)
2751 if util.safehasattr(subset, 'isascending'):
2797 if util.safehasattr(subset, 'isascending'):
2752 result = getset(repo, subset, tree)
2798 result = getset(repo, subset, tree)
2753 else:
2799 else:
2754 result = getset(repo, baseset(subset), tree)
2800 result = getset(repo, baseset(subset), tree)
2755 return result
2801 return result
2756 return mfunc
2802 return mfunc
2757
2803
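# Editor's sketch (illustrative usage, not part of the original module):
#
#   m = match(ui, 'head() and not closed()', repo)
#   for rev in m(repo):
#       ...
#
# When no subset is passed, the matcher evaluates against the full repo; a
# plain iterable passed as 'subset' is wrapped in a baseset first.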
2758 def formatspec(expr, *args):
2804 def formatspec(expr, *args):
2759 '''
2805 '''
2760 This is a convenience function for using revsets internally, and
2806 This is a convenience function for using revsets internally, and
2761 escapes arguments appropriately. Aliases are intentionally ignored
2807 escapes arguments appropriately. Aliases are intentionally ignored
2762 so that intended expression behavior isn't accidentally subverted.
2808 so that intended expression behavior isn't accidentally subverted.
2763
2809
2764 Supported arguments:
2810 Supported arguments:
2765
2811
2766 %r = revset expression, parenthesized
2812 %r = revset expression, parenthesized
2767 %d = int(arg), no quoting
2813 %d = int(arg), no quoting
2768 %s = string(arg), escaped and single-quoted
2814 %s = string(arg), escaped and single-quoted
2769 %b = arg.branch(), escaped and single-quoted
2815 %b = arg.branch(), escaped and single-quoted
2770 %n = hex(arg), single-quoted
2816 %n = hex(arg), single-quoted
2771 %% = a literal '%'
2817 %% = a literal '%'
2772
2818
2773 Prefixing the type with 'l' specifies a parenthesized list of that type.
2819 Prefixing the type with 'l' specifies a parenthesized list of that type.
2774
2820
2775 >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
2821 >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
2776 '(10 or 11):: and ((this()) or (that()))'
2822 '(10 or 11):: and ((this()) or (that()))'
2777 >>> formatspec('%d:: and not %d::', 10, 20)
2823 >>> formatspec('%d:: and not %d::', 10, 20)
2778 '10:: and not 20::'
2824 '10:: and not 20::'
2779 >>> formatspec('%ld or %ld', [], [1])
2825 >>> formatspec('%ld or %ld', [], [1])
2780 "_list('') or 1"
2826 "_list('') or 1"
2781 >>> formatspec('keyword(%s)', 'foo\\xe9')
2827 >>> formatspec('keyword(%s)', 'foo\\xe9')
2782 "keyword('foo\\\\xe9')"
2828 "keyword('foo\\\\xe9')"
2783 >>> b = lambda: 'default'
2829 >>> b = lambda: 'default'
2784 >>> b.branch = b
2830 >>> b.branch = b
2785 >>> formatspec('branch(%b)', b)
2831 >>> formatspec('branch(%b)', b)
2786 "branch('default')"
2832 "branch('default')"
2787 >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
2833 >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
2788 "root(_list('a\\x00b\\x00c\\x00d'))"
2834 "root(_list('a\\x00b\\x00c\\x00d'))"
2789 '''
2835 '''
2790
2836
2791 def quote(s):
2837 def quote(s):
2792 return repr(str(s))
2838 return repr(str(s))
2793
2839
2794 def argtype(c, arg):
2840 def argtype(c, arg):
2795 if c == 'd':
2841 if c == 'd':
2796 return str(int(arg))
2842 return str(int(arg))
2797 elif c == 's':
2843 elif c == 's':
2798 return quote(arg)
2844 return quote(arg)
2799 elif c == 'r':
2845 elif c == 'r':
2800 parse(arg) # make sure syntax errors are confined
2846 parse(arg) # make sure syntax errors are confined
2801 return '(%s)' % arg
2847 return '(%s)' % arg
2802 elif c == 'n':
2848 elif c == 'n':
2803 return quote(node.hex(arg))
2849 return quote(node.hex(arg))
2804 elif c == 'b':
2850 elif c == 'b':
2805 return quote(arg.branch())
2851 return quote(arg.branch())
2806
2852
2807 def listexp(s, t):
2853 def listexp(s, t):
2808 l = len(s)
2854 l = len(s)
2809 if l == 0:
2855 if l == 0:
2810 return "_list('')"
2856 return "_list('')"
2811 elif l == 1:
2857 elif l == 1:
2812 return argtype(t, s[0])
2858 return argtype(t, s[0])
2813 elif t == 'd':
2859 elif t == 'd':
2814 return "_intlist('%s')" % "\0".join(str(int(a)) for a in s)
2860 return "_intlist('%s')" % "\0".join(str(int(a)) for a in s)
2815 elif t == 's':
2861 elif t == 's':
2816 return "_list('%s')" % "\0".join(s)
2862 return "_list('%s')" % "\0".join(s)
2817 elif t == 'n':
2863 elif t == 'n':
2818 return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
2864 return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
2819 elif t == 'b':
2865 elif t == 'b':
2820 return "_list('%s')" % "\0".join(a.branch() for a in s)
2866 return "_list('%s')" % "\0".join(a.branch() for a in s)
2821
2867
2822 m = l // 2
2868 m = l // 2
2823 return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))
2869 return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))
2824
2870
2825 ret = ''
2871 ret = ''
2826 pos = 0
2872 pos = 0
2827 arg = 0
2873 arg = 0
2828 while pos < len(expr):
2874 while pos < len(expr):
2829 c = expr[pos]
2875 c = expr[pos]
2830 if c == '%':
2876 if c == '%':
2831 pos += 1
2877 pos += 1
2832 d = expr[pos]
2878 d = expr[pos]
2833 if d == '%':
2879 if d == '%':
2834 ret += d
2880 ret += d
2835 elif d in 'dsnbr':
2881 elif d in 'dsnbr':
2836 ret += argtype(d, args[arg])
2882 ret += argtype(d, args[arg])
2837 arg += 1
2883 arg += 1
2838 elif d == 'l':
2884 elif d == 'l':
2839 # a list of some type
2885 # a list of some type
2840 pos += 1
2886 pos += 1
2841 d = expr[pos]
2887 d = expr[pos]
2842 ret += listexp(list(args[arg]), d)
2888 ret += listexp(list(args[arg]), d)
2843 arg += 1
2889 arg += 1
2844 else:
2890 else:
2845 raise error.Abort(_('unexpected revspec format character %s')
2891 raise error.Abort(_('unexpected revspec format character %s')
2846 % d)
2892 % d)
2847 else:
2893 else:
2848 ret += c
2894 ret += c
2849 pos += 1
2895 pos += 1
2850
2896
2851 return ret
2897 return ret
2852
2898
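# Editor's sketch (illustrative): internal callers typically combine the
# list forms with ordinary operators, e.g.
#
#   >>> formatspec('%ld and keyword(%s)', [10, 11, 12], 'bug')
#   "_intlist('10\x0011\x0012') and keyword('bug')"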
2853 def prettyformat(tree):
2899 def prettyformat(tree):
2854 return parser.prettyformat(tree, ('string', 'symbol'))
2900 return parser.prettyformat(tree, ('string', 'symbol'))
2855
2901
2856 def depth(tree):
2902 def depth(tree):
2857 if isinstance(tree, tuple):
2903 if isinstance(tree, tuple):
2858 return max(map(depth, tree)) + 1
2904 return max(map(depth, tree)) + 1
2859 else:
2905 else:
2860 return 0
2906 return 0
2861
2907
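# Editor's sketch: depth() measures how deeply a parsed tree is nested, e.g.
#
#   >>> depth(('func', ('symbol', 'p1'), None))
#   2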
2862 def funcsused(tree):
2908 def funcsused(tree):
2863 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2909 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2864 return set()
2910 return set()
2865 else:
2911 else:
2866 funcs = set()
2912 funcs = set()
2867 for s in tree[1:]:
2913 for s in tree[1:]:
2868 funcs |= funcsused(s)
2914 funcs |= funcsused(s)
2869 if tree[0] == 'func':
2915 if tree[0] == 'func':
2870 funcs.add(tree[1][1])
2916 funcs.add(tree[1][1])
2871 return funcs
2917 return funcs
2872
2918
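# Editor's sketch: funcsused() collects the names of the functions referenced
# by a parsed expression, e.g.
#
#   >>> sorted(funcsused(parse('keyword(bug) and grep(fix)')))
#   ['grep', 'keyword']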
2873 def _formatsetrepr(r):
2919 def _formatsetrepr(r):
2874 """Format an optional printable representation of a set
2920 """Format an optional printable representation of a set
2875
2921
2876 ======== =================================
2922 ======== =================================
2877 type(r) example
2923 type(r) example
2878 ======== =================================
2924 ======== =================================
2879 tuple ('<not %r>', other)
2925 tuple ('<not %r>', other)
2880 str '<branch closed>'
2926 str '<branch closed>'
2881 callable lambda: '<branch %r>' % sorted(b)
2927 callable lambda: '<branch %r>' % sorted(b)
2882 object other
2928 object other
2883 ======== =================================
2929 ======== =================================
2884 """
2930 """
2885 if r is None:
2931 if r is None:
2886 return ''
2932 return ''
2887 elif isinstance(r, tuple):
2933 elif isinstance(r, tuple):
2888 return r[0] % r[1:]
2934 return r[0] % r[1:]
2889 elif isinstance(r, str):
2935 elif isinstance(r, str):
2890 return r
2936 return r
2891 elif callable(r):
2937 elif callable(r):
2892 return r()
2938 return r()
2893 else:
2939 else:
2894 return repr(r)
2940 return repr(r)
2895
2941
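# Editor's sketch: every accepted shape collapses to a plain string, e.g.
#
#   >>> _formatsetrepr(('<not %r>', [0, 1]))
#   '<not [0, 1]>'
#   >>> _formatsetrepr(lambda: '<branch default>')
#   '<branch default>'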
2896 class abstractsmartset(object):
2942 class abstractsmartset(object):
2897
2943
2898 def __nonzero__(self):
2944 def __nonzero__(self):
2899 """True if the smartset is not empty"""
2945 """True if the smartset is not empty"""
2900 raise NotImplementedError()
2946 raise NotImplementedError()
2901
2947
2902 def __contains__(self, rev):
2948 def __contains__(self, rev):
2903 """provide fast membership testing"""
2949 """provide fast membership testing"""
2904 raise NotImplementedError()
2950 raise NotImplementedError()
2905
2951
2906 def __iter__(self):
2952 def __iter__(self):
2907 """iterate the set in the order it is supposed to be iterated"""
2953 """iterate the set in the order it is supposed to be iterated"""
2908 raise NotImplementedError()
2954 raise NotImplementedError()
2909
2955
2910 # Attributes containing a function to perform a fast iteration in a given
2956 # Attributes containing a function to perform a fast iteration in a given
2911 # direction. A smartset can have none, one, or both defined.
2957 # direction. A smartset can have none, one, or both defined.
2912 #
2958 #
2913 # Default value is None instead of a function returning None to avoid
2959 # Default value is None instead of a function returning None to avoid
2914 # initializing an iterator just for testing if a fast method exists.
2960 # initializing an iterator just for testing if a fast method exists.
2915 fastasc = None
2961 fastasc = None
2916 fastdesc = None
2962 fastdesc = None
2917
2963
2918 def isascending(self):
2964 def isascending(self):
2919 """True if the set will iterate in ascending order"""
2965 """True if the set will iterate in ascending order"""
2920 raise NotImplementedError()
2966 raise NotImplementedError()
2921
2967
2922 def isdescending(self):
2968 def isdescending(self):
2923 """True if the set will iterate in descending order"""
2969 """True if the set will iterate in descending order"""
2924 raise NotImplementedError()
2970 raise NotImplementedError()
2925
2971
2926 def istopo(self):
2972 def istopo(self):
2927 """True if the set will iterate in topographical order"""
2973 """True if the set will iterate in topographical order"""
2928 raise NotImplementedError()
2974 raise NotImplementedError()
2929
2975
2930 def min(self):
2976 def min(self):
2931 """return the minimum element in the set"""
2977 """return the minimum element in the set"""
2932 if self.fastasc is None:
2978 if self.fastasc is None:
2933 v = min(self)
2979 v = min(self)
2934 else:
2980 else:
2935 for v in self.fastasc():
2981 for v in self.fastasc():
2936 break
2982 break
2937 else:
2983 else:
2938 raise ValueError('arg is an empty sequence')
2984 raise ValueError('arg is an empty sequence')
2939 self.min = lambda: v
2985 self.min = lambda: v
2940 return v
2986 return v
2941
2987
2942 def max(self):
2988 def max(self):
2943 """return the maximum element in the set"""
2989 """return the maximum element in the set"""
2944 if self.fastdesc is None:
2990 if self.fastdesc is None:
2945 return max(self)
2991 return max(self)
2946 else:
2992 else:
2947 for v in self.fastdesc():
2993 for v in self.fastdesc():
2948 break
2994 break
2949 else:
2995 else:
2950 raise ValueError('arg is an empty sequence')
2996 raise ValueError('arg is an empty sequence')
2951 self.max = lambda: v
2997 self.max = lambda: v
2952 return v
2998 return v
2953
2999
2954 def first(self):
3000 def first(self):
2955 """return the first element in the set (user iteration perspective)
3001 """return the first element in the set (user iteration perspective)
2956
3002
2957 Return None if the set is empty"""
3003 Return None if the set is empty"""
2958 raise NotImplementedError()
3004 raise NotImplementedError()
2959
3005
2960 def last(self):
3006 def last(self):
2961 """return the last element in the set (user iteration perspective)
3007 """return the last element in the set (user iteration perspective)
2962
3008
2963 Return None if the set is empty"""
3009 Return None if the set is empty"""
2964 raise NotImplementedError()
3010 raise NotImplementedError()
2965
3011
2966 def __len__(self):
3012 def __len__(self):
2967 """return the length of the smartsets
3013 """return the length of the smartsets
2968
3014
2969 This can be expensive on smartset that could be lazy otherwise."""
3015 This can be expensive on smartset that could be lazy otherwise."""
2970 raise NotImplementedError()
3016 raise NotImplementedError()
2971
3017
2972 def reverse(self):
3018 def reverse(self):
2973 """reverse the expected iteration order"""
3019 """reverse the expected iteration order"""
2974 raise NotImplementedError()
3020 raise NotImplementedError()
2975
3021
2976 def sort(self, reverse=True):
3022 def sort(self, reverse=True):
2977 """get the set to iterate in an ascending or descending order"""
3023 """get the set to iterate in an ascending or descending order"""
2978 raise NotImplementedError()
3024 raise NotImplementedError()
2979
3025
2980 def __and__(self, other):
3026 def __and__(self, other):
2981 """Returns a new object with the intersection of the two collections.
3027 """Returns a new object with the intersection of the two collections.
2982
3028
2983 This is part of the mandatory API for smartset."""
3029 This is part of the mandatory API for smartset."""
2984 if isinstance(other, fullreposet):
3030 if isinstance(other, fullreposet):
2985 return self
3031 return self
2986 return self.filter(other.__contains__, condrepr=other, cache=False)
3032 return self.filter(other.__contains__, condrepr=other, cache=False)
2987
3033
2988 def __add__(self, other):
3034 def __add__(self, other):
2989 """Returns a new object with the union of the two collections.
3035 """Returns a new object with the union of the two collections.
2990
3036
2991 This is part of the mandatory API for smartset."""
3037 This is part of the mandatory API for smartset."""
2992 return addset(self, other)
3038 return addset(self, other)
2993
3039
2994 def __sub__(self, other):
3040 def __sub__(self, other):
2995 """Returns a new object with the substraction of the two collections.
3041 """Returns a new object with the substraction of the two collections.
2996
3042
2997 This is part of the mandatory API for smartset."""
3043 This is part of the mandatory API for smartset."""
2998 c = other.__contains__
3044 c = other.__contains__
2999 return self.filter(lambda r: not c(r), condrepr=('<not %r>', other),
3045 return self.filter(lambda r: not c(r), condrepr=('<not %r>', other),
3000 cache=False)
3046 cache=False)
3001
3047
3002 def filter(self, condition, condrepr=None, cache=True):
3048 def filter(self, condition, condrepr=None, cache=True):
3003 """Returns this smartset filtered by condition as a new smartset.
3049 """Returns this smartset filtered by condition as a new smartset.
3004
3050
3005 `condition` is a callable which takes a revision number and returns a
3051 `condition` is a callable which takes a revision number and returns a
3006 boolean. Optional `condrepr` provides a printable representation of
3052 boolean. Optional `condrepr` provides a printable representation of
3007 the given `condition`.
3053 the given `condition`.
3008
3054
3009 This is part of the mandatory API for smartset."""
3055 This is part of the mandatory API for smartset."""
3010 # builtin functions cannot be cached, but they do not need to be
3056 # builtin functions cannot be cached, but they do not need to be
3011 if cache and util.safehasattr(condition, 'func_code'):
3057 if cache and util.safehasattr(condition, 'func_code'):
3012 condition = util.cachefunc(condition)
3058 condition = util.cachefunc(condition)
3013 return filteredset(self, condition, condrepr)
3059 return filteredset(self, condition, condrepr)
3014
3060
3015 class baseset(abstractsmartset):
3061 class baseset(abstractsmartset):
3016 """Basic data structure that represents a revset and contains the basic
3062 """Basic data structure that represents a revset and contains the basic
3017 operations that it should be able to perform.
3063 operations that it should be able to perform.
3018
3064
3019 Every method in this class should be implemented by any smartset class.
3065 Every method in this class should be implemented by any smartset class.
3020 """
3066 """
3021 def __init__(self, data=(), datarepr=None, istopo=False):
3067 def __init__(self, data=(), datarepr=None, istopo=False):
3022 """
3068 """
3023 datarepr: a tuple of (format, obj, ...), a function or an object that
3069 datarepr: a tuple of (format, obj, ...), a function or an object that
3024 provides a printable representation of the given data.
3070 provides a printable representation of the given data.
3025 """
3071 """
3026 self._ascending = None
3072 self._ascending = None
3027 self._istopo = istopo
3073 self._istopo = istopo
3028 if not isinstance(data, list):
3074 if not isinstance(data, list):
3029 if isinstance(data, set):
3075 if isinstance(data, set):
3030 self._set = data
3076 self._set = data
3031 # a set has no order; we pick one for stability purposes
3077 # a set has no order; we pick one for stability purposes
3032 self._ascending = True
3078 self._ascending = True
3033 data = list(data)
3079 data = list(data)
3034 self._list = data
3080 self._list = data
3035 self._datarepr = datarepr
3081 self._datarepr = datarepr
3036
3082
3037 @util.propertycache
3083 @util.propertycache
3038 def _set(self):
3084 def _set(self):
3039 return set(self._list)
3085 return set(self._list)
3040
3086
3041 @util.propertycache
3087 @util.propertycache
3042 def _asclist(self):
3088 def _asclist(self):
3043 asclist = self._list[:]
3089 asclist = self._list[:]
3044 asclist.sort()
3090 asclist.sort()
3045 return asclist
3091 return asclist
3046
3092
3047 def __iter__(self):
3093 def __iter__(self):
3048 if self._ascending is None:
3094 if self._ascending is None:
3049 return iter(self._list)
3095 return iter(self._list)
3050 elif self._ascending:
3096 elif self._ascending:
3051 return iter(self._asclist)
3097 return iter(self._asclist)
3052 else:
3098 else:
3053 return reversed(self._asclist)
3099 return reversed(self._asclist)
3054
3100
3055 def fastasc(self):
3101 def fastasc(self):
3056 return iter(self._asclist)
3102 return iter(self._asclist)
3057
3103
3058 def fastdesc(self):
3104 def fastdesc(self):
3059 return reversed(self._asclist)
3105 return reversed(self._asclist)
3060
3106
3061 @util.propertycache
3107 @util.propertycache
3062 def __contains__(self):
3108 def __contains__(self):
3063 return self._set.__contains__
3109 return self._set.__contains__
3064
3110
3065 def __nonzero__(self):
3111 def __nonzero__(self):
3066 return bool(self._list)
3112 return bool(self._list)
3067
3113
3068 def sort(self, reverse=False):
3114 def sort(self, reverse=False):
3069 self._ascending = not bool(reverse)
3115 self._ascending = not bool(reverse)
3070 self._istopo = False
3116 self._istopo = False
3071
3117
3072 def reverse(self):
3118 def reverse(self):
3073 if self._ascending is None:
3119 if self._ascending is None:
3074 self._list.reverse()
3120 self._list.reverse()
3075 else:
3121 else:
3076 self._ascending = not self._ascending
3122 self._ascending = not self._ascending
3077 self._istopo = False
3123 self._istopo = False
3078
3124
3079 def __len__(self):
3125 def __len__(self):
3080 return len(self._list)
3126 return len(self._list)
3081
3127
3082 def isascending(self):
3128 def isascending(self):
3083 """Returns True if the collection is ascending order, False if not.
3129 """Returns True if the collection is ascending order, False if not.
3084
3130
3085 This is part of the mandatory API for smartset."""
3131 This is part of the mandatory API for smartset."""
3086 if len(self) <= 1:
3132 if len(self) <= 1:
3087 return True
3133 return True
3088 return self._ascending is not None and self._ascending
3134 return self._ascending is not None and self._ascending
3089
3135
3090 def isdescending(self):
3136 def isdescending(self):
3091 """Returns True if the collection is descending order, False if not.
3137 """Returns True if the collection is descending order, False if not.
3092
3138
3093 This is part of the mandatory API for smartset."""
3139 This is part of the mandatory API for smartset."""
3094 if len(self) <= 1:
3140 if len(self) <= 1:
3095 return True
3141 return True
3096 return self._ascending is not None and not self._ascending
3142 return self._ascending is not None and not self._ascending
3097
3143
3098 def istopo(self):
3144 def istopo(self):
3099 """Is the collection is in topographical order or not.
3145 """Is the collection is in topographical order or not.
3100
3146
3101 This is part of the mandatory API for smartset."""
3147 This is part of the mandatory API for smartset."""
3102 if len(self) <= 1:
3148 if len(self) <= 1:
3103 return True
3149 return True
3104 return self._istopo
3150 return self._istopo
3105
3151
3106 def first(self):
3152 def first(self):
3107 if self:
3153 if self:
3108 if self._ascending is None:
3154 if self._ascending is None:
3109 return self._list[0]
3155 return self._list[0]
3110 elif self._ascending:
3156 elif self._ascending:
3111 return self._asclist[0]
3157 return self._asclist[0]
3112 else:
3158 else:
3113 return self._asclist[-1]
3159 return self._asclist[-1]
3114 return None
3160 return None
3115
3161
3116 def last(self):
3162 def last(self):
3117 if self:
3163 if self:
3118 if self._ascending is None:
3164 if self._ascending is None:
3119 return self._list[-1]
3165 return self._list[-1]
3120 elif self._ascending:
3166 elif self._ascending:
3121 return self._asclist[-1]
3167 return self._asclist[-1]
3122 else:
3168 else:
3123 return self._asclist[0]
3169 return self._asclist[0]
3124 return None
3170 return None
3125
3171
3126 def __repr__(self):
3172 def __repr__(self):
3127 d = {None: '', False: '-', True: '+'}[self._ascending]
3173 d = {None: '', False: '-', True: '+'}[self._ascending]
3128 s = _formatsetrepr(self._datarepr)
3174 s = _formatsetrepr(self._datarepr)
3129 if not s:
3175 if not s:
3130 l = self._list
3176 l = self._list
3131 # if _list has been built from a set, it might have a different
3177 # if _list has been built from a set, it might have a different
3132 # order from one python implementation to another.
3178 # order from one python implementation to another.
3133 # We fall back to the sorted version for a stable output.
3179 # We fall back to the sorted version for a stable output.
3134 if self._ascending is not None:
3180 if self._ascending is not None:
3135 l = self._asclist
3181 l = self._asclist
3136 s = repr(l)
3182 s = repr(l)
3137 return '<%s%s %s>' % (type(self).__name__, d, s)
3183 return '<%s%s %s>' % (type(self).__name__, d, s)
3138
3184
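# Editor's sketch (illustrative, not part of the original module): a baseset
# keeps its construction order until sort() or reverse() fixes a direction:
#
#   >>> s = baseset([3, 1, 2])
#   >>> list(s)
#   [3, 1, 2]
#   >>> s.sort()
#   >>> list(s)
#   [1, 2, 3]
#   >>> s.first(), s.last(), s.min(), s.max()
#   (1, 3, 1, 3)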
3139 class filteredset(abstractsmartset):
3185 class filteredset(abstractsmartset):
3140 """Duck type for baseset class which iterates lazily over the revisions in
3186 """Duck type for baseset class which iterates lazily over the revisions in
3141 the subset and contains a function which tests for membership in the
3187 the subset and contains a function which tests for membership in the
3142 revset
3188 revset
3143 """
3189 """
3144 def __init__(self, subset, condition=lambda x: True, condrepr=None):
3190 def __init__(self, subset, condition=lambda x: True, condrepr=None):
3145 """
3191 """
3146 condition: a function that decides whether a revision in the subset
3192 condition: a function that decides whether a revision in the subset
3147 belongs to the revset or not.
3193 belongs to the revset or not.
3148 condrepr: a tuple of (format, obj, ...), a function or an object that
3194 condrepr: a tuple of (format, obj, ...), a function or an object that
3149 provides a printable representation of the given condition.
3195 provides a printable representation of the given condition.
3150 """
3196 """
3151 self._subset = subset
3197 self._subset = subset
3152 self._condition = condition
3198 self._condition = condition
3153 self._condrepr = condrepr
3199 self._condrepr = condrepr
3154
3200
3155 def __contains__(self, x):
3201 def __contains__(self, x):
3156 return x in self._subset and self._condition(x)
3202 return x in self._subset and self._condition(x)
3157
3203
3158 def __iter__(self):
3204 def __iter__(self):
3159 return self._iterfilter(self._subset)
3205 return self._iterfilter(self._subset)
3160
3206
3161 def _iterfilter(self, it):
3207 def _iterfilter(self, it):
3162 cond = self._condition
3208 cond = self._condition
3163 for x in it:
3209 for x in it:
3164 if cond(x):
3210 if cond(x):
3165 yield x
3211 yield x
3166
3212
3167 @property
3213 @property
3168 def fastasc(self):
3214 def fastasc(self):
3169 it = self._subset.fastasc
3215 it = self._subset.fastasc
3170 if it is None:
3216 if it is None:
3171 return None
3217 return None
3172 return lambda: self._iterfilter(it())
3218 return lambda: self._iterfilter(it())
3173
3219
3174 @property
3220 @property
3175 def fastdesc(self):
3221 def fastdesc(self):
3176 it = self._subset.fastdesc
3222 it = self._subset.fastdesc
3177 if it is None:
3223 if it is None:
3178 return None
3224 return None
3179 return lambda: self._iterfilter(it())
3225 return lambda: self._iterfilter(it())
3180
3226
3181 def __nonzero__(self):
3227 def __nonzero__(self):
3182 fast = None
3228 fast = None
3183 candidates = [self.fastasc if self.isascending() else None,
3229 candidates = [self.fastasc if self.isascending() else None,
3184 self.fastdesc if self.isdescending() else None,
3230 self.fastdesc if self.isdescending() else None,
3185 self.fastasc,
3231 self.fastasc,
3186 self.fastdesc]
3232 self.fastdesc]
3187 for candidate in candidates:
3233 for candidate in candidates:
3188 if candidate is not None:
3234 if candidate is not None:
3189 fast = candidate
3235 fast = candidate
3190 break
3236 break
3191
3237
3192 if fast is not None:
3238 if fast is not None:
3193 it = fast()
3239 it = fast()
3194 else:
3240 else:
3195 it = self
3241 it = self
3196
3242
3197 for r in it:
3243 for r in it:
3198 return True
3244 return True
3199 return False
3245 return False
3200
3246
3201 def __len__(self):
3247 def __len__(self):
3202 # Basic implementation to be changed in future patches.
3248 # Basic implementation to be changed in future patches.
3203 # Until this gets improved, we use a generator expression
3249 # Until this gets improved, we use a generator expression
3204 # here, since list comprehensions are free to call __len__ again,
3250 # here, since list comprehensions are free to call __len__ again,
3205 # causing infinite recursion.
3251 # causing infinite recursion.
3206 l = baseset(r for r in self)
3252 l = baseset(r for r in self)
3207 return len(l)
3253 return len(l)
3208
3254
3209 def sort(self, reverse=False):
3255 def sort(self, reverse=False):
3210 self._subset.sort(reverse=reverse)
3256 self._subset.sort(reverse=reverse)
3211
3257
3212 def reverse(self):
3258 def reverse(self):
3213 self._subset.reverse()
3259 self._subset.reverse()
3214
3260
3215 def isascending(self):
3261 def isascending(self):
3216 return self._subset.isascending()
3262 return self._subset.isascending()
3217
3263
3218 def isdescending(self):
3264 def isdescending(self):
3219 return self._subset.isdescending()
3265 return self._subset.isdescending()
3220
3266
3221 def istopo(self):
3267 def istopo(self):
3222 return self._subset.istopo()
3268 return self._subset.istopo()
3223
3269
3224 def first(self):
3270 def first(self):
3225 for x in self:
3271 for x in self:
3226 return x
3272 return x
3227 return None
3273 return None
3228
3274
3229 def last(self):
3275 def last(self):
3230 it = None
3276 it = None
3231 if self.isascending():
3277 if self.isascending():
3232 it = self.fastdesc
3278 it = self.fastdesc
3233 elif self.isdescending():
3279 elif self.isdescending():
3234 it = self.fastasc
3280 it = self.fastasc
3235 if it is not None:
3281 if it is not None:
3236 for x in it():
3282 for x in it():
3237 return x
3283 return x
3238 return None #empty case
3284 return None #empty case
3239 else:
3285 else:
3240 x = None
3286 x = None
3241 for x in self:
3287 for x in self:
3242 pass
3288 pass
3243 return x
3289 return x
3244
3290
3245 def __repr__(self):
3291 def __repr__(self):
3246 xs = [repr(self._subset)]
3292 xs = [repr(self._subset)]
3247 s = _formatsetrepr(self._condrepr)
3293 s = _formatsetrepr(self._condrepr)
3248 if s:
3294 if s:
3249 xs.append(s)
3295 xs.append(s)
3250 return '<%s %s>' % (type(self).__name__, ', '.join(xs))
3296 return '<%s %s>' % (type(self).__name__, ', '.join(xs))
3251
3297
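# Editor's sketch (illustrative): filter() on any smartset returns a lazy
# filteredset; both membership and iteration re-check the condition:
#
#   >>> fs = baseset([0, 1, 2, 3, 4]).filter(lambda r: r % 2 == 1)
#   >>> 2 in fs, 3 in fs
#   (False, True)
#   >>> list(fs)
#   [1, 3]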
3252 def _iterordered(ascending, iter1, iter2):
3298 def _iterordered(ascending, iter1, iter2):
3253 """produce an ordered iteration from two iterators with the same order
3299 """produce an ordered iteration from two iterators with the same order
3254
3300
3255 The 'ascending' parameter is used to indicate the iteration direction.
3301 The 'ascending' parameter is used to indicate the iteration direction.
3256 """
3302 """
3257 choice = max
3303 choice = max
3258 if ascending:
3304 if ascending:
3259 choice = min
3305 choice = min
3260
3306
3261 val1 = None
3307 val1 = None
3262 val2 = None
3308 val2 = None
3263 try:
3309 try:
3264 # Consume both iterators in an ordered way until one is empty
3310 # Consume both iterators in an ordered way until one is empty
3265 while True:
3311 while True:
3266 if val1 is None:
3312 if val1 is None:
3267 val1 = next(iter1)
3313 val1 = next(iter1)
3268 if val2 is None:
3314 if val2 is None:
3269 val2 = next(iter2)
3315 val2 = next(iter2)
3270 n = choice(val1, val2)
3316 n = choice(val1, val2)
3271 yield n
3317 yield n
3272 if val1 == n:
3318 if val1 == n:
3273 val1 = None
3319 val1 = None
3274 if val2 == n:
3320 if val2 == n:
3275 val2 = None
3321 val2 = None
3276 except StopIteration:
3322 except StopIteration:
3277 # Flush any remaining values and consume the other one
3323 # Flush any remaining values and consume the other one
3278 it = iter2
3324 it = iter2
3279 if val1 is not None:
3325 if val1 is not None:
3280 yield val1
3326 yield val1
3281 it = iter1
3327 it = iter1
3282 elif val2 is not None:
3328 elif val2 is not None:
3283 # might have been equality and both are empty
3329 # might have been equality and both are empty
3284 yield val2
3330 yield val2
3285 for val in it:
3331 for val in it:
3286 yield val
3332 yield val
3287
3333
3288 class addset(abstractsmartset):
3334 class addset(abstractsmartset):
3289 """Represent the addition of two sets
3335 """Represent the addition of two sets
3290
3336
3291 Wrapper structure for lazily adding two structures without losing much
3337 Wrapper structure for lazily adding two structures without losing much
3292 performance on the __contains__ method
3338 performance on the __contains__ method
3293
3339
3294 If the ascending attribute is set, that means the two structures are
3340 If the ascending attribute is set, that means the two structures are
3295 ordered in either an ascending or descending way. Therefore, we can add
3341 ordered in either an ascending or descending way. Therefore, we can add
3296 them maintaining the order by iterating over both at the same time
3342 them maintaining the order by iterating over both at the same time
3297
3343
3298 >>> xs = baseset([0, 3, 2])
3344 >>> xs = baseset([0, 3, 2])
3299 >>> ys = baseset([5, 2, 4])
3345 >>> ys = baseset([5, 2, 4])
3300
3346
3301 >>> rs = addset(xs, ys)
3347 >>> rs = addset(xs, ys)
3302 >>> bool(rs), 0 in rs, 1 in rs, 5 in rs, rs.first(), rs.last()
3348 >>> bool(rs), 0 in rs, 1 in rs, 5 in rs, rs.first(), rs.last()
3303 (True, True, False, True, 0, 4)
3349 (True, True, False, True, 0, 4)
3304 >>> rs = addset(xs, baseset([]))
3350 >>> rs = addset(xs, baseset([]))
3305 >>> bool(rs), 0 in rs, 1 in rs, rs.first(), rs.last()
3351 >>> bool(rs), 0 in rs, 1 in rs, rs.first(), rs.last()
3306 (True, True, False, 0, 2)
3352 (True, True, False, 0, 2)
3307 >>> rs = addset(baseset([]), baseset([]))
3353 >>> rs = addset(baseset([]), baseset([]))
3308 >>> bool(rs), 0 in rs, rs.first(), rs.last()
3354 >>> bool(rs), 0 in rs, rs.first(), rs.last()
3309 (False, False, None, None)
3355 (False, False, None, None)
3310
3356
3311 iterate unsorted:
3357 iterate unsorted:
3312 >>> rs = addset(xs, ys)
3358 >>> rs = addset(xs, ys)
3313 >>> # (use generator because pypy could call len())
3359 >>> # (use generator because pypy could call len())
3314 >>> list(x for x in rs) # without _genlist
3360 >>> list(x for x in rs) # without _genlist
3315 [0, 3, 2, 5, 4]
3361 [0, 3, 2, 5, 4]
3316 >>> assert not rs._genlist
3362 >>> assert not rs._genlist
3317 >>> len(rs)
3363 >>> len(rs)
3318 5
3364 5
3319 >>> [x for x in rs] # with _genlist
3365 >>> [x for x in rs] # with _genlist
3320 [0, 3, 2, 5, 4]
3366 [0, 3, 2, 5, 4]
3321 >>> assert rs._genlist
3367 >>> assert rs._genlist
3322
3368
3323 iterate ascending:
3369 iterate ascending:
3324 >>> rs = addset(xs, ys, ascending=True)
3370 >>> rs = addset(xs, ys, ascending=True)
3325 >>> # (use generator because pypy could call len())
3371 >>> # (use generator because pypy could call len())
3326 >>> list(x for x in rs), list(x for x in rs.fastasc()) # without _asclist
3372 >>> list(x for x in rs), list(x for x in rs.fastasc()) # without _asclist
3327 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
3373 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
3328 >>> assert not rs._asclist
3374 >>> assert not rs._asclist
3329 >>> len(rs)
3375 >>> len(rs)
3330 5
3376 5
3331 >>> [x for x in rs], [x for x in rs.fastasc()]
3377 >>> [x for x in rs], [x for x in rs.fastasc()]
3332 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
3378 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
3333 >>> assert rs._asclist
3379 >>> assert rs._asclist
3334
3380
3335 iterate descending:
3381 iterate descending:
3336 >>> rs = addset(xs, ys, ascending=False)
3382 >>> rs = addset(xs, ys, ascending=False)
3337 >>> # (use generator because pypy could call len())
3383 >>> # (use generator because pypy could call len())
3338 >>> list(x for x in rs), list(x for x in rs.fastdesc()) # without _asclist
3384 >>> list(x for x in rs), list(x for x in rs.fastdesc()) # without _asclist
3339 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
3385 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
3340 >>> assert not rs._asclist
3386 >>> assert not rs._asclist
3341 >>> len(rs)
3387 >>> len(rs)
3342 5
3388 5
3343 >>> [x for x in rs], [x for x in rs.fastdesc()]
3389 >>> [x for x in rs], [x for x in rs.fastdesc()]
3344 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
3390 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
3345 >>> assert rs._asclist
3391 >>> assert rs._asclist
3346
3392
3347 iterate ascending without fastasc:
3393 iterate ascending without fastasc:
3348 >>> rs = addset(xs, generatorset(ys), ascending=True)
3394 >>> rs = addset(xs, generatorset(ys), ascending=True)
3349 >>> assert rs.fastasc is None
3395 >>> assert rs.fastasc is None
3350 >>> [x for x in rs]
3396 >>> [x for x in rs]
3351 [0, 2, 3, 4, 5]
3397 [0, 2, 3, 4, 5]
3352
3398
3353 iterate descending without fastdesc:
3399 iterate descending without fastdesc:
3354 >>> rs = addset(generatorset(xs), ys, ascending=False)
3400 >>> rs = addset(generatorset(xs), ys, ascending=False)
3355 >>> assert rs.fastdesc is None
3401 >>> assert rs.fastdesc is None
3356 >>> [x for x in rs]
3402 >>> [x for x in rs]
3357 [5, 4, 3, 2, 0]
3403 [5, 4, 3, 2, 0]
3358 """
3404 """
3359 def __init__(self, revs1, revs2, ascending=None):
3405 def __init__(self, revs1, revs2, ascending=None):
3360 self._r1 = revs1
3406 self._r1 = revs1
3361 self._r2 = revs2
3407 self._r2 = revs2
3362 self._iter = None
3408 self._iter = None
3363 self._ascending = ascending
3409 self._ascending = ascending
3364 self._genlist = None
3410 self._genlist = None
3365 self._asclist = None
3411 self._asclist = None
3366
3412
3367 def __len__(self):
3413 def __len__(self):
3368 return len(self._list)
3414 return len(self._list)
3369
3415
3370 def __nonzero__(self):
3416 def __nonzero__(self):
3371 return bool(self._r1) or bool(self._r2)
3417 return bool(self._r1) or bool(self._r2)
3372
3418
3373 @util.propertycache
3419 @util.propertycache
3374 def _list(self):
3420 def _list(self):
3375 if not self._genlist:
3421 if not self._genlist:
3376 self._genlist = baseset(iter(self))
3422 self._genlist = baseset(iter(self))
3377 return self._genlist
3423 return self._genlist
3378
3424
3379 def __iter__(self):
3425 def __iter__(self):
3380 """Iterate over both collections without repeating elements
3426 """Iterate over both collections without repeating elements
3381
3427
3382 If the ascending attribute is not set, iterate over the first one and
3428 If the ascending attribute is not set, iterate over the first one and
3383 then over the second one checking for membership on the first one so we
3429 then over the second one checking for membership on the first one so we
3384 don't yield any duplicates.
3430 don't yield any duplicates.
3385
3431
3386 If the ascending attribute is set, iterate over both collections at the
3432 If the ascending attribute is set, iterate over both collections at the
3387 same time, yielding only one value at a time in the given order.
3433 same time, yielding only one value at a time in the given order.
3388 """
3434 """
3389 if self._ascending is None:
3435 if self._ascending is None:
3390 if self._genlist:
3436 if self._genlist:
3391 return iter(self._genlist)
3437 return iter(self._genlist)
3392 def arbitraryordergen():
3438 def arbitraryordergen():
3393 for r in self._r1:
3439 for r in self._r1:
3394 yield r
3440 yield r
3395 inr1 = self._r1.__contains__
3441 inr1 = self._r1.__contains__
3396 for r in self._r2:
3442 for r in self._r2:
3397 if not inr1(r):
3443 if not inr1(r):
3398 yield r
3444 yield r
3399 return arbitraryordergen()
3445 return arbitraryordergen()
3400 # try to use our own fast iterator if it exists
3446 # try to use our own fast iterator if it exists
3401 self._trysetasclist()
3447 self._trysetasclist()
3402 if self._ascending:
3448 if self._ascending:
3403 attr = 'fastasc'
3449 attr = 'fastasc'
3404 else:
3450 else:
3405 attr = 'fastdesc'
3451 attr = 'fastdesc'
3406 it = getattr(self, attr)
3452 it = getattr(self, attr)
3407 if it is not None:
3453 if it is not None:
3408 return it()
3454 return it()
3409 # maybe half of the components support fast iteration
3455 # maybe half of the components support fast iteration
3410 # get iterator for _r1
3456 # get iterator for _r1
3411 iter1 = getattr(self._r1, attr)
3457 iter1 = getattr(self._r1, attr)
3412 if iter1 is None:
3458 if iter1 is None:
3413 # let's avoid side effects (not sure it matters)
3459 # let's avoid side effects (not sure it matters)
3414 iter1 = iter(sorted(self._r1, reverse=not self._ascending))
3460 iter1 = iter(sorted(self._r1, reverse=not self._ascending))
3415 else:
3461 else:
3416 iter1 = iter1()
3462 iter1 = iter1()
3417 # get iterator for _r2
3463 # get iterator for _r2
3418 iter2 = getattr(self._r2, attr)
3464 iter2 = getattr(self._r2, attr)
3419 if iter2 is None:
3465 if iter2 is None:
3420 # let's avoid side effects (not sure it matters)
3466 # let's avoid side effects (not sure it matters)
3421 iter2 = iter(sorted(self._r2, reverse=not self._ascending))
3467 iter2 = iter(sorted(self._r2, reverse=not self._ascending))
3422 else:
3468 else:
3423 iter2 = iter2()
3469 iter2 = iter2()
3424 return _iterordered(self._ascending, iter1, iter2)
3470 return _iterordered(self._ascending, iter1, iter2)
3425
3471
3426 def _trysetasclist(self):
3472 def _trysetasclist(self):
3427 """populate the _asclist attribute if possible and necessary"""
3473 """populate the _asclist attribute if possible and necessary"""
3428 if self._genlist is not None and self._asclist is None:
3474 if self._genlist is not None and self._asclist is None:
3429 self._asclist = sorted(self._genlist)
3475 self._asclist = sorted(self._genlist)
3430
3476
3431 @property
3477 @property
3432 def fastasc(self):
3478 def fastasc(self):
3433 self._trysetasclist()
3479 self._trysetasclist()
3434 if self._asclist is not None:
3480 if self._asclist is not None:
3435 return self._asclist.__iter__
3481 return self._asclist.__iter__
3436 iter1 = self._r1.fastasc
3482 iter1 = self._r1.fastasc
3437 iter2 = self._r2.fastasc
3483 iter2 = self._r2.fastasc
3438 if None in (iter1, iter2):
3484 if None in (iter1, iter2):
3439 return None
3485 return None
3440 return lambda: _iterordered(True, iter1(), iter2())
3486 return lambda: _iterordered(True, iter1(), iter2())
3441
3487
3442 @property
3488 @property
3443 def fastdesc(self):
3489 def fastdesc(self):
3444 self._trysetasclist()
3490 self._trysetasclist()
3445 if self._asclist is not None:
3491 if self._asclist is not None:
3446 return self._asclist.__reversed__
3492 return self._asclist.__reversed__
3447 iter1 = self._r1.fastdesc
3493 iter1 = self._r1.fastdesc
3448 iter2 = self._r2.fastdesc
3494 iter2 = self._r2.fastdesc
3449 if None in (iter1, iter2):
3495 if None in (iter1, iter2):
3450 return None
3496 return None
3451 return lambda: _iterordered(False, iter1(), iter2())
3497 return lambda: _iterordered(False, iter1(), iter2())
3452
3498
3453 def __contains__(self, x):
3499 def __contains__(self, x):
3454 return x in self._r1 or x in self._r2
3500 return x in self._r1 or x in self._r2
3455
3501
3456 def sort(self, reverse=False):
3502 def sort(self, reverse=False):
3457 """Sort the added set
3503 """Sort the added set
3458
3504
3459 For this we use the cached list with all the generated values and if we
3505 For this we use the cached list with all the generated values and if we
3460 know they are ascending or descending we can sort them in a smart way.
3506 know they are ascending or descending we can sort them in a smart way.
3461 """
3507 """
3462 self._ascending = not reverse
3508 self._ascending = not reverse
3463
3509
3464 def isascending(self):
3510 def isascending(self):
3465 return self._ascending is not None and self._ascending
3511 return self._ascending is not None and self._ascending
3466
3512
3467 def isdescending(self):
3513 def isdescending(self):
3468 return self._ascending is not None and not self._ascending
3514 return self._ascending is not None and not self._ascending
3469
3515
3470 def istopo(self):
3516 def istopo(self):
3471 # not worth the trouble asserting if the two sets combined are still
3517 # not worth the trouble asserting if the two sets combined are still
3472 # in topological order. Use the sort() predicate to explicitly sort
3518 # in topological order. Use the sort() predicate to explicitly sort
3473 # again instead.
3519 # again instead.
3474 return False
3520 return False
3475
3521
3476 def reverse(self):
3522 def reverse(self):
3477 if self._ascending is None:
3523 if self._ascending is None:
3478 self._list.reverse()
3524 self._list.reverse()
3479 else:
3525 else:
3480 self._ascending = not self._ascending
3526 self._ascending = not self._ascending
3481
3527
3482 def first(self):
3528 def first(self):
3483 for x in self:
3529 for x in self:
3484 return x
3530 return x
3485 return None
3531 return None
3486
3532
3487 def last(self):
3533 def last(self):
3488 self.reverse()
3534 self.reverse()
3489 val = self.first()
3535 val = self.first()
3490 self.reverse()
3536 self.reverse()
3491 return val
3537 return val
3492
3538
3493 def __repr__(self):
3539 def __repr__(self):
3494 d = {None: '', False: '-', True: '+'}[self._ascending]
3540 d = {None: '', False: '-', True: '+'}[self._ascending]
3495 return '<%s%s %r, %r>' % (type(self).__name__, d, self._r1, self._r2)
3541 return '<%s%s %r, %r>' % (type(self).__name__, d, self._r1, self._r2)
3496
3542
3497 class generatorset(abstractsmartset):
3543 class generatorset(abstractsmartset):
3498 """Wrap a generator for lazy iteration
3544 """Wrap a generator for lazy iteration
3499
3545
3500 Wrapper structure for generators that provides lazy membership and can
3546 Wrapper structure for generators that provides lazy membership and can
3501 be iterated more than once.
3547 be iterated more than once.
3502 When asked for membership, it generates values until it either finds the
3548 When asked for membership, it generates values until it either finds the
3503 requested one or has gone through all the elements in the generator.
3549 requested one or has gone through all the elements in the generator.
3504 """
3550 """
3505 def __init__(self, gen, iterasc=None):
3551 def __init__(self, gen, iterasc=None):
3506 """
3552 """
3507 gen: a generator producing the values for the generatorset.
3553 gen: a generator producing the values for the generatorset.
3508 """
3554 """
3509 self._gen = gen
3555 self._gen = gen
3510 self._asclist = None
3556 self._asclist = None
3511 self._cache = {}
3557 self._cache = {}
3512 self._genlist = []
3558 self._genlist = []
3513 self._finished = False
3559 self._finished = False
3514 self._ascending = True
3560 self._ascending = True
3515 if iterasc is not None:
3561 if iterasc is not None:
3516 if iterasc:
3562 if iterasc:
3517 self.fastasc = self._iterator
3563 self.fastasc = self._iterator
3518 self.__contains__ = self._asccontains
3564 self.__contains__ = self._asccontains
3519 else:
3565 else:
3520 self.fastdesc = self._iterator
3566 self.fastdesc = self._iterator
3521 self.__contains__ = self._desccontains
3567 self.__contains__ = self._desccontains
3522
3568
3523 def __nonzero__(self):
3569 def __nonzero__(self):
3524 # Do not use 'for r in self' because it will enforce the iteration
3570 # Do not use 'for r in self' because it will enforce the iteration
3525 # order (default ascending), possibly unrolling a whole descending
3571 # order (default ascending), possibly unrolling a whole descending
3526 # iterator.
3572 # iterator.
3527 if self._genlist:
3573 if self._genlist:
3528 return True
3574 return True
3529 for r in self._consumegen():
3575 for r in self._consumegen():
3530 return True
3576 return True
3531 return False
3577 return False
3532
3578
3533 def __contains__(self, x):
3579 def __contains__(self, x):
3534 if x in self._cache:
3580 if x in self._cache:
3535 return self._cache[x]
3581 return self._cache[x]
3536
3582
3537 # Use new values only, as existing values would be cached.
3583 # Use new values only, as existing values would be cached.
3538 for l in self._consumegen():
3584 for l in self._consumegen():
3539 if l == x:
3585 if l == x:
3540 return True
3586 return True
3541
3587
3542 self._cache[x] = False
3588 self._cache[x] = False
3543 return False
3589 return False
3544
3590
3545 def _asccontains(self, x):
3591 def _asccontains(self, x):
3546 """version of contains optimised for ascending generator"""
3592 """version of contains optimised for ascending generator"""
3547 if x in self._cache:
3593 if x in self._cache:
3548 return self._cache[x]
3594 return self._cache[x]
3549
3595
3550 # Use new values only, as existing values would be cached.
3596 # Use new values only, as existing values would be cached.
3551 for l in self._consumegen():
3597 for l in self._consumegen():
3552 if l == x:
3598 if l == x:
3553 return True
3599 return True
3554 if l > x:
3600 if l > x:
3555 break
3601 break
3556
3602
3557 self._cache[x] = False
3603 self._cache[x] = False
3558 return False
3604 return False
3559
3605
3560 def _desccontains(self, x):
3606 def _desccontains(self, x):
3561 """version of contains optimised for descending generator"""
3607 """version of contains optimised for descending generator"""
3562 if x in self._cache:
3608 if x in self._cache:
3563 return self._cache[x]
3609 return self._cache[x]
3564
3610
3565 # Use new values only, as existing values would be cached.
3611 # Use new values only, as existing values would be cached.
3566 for l in self._consumegen():
3612 for l in self._consumegen():
3567 if l == x:
3613 if l == x:
3568 return True
3614 return True
3569 if l < x:
3615 if l < x:
3570 break
3616 break
3571
3617
3572 self._cache[x] = False
3618 self._cache[x] = False
3573 return False
3619 return False
3574
3620
3575 def __iter__(self):
3621 def __iter__(self):
3576 if self._ascending:
3622 if self._ascending:
3577 it = self.fastasc
3623 it = self.fastasc
3578 else:
3624 else:
3579 it = self.fastdesc
3625 it = self.fastdesc
3580 if it is not None:
3626 if it is not None:
3581 return it()
3627 return it()
3582 # we need to consume the iterator
3628 # we need to consume the iterator
3583 for x in self._consumegen():
3629 for x in self._consumegen():
3584 pass
3630 pass
3585 # recall the same code
3631 # recall the same code
3586 return iter(self)
3632 return iter(self)
3587
3633
3588 def _iterator(self):
3634 def _iterator(self):
3589 if self._finished:
3635 if self._finished:
3590 return iter(self._genlist)
3636 return iter(self._genlist)
3591
3637
3592 # We have to use this complex iteration strategy to allow multiple
3638 # We have to use this complex iteration strategy to allow multiple
3593 # iterations at the same time. We need to be able to catch revisions
3639 # iterations at the same time. We need to be able to catch revisions
3594 # removed from _consumegen and added to genlist in another instance.
3640 # removed from _consumegen and added to genlist in another instance.
3595 #
3641 #
3596 # Getting rid of it would provide about a 15% speedup on this
3642 # Getting rid of it would provide about a 15% speedup on this
3597 # iteration.
3643 # iteration.
3598 genlist = self._genlist
3644 genlist = self._genlist
3599 nextrev = self._consumegen().next
3645 nextrev = self._consumegen().next
3600 _len = len # cache global lookup
3646 _len = len # cache global lookup
3601 def gen():
3647 def gen():
3602 i = 0
3648 i = 0
3603 while True:
3649 while True:
3604 if i < _len(genlist):
3650 if i < _len(genlist):
3605 yield genlist[i]
3651 yield genlist[i]
3606 else:
3652 else:
3607 yield nextrev()
3653 yield nextrev()
3608 i += 1
3654 i += 1
3609 return gen()
3655 return gen()
3610
3656
3611 def _consumegen(self):
3657 def _consumegen(self):
3612 cache = self._cache
3658 cache = self._cache
3613 genlist = self._genlist.append
3659 genlist = self._genlist.append
3614 for item in self._gen:
3660 for item in self._gen:
3615 cache[item] = True
3661 cache[item] = True
3616 genlist(item)
3662 genlist(item)
3617 yield item
3663 yield item
3618 if not self._finished:
3664 if not self._finished:
3619 self._finished = True
3665 self._finished = True
3620 asc = self._genlist[:]
3666 asc = self._genlist[:]
3621 asc.sort()
3667 asc.sort()
3622 self._asclist = asc
3668 self._asclist = asc
3623 self.fastasc = asc.__iter__
3669 self.fastasc = asc.__iter__
3624 self.fastdesc = asc.__reversed__
3670 self.fastdesc = asc.__reversed__
3625
3671
3626 def __len__(self):
3672 def __len__(self):
3627 for x in self._consumegen():
3673 for x in self._consumegen():
3628 pass
3674 pass
3629 return len(self._genlist)
3675 return len(self._genlist)
3630
3676
3631 def sort(self, reverse=False):
3677 def sort(self, reverse=False):
3632 self._ascending = not reverse
3678 self._ascending = not reverse
3633
3679
3634 def reverse(self):
3680 def reverse(self):
3635 self._ascending = not self._ascending
3681 self._ascending = not self._ascending
3636
3682
3637 def isascending(self):
3683 def isascending(self):
3638 return self._ascending
3684 return self._ascending
3639
3685
3640 def isdescending(self):
3686 def isdescending(self):
3641 return not self._ascending
3687 return not self._ascending
3642
3688
3643 def istopo(self):
3689 def istopo(self):
3644 # not worth the trouble asserting if the two sets combined are still
3690 # not worth the trouble asserting if the two sets combined are still
3645 # in topological order. Use the sort() predicate to explicitly sort
3691 # in topological order. Use the sort() predicate to explicitly sort
3646 # again instead.
3692 # again instead.
3647 return False
3693 return False
3648
3694
3649 def first(self):
3695 def first(self):
3650 if self._ascending:
3696 if self._ascending:
3651 it = self.fastasc
3697 it = self.fastasc
3652 else:
3698 else:
3653 it = self.fastdesc
3699 it = self.fastdesc
3654 if it is None:
3700 if it is None:
3655 # we need to consume all and try again
3701 # we need to consume all and try again
3656 for x in self._consumegen():
3702 for x in self._consumegen():
3657 pass
3703 pass
3658 return self.first()
3704 return self.first()
3659 return next(it(), None)
3705 return next(it(), None)
3660
3706
3661 def last(self):
3707 def last(self):
3662 if self._ascending:
3708 if self._ascending:
3663 it = self.fastdesc
3709 it = self.fastdesc
3664 else:
3710 else:
3665 it = self.fastasc
3711 it = self.fastasc
3666 if it is None:
3712 if it is None:
3667 # we need to consume all and try again
3713 # we need to consume all and try again
3668 for x in self._consumegen():
3714 for x in self._consumegen():
3669 pass
3715 pass
3670 return self.last()
3716 return self.last()
3671 return next(it(), None)
3717 return next(it(), None)
3672
3718
3673 def __repr__(self):
3719 def __repr__(self):
3674 d = {False: '-', True: '+'}[self._ascending]
3720 d = {False: '-', True: '+'}[self._ascending]
3675 return '<%s%s>' % (type(self).__name__, d)
3721 return '<%s%s>' % (type(self).__name__, d)
3676
3722
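generatorset combines three ideas: values are pulled from the wrapped generator only on demand, every produced value is cached for membership tests, and once the generator is exhausted a sorted copy backs fastasc/fastdesc. A much smaller sketch of the first two ideas follows; the name lazyset is made up, and the interleaved-iteration and ordering machinery of the real class is deliberately left out:

class lazyset(object):
    """Toy re-iterable generator wrapper with cached membership."""
    def __init__(self, gen):
        self._gen = gen
        self._seen = []      # values produced so far, in generation order
        self._cache = {}     # value -> membership answer already known
        self._done = False

    def _consume(self):
        # pull values out of the wrapped generator once, recording each one
        for value in self._gen:
            self._cache[value] = True
            self._seen.append(value)
            yield value
        self._done = True

    def __iter__(self):
        # replay everything already produced, then keep consuming lazily
        for value in self._seen:
            yield value
        if not self._done:
            for value in self._consume():
                yield value

    def __contains__(self, value):
        if value in self._cache:
            return self._cache[value]
        for produced in self._consume():
            if produced == value:
                return True
        self._cache[value] = False
        return False

# Membership stops consuming as soon as the value is seen; iteration replays
# the cached prefix before pulling the remaining values.
squares = lazyset(x * x for x in range(100))
assert 16 in squares
assert list(squares)[:5] == [0, 1, 4, 9, 16]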
3677 class spanset(abstractsmartset):
3723 class spanset(abstractsmartset):
3678 """Duck type for baseset class which represents a range of revisions and
3724 """Duck type for baseset class which represents a range of revisions and
3679 can work lazily and without having all the range in memory
3725 can work lazily and without having all the range in memory
3680
3726
3681 Note that spanset(x, y) behaves almost like xrange(x, y) except for two
3727 Note that spanset(x, y) behaves almost like xrange(x, y) except for two
3682 notable points:
3728 notable points:
3683 - when x > y it will be automatically descending,
3729 - when x > y it will be automatically descending,
3684 - revisions filtered with this repoview will be skipped.
3730 - revisions filtered with this repoview will be skipped.
3685
3731
3686 """
3732 """
3687 def __init__(self, repo, start=0, end=None):
3733 def __init__(self, repo, start=0, end=None):
3688 """
3734 """
3689 start: first revision included in the set
3735 start: first revision included in the set
3690 (defaults to 0)
3736 (defaults to 0)
3691 end: first revision excluded (last+1)
3737 end: first revision excluded (last+1)
3692 (defaults to len(repo))
3738 (defaults to len(repo))
3693
3739
3694 Spanset will be descending if `end` < `start`.
3740 Spanset will be descending if `end` < `start`.
3695 """
3741 """
3696 if end is None:
3742 if end is None:
3697 end = len(repo)
3743 end = len(repo)
3698 self._ascending = start <= end
3744 self._ascending = start <= end
3699 if not self._ascending:
3745 if not self._ascending:
3700 start, end = end + 1, start + 1
3746 start, end = end + 1, start + 1
3701 self._start = start
3747 self._start = start
3702 self._end = end
3748 self._end = end
3703 self._hiddenrevs = repo.changelog.filteredrevs
3749 self._hiddenrevs = repo.changelog.filteredrevs
3704
3750
3705 def sort(self, reverse=False):
3751 def sort(self, reverse=False):
3706 self._ascending = not reverse
3752 self._ascending = not reverse
3707
3753
3708 def reverse(self):
3754 def reverse(self):
3709 self._ascending = not self._ascending
3755 self._ascending = not self._ascending
3710
3756
3711 def istopo(self):
3757 def istopo(self):
3712 # not worth the trouble asserting if the two sets combined are still
3758 # not worth the trouble asserting if the two sets combined are still
3713 # in topological order. Use the sort() predicate to explicitly sort
3759 # in topological order. Use the sort() predicate to explicitly sort
3714 # again instead.
3760 # again instead.
3715 return False
3761 return False
3716
3762
3717 def _iterfilter(self, iterrange):
3763 def _iterfilter(self, iterrange):
3718 s = self._hiddenrevs
3764 s = self._hiddenrevs
3719 for r in iterrange:
3765 for r in iterrange:
3720 if r not in s:
3766 if r not in s:
3721 yield r
3767 yield r
3722
3768
3723 def __iter__(self):
3769 def __iter__(self):
3724 if self._ascending:
3770 if self._ascending:
3725 return self.fastasc()
3771 return self.fastasc()
3726 else:
3772 else:
3727 return self.fastdesc()
3773 return self.fastdesc()
3728
3774
3729 def fastasc(self):
3775 def fastasc(self):
3730 iterrange = xrange(self._start, self._end)
3776 iterrange = xrange(self._start, self._end)
3731 if self._hiddenrevs:
3777 if self._hiddenrevs:
3732 return self._iterfilter(iterrange)
3778 return self._iterfilter(iterrange)
3733 return iter(iterrange)
3779 return iter(iterrange)
3734
3780
3735 def fastdesc(self):
3781 def fastdesc(self):
3736 iterrange = xrange(self._end - 1, self._start - 1, -1)
3782 iterrange = xrange(self._end - 1, self._start - 1, -1)
3737 if self._hiddenrevs:
3783 if self._hiddenrevs:
3738 return self._iterfilter(iterrange)
3784 return self._iterfilter(iterrange)
3739 return iter(iterrange)
3785 return iter(iterrange)
3740
3786
3741 def __contains__(self, rev):
3787 def __contains__(self, rev):
3742 hidden = self._hiddenrevs
3788 hidden = self._hiddenrevs
3743 return ((self._start <= rev < self._end)
3789 return ((self._start <= rev < self._end)
3744 and not (hidden and rev in hidden))
3790 and not (hidden and rev in hidden))
3745
3791
3746 def __nonzero__(self):
3792 def __nonzero__(self):
3747 for r in self:
3793 for r in self:
3748 return True
3794 return True
3749 return False
3795 return False
3750
3796
3751 def __len__(self):
3797 def __len__(self):
3752 if not self._hiddenrevs:
3798 if not self._hiddenrevs:
3753 return abs(self._end - self._start)
3799 return abs(self._end - self._start)
3754 else:
3800 else:
3755 count = 0
3801 count = 0
3756 start = self._start
3802 start = self._start
3757 end = self._end
3803 end = self._end
3758 for rev in self._hiddenrevs:
3804 for rev in self._hiddenrevs:
3759 if (end < rev <= start) or (start <= rev < end):
3805 if (end < rev <= start) or (start <= rev < end):
3760 count += 1
3806 count += 1
3761 return abs(self._end - self._start) - count
3807 return abs(self._end - self._start) - count
3762
3808
3763 def isascending(self):
3809 def isascending(self):
3764 return self._ascending
3810 return self._ascending
3765
3811
3766 def isdescending(self):
3812 def isdescending(self):
3767 return not self._ascending
3813 return not self._ascending
3768
3814
3769 def first(self):
3815 def first(self):
3770 if self._ascending:
3816 if self._ascending:
3771 it = self.fastasc
3817 it = self.fastasc
3772 else:
3818 else:
3773 it = self.fastdesc
3819 it = self.fastdesc
3774 for x in it():
3820 for x in it():
3775 return x
3821 return x
3776 return None
3822 return None
3777
3823
3778 def last(self):
3824 def last(self):
3779 if self._ascending:
3825 if self._ascending:
3780 it = self.fastdesc
3826 it = self.fastdesc
3781 else:
3827 else:
3782 it = self.fastasc
3828 it = self.fastasc
3783 for x in it():
3829 for x in it():
3784 return x
3830 return x
3785 return None
3831 return None
3786
3832
3787 def __repr__(self):
3833 def __repr__(self):
3788 d = {False: '-', True: '+'}[self._ascending]
3834 d = {False: '-', True: '+'}[self._ascending]
3789 return '<%s%s %d:%d>' % (type(self).__name__, d,
3835 return '<%s%s %d:%d>' % (type(self).__name__, d,
3790 self._start, self._end - 1)
3836 self._start, self._end - 1)
3791
3837
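spanset never materialises its revisions: iteration is a plain (x)range walk that skips filtered revisions, and sort()/reverse() merely flip a direction flag. The standalone sketch below shows that shape with made-up names (rangeset, hidden) and no repository involved:

class rangeset(object):
    """Toy lazy revision range [start, end) that skips a 'hidden' set."""
    def __init__(self, start, end, hidden=frozenset()):
        self._start, self._end = start, end
        self._hidden = hidden
        self._ascending = True

    def reverse(self):
        # direction is only a flag; nothing is materialised or copied
        self._ascending = not self._ascending

    def __iter__(self):
        if self._ascending:
            rng = range(self._start, self._end)
        else:
            rng = range(self._end - 1, self._start - 1, -1)
        return (r for r in rng if r not in self._hidden)

    def __contains__(self, rev):
        return self._start <= rev < self._end and rev not in self._hidden

    def __len__(self):
        hidden_inside = sum(1 for r in self._hidden if self._start <= r < self._end)
        return (self._end - self._start) - hidden_inside

# Usage: revisions 0..9 with 3 and 7 filtered out.
s = rangeset(0, 10, hidden={3, 7})
assert list(s) == [0, 1, 2, 4, 5, 6, 8, 9] and len(s) == 8 and 7 not in s
s.reverse()
assert next(iter(s)) == 9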
3792 class fullreposet(spanset):
3838 class fullreposet(spanset):
3793 """a set containing all revisions in the repo
3839 """a set containing all revisions in the repo
3794
3840
3795 This class exists to host special optimization and magic to handle virtual
3841 This class exists to host special optimization and magic to handle virtual
3796 revisions such as "null".
3842 revisions such as "null".
3797 """
3843 """
3798
3844
3799 def __init__(self, repo):
3845 def __init__(self, repo):
3800 super(fullreposet, self).__init__(repo)
3846 super(fullreposet, self).__init__(repo)
3801
3847
3802 def __and__(self, other):
3848 def __and__(self, other):
3803 """As self contains the whole repo, all of the other set should also be
3849 """As self contains the whole repo, all of the other set should also be
3804 in self. Therefore `self & other = other`.
3850 in self. Therefore `self & other = other`.
3805
3851
3806 This boldly assumes the other contains valid revs only.
3852 This boldly assumes the other contains valid revs only.
3807 """
3853 """
3808 # other is not a smartset, make it so
3854 # other is not a smartset, make it so
3809 if not util.safehasattr(other, 'isascending'):
3855 if not util.safehasattr(other, 'isascending'):
3810 # filter out hidden revisions
3856 # filter out hidden revisions
3811 # (this boldly assumes all smartsets are pure)
3857 # (this boldly assumes all smartsets are pure)
3812 #
3858 #
3813 # `other` was used with "&", let's assume this is a set like
3859 # `other` was used with "&", let's assume this is a set like
3814 # object.
3860 # object.
3815 other = baseset(other - self._hiddenrevs)
3861 other = baseset(other - self._hiddenrevs)
3816
3862
3817 other.sort(reverse=self.isdescending())
3863 other.sort(reverse=self.isdescending())
3818 return other
3864 return other
3819
3865
3820 def prettyformatset(revs):
3866 def prettyformatset(revs):
3821 lines = []
3867 lines = []
3822 rs = repr(revs)
3868 rs = repr(revs)
3823 p = 0
3869 p = 0
3824 while p < len(rs):
3870 while p < len(rs):
3825 q = rs.find('<', p + 1)
3871 q = rs.find('<', p + 1)
3826 if q < 0:
3872 if q < 0:
3827 q = len(rs)
3873 q = len(rs)
3828 l = rs.count('<', 0, p) - rs.count('>', 0, p)
3874 l = rs.count('<', 0, p) - rs.count('>', 0, p)
3829 assert l >= 0
3875 assert l >= 0
3830 lines.append((l, rs[p:q].rstrip()))
3876 lines.append((l, rs[p:q].rstrip()))
3831 p = q
3877 p = q
3832 return '\n'.join(' ' * l + s for l, s in lines)
3878 return '\n'.join(' ' * l + s for l, s in lines)
3833
3879
3834 def loadpredicate(ui, extname, registrarobj):
3880 def loadpredicate(ui, extname, registrarobj):
3835 """Load revset predicates from specified registrarobj
3881 """Load revset predicates from specified registrarobj
3836 """
3882 """
3837 for name, func in registrarobj._table.iteritems():
3883 for name, func in registrarobj._table.iteritems():
3838 symbols[name] = func
3884 symbols[name] = func
3839 if func._safe:
3885 if func._safe:
3840 safesymbols.add(name)
3886 safesymbols.add(name)
3841
3887
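For context, loadpredicate() above is what ultimately installs predicates that extensions declare through the registrar module. A minimal sketch of the extension side (the predicate name draftonly and its body are invented; the safe flag corresponds to the func._safe attribute checked above):

from mercurial import registrar

revsetpredicate = registrar.revsetpredicate()

@revsetpredicate('draftonly()', safe=True)
def draftonly(repo, subset, x):
    """Changesets in the draft phase (toy example)."""
    # repo.revs() returns a smartset, so the usual set operators apply
    return subset & repo.revs('draft()')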
3842 # load built-in predicates explicitly to setup safesymbols
3888 # load built-in predicates explicitly to setup safesymbols
3843 loadpredicate(None, None, predicate)
3889 loadpredicate(None, None, predicate)
3844
3890
3845 # tell hggettext to extract docstrings from these functions:
3891 # tell hggettext to extract docstrings from these functions:
3846 i18nfunctions = symbols.values()
3892 i18nfunctions = symbols.values()
@@ -1,640 +1,761 b''
1 $ HGMERGE=true; export HGMERGE
1 $ HGMERGE=true; export HGMERGE
2
2
3 init
3 init
4
4
5 $ hg init repo
5 $ hg init repo
6 $ cd repo
6 $ cd repo
7
7
8 commit
8 commit
9
9
10 $ echo 'a' > a
10 $ echo 'a' > a
11 $ hg ci -A -m test -u nobody -d '1 0'
11 $ hg ci -A -m test -u nobody -d '1 0'
12 adding a
12 adding a
13
13
14 annotate -c
14 annotate -c
15
15
16 $ hg annotate -c a
16 $ hg annotate -c a
17 8435f90966e4: a
17 8435f90966e4: a
18
18
19 annotate -cl
19 annotate -cl
20
20
21 $ hg annotate -cl a
21 $ hg annotate -cl a
22 8435f90966e4:1: a
22 8435f90966e4:1: a
23
23
24 annotate -d
24 annotate -d
25
25
26 $ hg annotate -d a
26 $ hg annotate -d a
27 Thu Jan 01 00:00:01 1970 +0000: a
27 Thu Jan 01 00:00:01 1970 +0000: a
28
28
29 annotate -n
29 annotate -n
30
30
31 $ hg annotate -n a
31 $ hg annotate -n a
32 0: a
32 0: a
33
33
34 annotate -nl
34 annotate -nl
35
35
36 $ hg annotate -nl a
36 $ hg annotate -nl a
37 0:1: a
37 0:1: a
38
38
39 annotate -u
39 annotate -u
40
40
41 $ hg annotate -u a
41 $ hg annotate -u a
42 nobody: a
42 nobody: a
43
43
44 annotate -cdnu
44 annotate -cdnu
45
45
46 $ hg annotate -cdnu a
46 $ hg annotate -cdnu a
47 nobody 0 8435f90966e4 Thu Jan 01 00:00:01 1970 +0000: a
47 nobody 0 8435f90966e4 Thu Jan 01 00:00:01 1970 +0000: a
48
48
49 annotate -cdnul
49 annotate -cdnul
50
50
51 $ hg annotate -cdnul a
51 $ hg annotate -cdnul a
52 nobody 0 8435f90966e4 Thu Jan 01 00:00:01 1970 +0000:1: a
52 nobody 0 8435f90966e4 Thu Jan 01 00:00:01 1970 +0000:1: a
53
53
54 annotate (JSON)
54 annotate (JSON)
55
55
56 $ hg annotate -Tjson a
56 $ hg annotate -Tjson a
57 [
57 [
58 {
58 {
59 "line": "a\n",
59 "line": "a\n",
60 "rev": 0
60 "rev": 0
61 }
61 }
62 ]
62 ]
63
63
64 $ hg annotate -Tjson -cdfnul a
64 $ hg annotate -Tjson -cdfnul a
65 [
65 [
66 {
66 {
67 "date": [1.0, 0],
67 "date": [1.0, 0],
68 "file": "a",
68 "file": "a",
69 "line": "a\n",
69 "line": "a\n",
70 "line_number": 1,
70 "line_number": 1,
71 "node": "8435f90966e442695d2ded29fdade2bac5ad8065",
71 "node": "8435f90966e442695d2ded29fdade2bac5ad8065",
72 "rev": 0,
72 "rev": 0,
73 "user": "nobody"
73 "user": "nobody"
74 }
74 }
75 ]
75 ]
76
76
77 $ cat <<EOF >>a
77 $ cat <<EOF >>a
78 > a
78 > a
79 > a
79 > a
80 > EOF
80 > EOF
81 $ hg ci -ma1 -d '1 0'
81 $ hg ci -ma1 -d '1 0'
82 $ hg cp a b
82 $ hg cp a b
83 $ hg ci -mb -d '1 0'
83 $ hg ci -mb -d '1 0'
84 $ cat <<EOF >> b
84 $ cat <<EOF >> b
85 > b4
85 > b4
86 > b5
86 > b5
87 > b6
87 > b6
88 > EOF
88 > EOF
89 $ hg ci -mb2 -d '2 0'
89 $ hg ci -mb2 -d '2 0'
90
90
91 annotate -n b
91 annotate -n b
92
92
93 $ hg annotate -n b
93 $ hg annotate -n b
94 0: a
94 0: a
95 1: a
95 1: a
96 1: a
96 1: a
97 3: b4
97 3: b4
98 3: b5
98 3: b5
99 3: b6
99 3: b6
100
100
101 annotate --no-follow b
101 annotate --no-follow b
102
102
103 $ hg annotate --no-follow b
103 $ hg annotate --no-follow b
104 2: a
104 2: a
105 2: a
105 2: a
106 2: a
106 2: a
107 3: b4
107 3: b4
108 3: b5
108 3: b5
109 3: b6
109 3: b6
110
110
111 annotate -nl b
111 annotate -nl b
112
112
113 $ hg annotate -nl b
113 $ hg annotate -nl b
114 0:1: a
114 0:1: a
115 1:2: a
115 1:2: a
116 1:3: a
116 1:3: a
117 3:4: b4
117 3:4: b4
118 3:5: b5
118 3:5: b5
119 3:6: b6
119 3:6: b6
120
120
121 annotate -nf b
121 annotate -nf b
122
122
123 $ hg annotate -nf b
123 $ hg annotate -nf b
124 0 a: a
124 0 a: a
125 1 a: a
125 1 a: a
126 1 a: a
126 1 a: a
127 3 b: b4
127 3 b: b4
128 3 b: b5
128 3 b: b5
129 3 b: b6
129 3 b: b6
130
130
131 annotate -nlf b
131 annotate -nlf b
132
132
133 $ hg annotate -nlf b
133 $ hg annotate -nlf b
134 0 a:1: a
134 0 a:1: a
135 1 a:2: a
135 1 a:2: a
136 1 a:3: a
136 1 a:3: a
137 3 b:4: b4
137 3 b:4: b4
138 3 b:5: b5
138 3 b:5: b5
139 3 b:6: b6
139 3 b:6: b6
140
140
141 $ hg up -C 2
141 $ hg up -C 2
142 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
142 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
143 $ cat <<EOF >> b
143 $ cat <<EOF >> b
144 > b4
144 > b4
145 > c
145 > c
146 > b5
146 > b5
147 > EOF
147 > EOF
148 $ hg ci -mb2.1 -d '2 0'
148 $ hg ci -mb2.1 -d '2 0'
149 created new head
149 created new head
150 $ hg merge
150 $ hg merge
151 merging b
151 merging b
152 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
152 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
153 (branch merge, don't forget to commit)
153 (branch merge, don't forget to commit)
154 $ hg ci -mmergeb -d '3 0'
154 $ hg ci -mmergeb -d '3 0'
155
155
156 annotate after merge
156 annotate after merge
157
157
158 $ hg annotate -nf b
158 $ hg annotate -nf b
159 0 a: a
159 0 a: a
160 1 a: a
160 1 a: a
161 1 a: a
161 1 a: a
162 3 b: b4
162 3 b: b4
163 4 b: c
163 4 b: c
164 3 b: b5
164 3 b: b5
165
165
166 annotate after merge with -l
166 annotate after merge with -l
167
167
168 $ hg annotate -nlf b
168 $ hg annotate -nlf b
169 0 a:1: a
169 0 a:1: a
170 1 a:2: a
170 1 a:2: a
171 1 a:3: a
171 1 a:3: a
172 3 b:4: b4
172 3 b:4: b4
173 4 b:5: c
173 4 b:5: c
174 3 b:5: b5
174 3 b:5: b5
175
175
176 $ hg up -C 1
176 $ hg up -C 1
177 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
177 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
178 $ hg cp a b
178 $ hg cp a b
179 $ cat <<EOF > b
179 $ cat <<EOF > b
180 > a
180 > a
181 > z
181 > z
182 > a
182 > a
183 > EOF
183 > EOF
184 $ hg ci -mc -d '3 0'
184 $ hg ci -mc -d '3 0'
185 created new head
185 created new head
186 $ hg merge
186 $ hg merge
187 merging b
187 merging b
188 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
188 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
189 (branch merge, don't forget to commit)
189 (branch merge, don't forget to commit)
190 $ cat <<EOF >> b
190 $ cat <<EOF >> b
191 > b4
191 > b4
192 > c
192 > c
193 > b5
193 > b5
194 > EOF
194 > EOF
195 $ echo d >> b
195 $ echo d >> b
196 $ hg ci -mmerge2 -d '4 0'
196 $ hg ci -mmerge2 -d '4 0'
197
197
198 annotate after rename merge
198 annotate after rename merge
199
199
200 $ hg annotate -nf b
200 $ hg annotate -nf b
201 0 a: a
201 0 a: a
202 6 b: z
202 6 b: z
203 1 a: a
203 1 a: a
204 3 b: b4
204 3 b: b4
205 4 b: c
205 4 b: c
206 3 b: b5
206 3 b: b5
207 7 b: d
207 7 b: d
208
208
209 annotate after rename merge with -l
209 annotate after rename merge with -l
210
210
211 $ hg annotate -nlf b
211 $ hg annotate -nlf b
212 0 a:1: a
212 0 a:1: a
213 6 b:2: z
213 6 b:2: z
214 1 a:3: a
214 1 a:3: a
215 3 b:4: b4
215 3 b:4: b4
216 4 b:5: c
216 4 b:5: c
217 3 b:5: b5
217 3 b:5: b5
218 7 b:7: d
218 7 b:7: d
219
219
220 Issue2807: alignment of line numbers with -l
220 Issue2807: alignment of line numbers with -l
221
221
222 $ echo more >> b
222 $ echo more >> b
223 $ hg ci -mmore -d '5 0'
223 $ hg ci -mmore -d '5 0'
224 $ echo more >> b
224 $ echo more >> b
225 $ hg ci -mmore -d '6 0'
225 $ hg ci -mmore -d '6 0'
226 $ echo more >> b
226 $ echo more >> b
227 $ hg ci -mmore -d '7 0'
227 $ hg ci -mmore -d '7 0'
228 $ hg annotate -nlf b
228 $ hg annotate -nlf b
229 0 a: 1: a
229 0 a: 1: a
230 6 b: 2: z
230 6 b: 2: z
231 1 a: 3: a
231 1 a: 3: a
232 3 b: 4: b4
232 3 b: 4: b4
233 4 b: 5: c
233 4 b: 5: c
234 3 b: 5: b5
234 3 b: 5: b5
235 7 b: 7: d
235 7 b: 7: d
236 8 b: 8: more
236 8 b: 8: more
237 9 b: 9: more
237 9 b: 9: more
238 10 b:10: more
238 10 b:10: more
239
239
240 linkrev vs rev
240 linkrev vs rev
241
241
242 $ hg annotate -r tip -n a
242 $ hg annotate -r tip -n a
243 0: a
243 0: a
244 1: a
244 1: a
245 1: a
245 1: a
246
246
247 linkrev vs rev with -l
247 linkrev vs rev with -l
248
248
249 $ hg annotate -r tip -nl a
249 $ hg annotate -r tip -nl a
250 0:1: a
250 0:1: a
251 1:2: a
251 1:2: a
252 1:3: a
252 1:3: a
253
253
254 Issue589: "undelete" sequence leads to crash
254 Issue589: "undelete" sequence leads to crash
255
255
256 annotate was crashing when trying to --follow something
256 annotate was crashing when trying to --follow something
257
257
258 like A -> B -> A
258 like A -> B -> A
259
259
260 generate ABA rename configuration
260 generate ABA rename configuration
261
261
262 $ echo foo > foo
262 $ echo foo > foo
263 $ hg add foo
263 $ hg add foo
264 $ hg ci -m addfoo
264 $ hg ci -m addfoo
265 $ hg rename foo bar
265 $ hg rename foo bar
266 $ hg ci -m renamefoo
266 $ hg ci -m renamefoo
267 $ hg rename bar foo
267 $ hg rename bar foo
268 $ hg ci -m renamebar
268 $ hg ci -m renamebar
269
269
270 annotate after ABA with follow
270 annotate after ABA with follow
271
271
272 $ hg annotate --follow foo
272 $ hg annotate --follow foo
273 foo: foo
273 foo: foo
274
274
275 missing file
275 missing file
276
276
277 $ hg ann nosuchfile
277 $ hg ann nosuchfile
278 abort: nosuchfile: no such file in rev e9e6b4fa872f
278 abort: nosuchfile: no such file in rev e9e6b4fa872f
279 [255]
279 [255]
280
280
281 annotate file without '\n' on last line
281 annotate file without '\n' on last line
282
282
283 $ printf "" > c
283 $ printf "" > c
284 $ hg ci -A -m test -u nobody -d '1 0'
284 $ hg ci -A -m test -u nobody -d '1 0'
285 adding c
285 adding c
286 $ hg annotate c
286 $ hg annotate c
287 $ printf "a\nb" > c
287 $ printf "a\nb" > c
288 $ hg ci -m test
288 $ hg ci -m test
289 $ hg annotate c
289 $ hg annotate c
290 [0-9]+: a (re)
290 [0-9]+: a (re)
291 [0-9]+: b (re)
291 [0-9]+: b (re)
292
292
293 Issue3841: check annotation of the file of which filelog includes
293 Issue3841: check annotation of the file of which filelog includes
294 merging between the revision and its ancestor
294 merging between the revision and its ancestor
295
295
296 to reproduce the situation with recent Mercurial, this script uses (1)
296 to reproduce the situation with recent Mercurial, this script uses (1)
297 "hg debugsetparents" to merge without ancestor check by "hg merge",
297 "hg debugsetparents" to merge without ancestor check by "hg merge",
298 and (2) the extension to allow filelog merging between the revision
298 and (2) the extension to allow filelog merging between the revision
299 and its ancestor by overriding "repo._filecommit".
299 and its ancestor by overriding "repo._filecommit".
300
300
301 $ cat > ../legacyrepo.py <<EOF
301 $ cat > ../legacyrepo.py <<EOF
302 > from mercurial import node, error
302 > from mercurial import node, error
303 > def reposetup(ui, repo):
303 > def reposetup(ui, repo):
304 > class legacyrepo(repo.__class__):
304 > class legacyrepo(repo.__class__):
305 > def _filecommit(self, fctx, manifest1, manifest2,
305 > def _filecommit(self, fctx, manifest1, manifest2,
306 > linkrev, tr, changelist):
306 > linkrev, tr, changelist):
307 > fname = fctx.path()
307 > fname = fctx.path()
308 > text = fctx.data()
308 > text = fctx.data()
309 > flog = self.file(fname)
309 > flog = self.file(fname)
310 > fparent1 = manifest1.get(fname, node.nullid)
310 > fparent1 = manifest1.get(fname, node.nullid)
311 > fparent2 = manifest2.get(fname, node.nullid)
311 > fparent2 = manifest2.get(fname, node.nullid)
312 > meta = {}
312 > meta = {}
313 > copy = fctx.renamed()
313 > copy = fctx.renamed()
314 > if copy and copy[0] != fname:
314 > if copy and copy[0] != fname:
315 > raise error.Abort('copying is not supported')
315 > raise error.Abort('copying is not supported')
316 > if fparent2 != node.nullid:
316 > if fparent2 != node.nullid:
317 > changelist.append(fname)
317 > changelist.append(fname)
318 > return flog.add(text, meta, tr, linkrev,
318 > return flog.add(text, meta, tr, linkrev,
319 > fparent1, fparent2)
319 > fparent1, fparent2)
320 > raise error.Abort('only merging is supported')
320 > raise error.Abort('only merging is supported')
321 > repo.__class__ = legacyrepo
321 > repo.__class__ = legacyrepo
322 > EOF
322 > EOF
323
323
324 $ cat > baz <<EOF
324 $ cat > baz <<EOF
325 > 1
325 > 1
326 > 2
326 > 2
327 > 3
327 > 3
328 > 4
328 > 4
329 > 5
329 > 5
330 > EOF
330 > EOF
331 $ hg add baz
331 $ hg add baz
332 $ hg commit -m "baz:0"
332 $ hg commit -m "baz:0"
333
333
334 $ cat > baz <<EOF
334 $ cat > baz <<EOF
335 > 1 baz:1
335 > 1 baz:1
336 > 2
336 > 2
337 > 3
337 > 3
338 > 4
338 > 4
339 > 5
339 > 5
340 > EOF
340 > EOF
341 $ hg commit -m "baz:1"
341 $ hg commit -m "baz:1"
342
342
343 $ cat > baz <<EOF
343 $ cat > baz <<EOF
344 > 1 baz:1
344 > 1 baz:1
345 > 2 baz:2
345 > 2 baz:2
346 > 3
346 > 3
347 > 4
347 > 4
348 > 5
348 > 5
349 > EOF
349 > EOF
350 $ hg debugsetparents 17 17
350 $ hg debugsetparents 17 17
351 $ hg --config extensions.legacyrepo=../legacyrepo.py commit -m "baz:2"
351 $ hg --config extensions.legacyrepo=../legacyrepo.py commit -m "baz:2"
352 $ hg debugindexdot .hg/store/data/baz.i
352 $ hg debugindexdot .hg/store/data/baz.i
353 digraph G {
353 digraph G {
354 -1 -> 0
354 -1 -> 0
355 0 -> 1
355 0 -> 1
356 1 -> 2
356 1 -> 2
357 1 -> 2
357 1 -> 2
358 }
358 }
359 $ hg annotate baz
359 $ hg annotate baz
360 17: 1 baz:1
360 17: 1 baz:1
361 18: 2 baz:2
361 18: 2 baz:2
362 16: 3
362 16: 3
363 16: 4
363 16: 4
364 16: 5
364 16: 5
365
365
366 $ cat > baz <<EOF
366 $ cat > baz <<EOF
367 > 1 baz:1
367 > 1 baz:1
368 > 2 baz:2
368 > 2 baz:2
369 > 3 baz:3
369 > 3 baz:3
370 > 4
370 > 4
371 > 5
371 > 5
372 > EOF
372 > EOF
373 $ hg commit -m "baz:3"
373 $ hg commit -m "baz:3"
374
374
375 $ cat > baz <<EOF
375 $ cat > baz <<EOF
376 > 1 baz:1
376 > 1 baz:1
377 > 2 baz:2
377 > 2 baz:2
378 > 3 baz:3
378 > 3 baz:3
379 > 4 baz:4
379 > 4 baz:4
380 > 5
380 > 5
381 > EOF
381 > EOF
382 $ hg debugsetparents 19 18
382 $ hg debugsetparents 19 18
383 $ hg --config extensions.legacyrepo=../legacyrepo.py commit -m "baz:4"
383 $ hg --config extensions.legacyrepo=../legacyrepo.py commit -m "baz:4"
384 $ hg debugindexdot .hg/store/data/baz.i
384 $ hg debugindexdot .hg/store/data/baz.i
385 digraph G {
385 digraph G {
386 -1 -> 0
386 -1 -> 0
387 0 -> 1
387 0 -> 1
388 1 -> 2
388 1 -> 2
389 1 -> 2
389 1 -> 2
390 2 -> 3
390 2 -> 3
391 3 -> 4
391 3 -> 4
392 2 -> 4
392 2 -> 4
393 }
393 }
394 $ hg annotate baz
394 $ hg annotate baz
395 17: 1 baz:1
395 17: 1 baz:1
396 18: 2 baz:2
396 18: 2 baz:2
397 19: 3 baz:3
397 19: 3 baz:3
398 20: 4 baz:4
398 20: 4 baz:4
399 16: 5
399 16: 5
400
400
401 annotate clean file
401 annotate clean file
402
402
403 $ hg annotate -ncr "wdir()" foo
403 $ hg annotate -ncr "wdir()" foo
404 11 472b18db256d : foo
404 11 472b18db256d : foo
405
405
406 annotate modified file
406 annotate modified file
407
407
408 $ echo foofoo >> foo
408 $ echo foofoo >> foo
409 $ hg annotate -r "wdir()" foo
409 $ hg annotate -r "wdir()" foo
410 11 : foo
410 11 : foo
411 20+: foofoo
411 20+: foofoo
412
412
413 $ hg annotate -cr "wdir()" foo
413 $ hg annotate -cr "wdir()" foo
414 472b18db256d : foo
414 472b18db256d : foo
415 b6bedd5477e7+: foofoo
415 b6bedd5477e7+: foofoo
416
416
417 $ hg annotate -ncr "wdir()" foo
417 $ hg annotate -ncr "wdir()" foo
418 11 472b18db256d : foo
418 11 472b18db256d : foo
419 20 b6bedd5477e7+: foofoo
419 20 b6bedd5477e7+: foofoo
420
420
421 $ hg annotate --debug -ncr "wdir()" foo
421 $ hg annotate --debug -ncr "wdir()" foo
422 11 472b18db256d1e8282064eab4bfdaf48cbfe83cd : foo
422 11 472b18db256d1e8282064eab4bfdaf48cbfe83cd : foo
423 20 b6bedd5477e797f25e568a6402d4697f3f895a72+: foofoo
423 20 b6bedd5477e797f25e568a6402d4697f3f895a72+: foofoo
424
424
425 $ hg annotate -udr "wdir()" foo
425 $ hg annotate -udr "wdir()" foo
426 test Thu Jan 01 00:00:00 1970 +0000: foo
426 test Thu Jan 01 00:00:00 1970 +0000: foo
427 test [A-Za-z0-9:+ ]+: foofoo (re)
427 test [A-Za-z0-9:+ ]+: foofoo (re)
428
428
429 $ hg annotate -ncr "wdir()" -Tjson foo
429 $ hg annotate -ncr "wdir()" -Tjson foo
430 [
430 [
431 {
431 {
432 "line": "foo\n",
432 "line": "foo\n",
433 "node": "472b18db256d1e8282064eab4bfdaf48cbfe83cd",
433 "node": "472b18db256d1e8282064eab4bfdaf48cbfe83cd",
434 "rev": 11
434 "rev": 11
435 },
435 },
436 {
436 {
437 "line": "foofoo\n",
437 "line": "foofoo\n",
438 "node": null,
438 "node": null,
439 "rev": null
439 "rev": null
440 }
440 }
441 ]
441 ]
442
442
443 annotate added file
443 annotate added file
444
444
445 $ echo bar > bar
445 $ echo bar > bar
446 $ hg add bar
446 $ hg add bar
447 $ hg annotate -ncr "wdir()" bar
447 $ hg annotate -ncr "wdir()" bar
448 20 b6bedd5477e7+: bar
448 20 b6bedd5477e7+: bar
449
449
450 annotate renamed file
450 annotate renamed file
451
451
452 $ hg rename foo renamefoo2
452 $ hg rename foo renamefoo2
453 $ hg annotate -ncr "wdir()" renamefoo2
453 $ hg annotate -ncr "wdir()" renamefoo2
454 11 472b18db256d : foo
454 11 472b18db256d : foo
455 20 b6bedd5477e7+: foofoo
455 20 b6bedd5477e7+: foofoo
456
456
457 annotate missing file
457 annotate missing file
458
458
459 $ rm baz
459 $ rm baz
460 #if windows
460 #if windows
461 $ hg annotate -ncr "wdir()" baz
461 $ hg annotate -ncr "wdir()" baz
462 abort: $TESTTMP\repo\baz: The system cannot find the file specified
462 abort: $TESTTMP\repo\baz: The system cannot find the file specified
463 [255]
463 [255]
464 #else
464 #else
465 $ hg annotate -ncr "wdir()" baz
465 $ hg annotate -ncr "wdir()" baz
466 abort: No such file or directory: $TESTTMP/repo/baz
466 abort: No such file or directory: $TESTTMP/repo/baz
467 [255]
467 [255]
468 #endif
468 #endif
469
469
470 annotate removed file
470 annotate removed file
471
471
472 $ hg rm baz
472 $ hg rm baz
473 #if windows
473 #if windows
474 $ hg annotate -ncr "wdir()" baz
474 $ hg annotate -ncr "wdir()" baz
475 abort: $TESTTMP\repo\baz: The system cannot find the file specified
475 abort: $TESTTMP\repo\baz: The system cannot find the file specified
476 [255]
476 [255]
477 #else
477 #else
478 $ hg annotate -ncr "wdir()" baz
478 $ hg annotate -ncr "wdir()" baz
479 abort: No such file or directory: $TESTTMP/repo/baz
479 abort: No such file or directory: $TESTTMP/repo/baz
480 [255]
480 [255]
481 #endif
481 #endif
482
482
483 $ hg revert --all --no-backup --quiet
484 $ hg id -n
485 20
486
487 Test followlines() revset
488
489 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 3, 5)'
490 16: baz:0
491 19: baz:3
492 20: baz:4
493 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 3, 5, rev=20)'
494 16: baz:0
495 19: baz:3
496 20: baz:4
497 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 3, 5, rev=.^)'
498 16: baz:0
499 19: baz:3
500 $ printf "0\n0\n" | cat - baz > baz1
501 $ mv baz1 baz
502 $ hg ci -m 'added two lines with 0'
503 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 5, 7)'
504 16: baz:0
505 19: baz:3
506 20: baz:4
507 $ echo 6 >> baz
508 $ hg ci -m 'added line 8'
509 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 5, 7)'
510 16: baz:0
511 19: baz:3
512 20: baz:4
513 $ sed 's/3/3+/' baz > baz.new
514 $ mv baz.new baz
515 $ hg ci -m 'baz:3->3+'
516 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 5, 7)'
517 16: baz:0
518 19: baz:3
519 20: baz:4
520 23: baz:3->3+
521 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 1, 2)'
522 21: added two lines with 0
523
524 file patterns are okay
525 $ hg log -T '{rev}: {desc}\n' -r 'followlines("path:baz", 1, 2)'
526 21: added two lines with 0
527
528 renames are followed
529 $ hg mv baz qux
530 $ sed 's/4/4+/' qux > qux.new
531 $ mv qux.new qux
532 $ hg ci -m 'qux:4->4+'
533 $ hg log -T '{rev}: {desc}\n' -r 'followlines(qux, 5, 7)'
534 16: baz:0
535 19: baz:3
536 20: baz:4
537 23: baz:3->3+
538 24: qux:4->4+
539 $ hg up 23 --quiet
540
541 merge
542 $ echo 7 >> baz
543 $ hg ci -m 'one more line, out of line range'
544 created new head
545 $ sed 's/3+/3-/' baz > baz.new
546 $ mv baz.new baz
547 $ hg ci -m 'baz:3+->3-'
548 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 5, 7)'
549 16: baz:0
550 19: baz:3
551 20: baz:4
552 23: baz:3->3+
553 26: baz:3+->3-
554 $ hg merge 24
555 merging baz and qux to qux
556 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
557 (branch merge, don't forget to commit)
558 $ hg ci -m merge
559 $ hg log -T '{rev}: {desc}\n' -r 'followlines(qux, 5, 7)'
560 16: baz:0
561 19: baz:3
562 20: baz:4
563 23: baz:3->3+
564 24: qux:4->4+
565 26: baz:3+->3-
566 27: merge
567 $ hg up 24 --quiet
568 $ hg merge 26
569 merging qux and baz to qux
570 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
571 (branch merge, don't forget to commit)
572 $ hg ci -m 'merge from other side'
573 created new head
574 $ hg log -T '{rev}: {desc}\n' -r 'followlines(qux, 5, 7)'
575 16: baz:0
576 19: baz:3
577 20: baz:4
578 23: baz:3->3+
579 24: qux:4->4+
580 26: baz:3+->3-
581 28: merge from other side
582 $ hg up 23 --quiet
583
584 check error cases
585 $ hg log -r 'followlines(baz, 1, 2, rev=desc("b"))'
586 hg: parse error: followlines expects exactly one revision
587 [255]
588 $ hg log -r 'followlines("glob:*", 1, 2)'
589 hg: parse error: followlines expects exactly one file
590 [255]
591 $ hg log -r 'followlines(baz, x, 4)'
592 hg: parse error: line range bounds must be integers
593 [255]
594 $ hg log -r 'followlines(baz, 5, 4)'
595 hg: parse error: line range must be positive
596 [255]
597 $ hg log -r 'followlines(baz, 0, 4)'
598 hg: parse error: fromline must be strictly positive
599 [255]
600 $ hg log -r 'followlines(baz, 2, 40)'
601 abort: line range exceeds file size
602 [255]
603
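The scenarios above all come down to how the followed line range is carried backwards through the file's history: a hunk entirely after the range is ignored, a hunk entirely before it only shifts it, and a hunk overlapping it makes the revision show up in the result. The sketch below (plain Python, independent of Mercurial's actual implementation, with a made-up adjustrange helper) shows that bookkeeping for a single hunk:

def adjustrange(fromline, toline, a1, a2, b1, b2):
    """Map a 1-based inclusive line range of a child file onto its parent.

    The child was produced from the parent by one hunk replacing parent
    lines [a1, a2) with child lines [b1, b2) (0-based, end-exclusive).
    Returns (touched, fromline, toline): 'touched' tells whether the hunk
    overlaps the range, i.e. whether followlines() would report the child
    revision, and the new bounds are the range to keep following in the
    parent.
    """
    lo, hi = fromline - 1, toline          # 0-based, end-exclusive
    delta = (b2 - b1) - (a2 - a1)          # lines added by the hunk
    if hi <= b1:
        # hunk is entirely after the range: nothing moves
        return False, fromline, toline
    if b2 <= lo:
        # hunk is entirely before the range: the range just shifts
        return False, fromline - delta, toline - delta
    # hunk overlaps the range: the revision is reported and the range is
    # clamped to what existed in the parent
    newlo = min(lo, a1)
    newhi = max(a1, hi - delta)
    return True, newlo + 1, newhi

# A parent whose line 3 was rewritten into child lines 3-4 (hunk a=[2,3),
# b=[2,4)): following child lines 5-7 keeps following parent lines 4-6 and
# does not report the child; following child lines 3-4 reports it and keeps
# following parent line 3.
assert adjustrange(5, 7, 2, 3, 2, 4) == (False, 4, 6)
assert adjustrange(3, 4, 2, 3, 2, 4) == (True, 3, 3)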
483 Test annotate with whitespace options
604 Test annotate with whitespace options
484
605
485 $ cd ..
606 $ cd ..
486 $ hg init repo-ws
607 $ hg init repo-ws
487 $ cd repo-ws
608 $ cd repo-ws
488 $ cat > a <<EOF
609 $ cat > a <<EOF
489 > aa
610 > aa
490 >
611 >
491 > b b
612 > b b
492 > EOF
613 > EOF
493 $ hg ci -Am "adda"
614 $ hg ci -Am "adda"
494 adding a
615 adding a
495 $ sed 's/EOL$//g' > a <<EOF
616 $ sed 's/EOL$//g' > a <<EOF
496 > a a
617 > a a
497 >
618 >
498 > EOL
619 > EOL
499 > b b
620 > b b
500 > EOF
621 > EOF
501 $ hg ci -m "changea"
622 $ hg ci -m "changea"
502
623
503 Annotate with no option
624 Annotate with no option
504
625
505 $ hg annotate a
626 $ hg annotate a
506 1: a a
627 1: a a
507 0:
628 0:
508 1:
629 1:
509 1: b b
630 1: b b
510
631
511 Annotate with --ignore-space-change
632 Annotate with --ignore-space-change
512
633
513 $ hg annotate --ignore-space-change a
634 $ hg annotate --ignore-space-change a
514 1: a a
635 1: a a
515 1:
636 1:
516 0:
637 0:
517 0: b b
638 0: b b
518
639
519 Annotate with --ignore-all-space
640 Annotate with --ignore-all-space
520
641
521 $ hg annotate --ignore-all-space a
642 $ hg annotate --ignore-all-space a
522 0: a a
643 0: a a
523 0:
644 0:
524 1:
645 1:
525 0: b b
646 0: b b
526
647
527 Annotate with --ignore-blank-lines (similar to no options case)
648 Annotate with --ignore-blank-lines (similar to no options case)
528
649
529 $ hg annotate --ignore-blank-lines a
650 $ hg annotate --ignore-blank-lines a
530 1: a a
651 1: a a
531 0:
652 0:
532 1:
653 1:
533 1: b b
654 1: b b
534
655
535 $ cd ..
656 $ cd ..
536
657
537 Annotate with linkrev pointing to another branch
658 Annotate with linkrev pointing to another branch
538 ------------------------------------------------
659 ------------------------------------------------
539
660
540 create history with a filerev whose linkrev points to another branch
661 create history with a filerev whose linkrev points to another branch
541
662
542 $ hg init branchedlinkrev
663 $ hg init branchedlinkrev
543 $ cd branchedlinkrev
664 $ cd branchedlinkrev
544 $ echo A > a
665 $ echo A > a
545 $ hg commit -Am 'contentA'
666 $ hg commit -Am 'contentA'
546 adding a
667 adding a
547 $ echo B >> a
668 $ echo B >> a
548 $ hg commit -m 'contentB'
669 $ hg commit -m 'contentB'
549 $ hg up --rev 'desc(contentA)'
670 $ hg up --rev 'desc(contentA)'
550 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
671 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
551 $ echo unrelated > unrelated
672 $ echo unrelated > unrelated
552 $ hg commit -Am 'unrelated'
673 $ hg commit -Am 'unrelated'
553 adding unrelated
674 adding unrelated
554 created new head
675 created new head
555 $ hg graft -r 'desc(contentB)'
676 $ hg graft -r 'desc(contentB)'
556 grafting 1:fd27c222e3e6 "contentB"
677 grafting 1:fd27c222e3e6 "contentB"
557 $ echo C >> a
678 $ echo C >> a
558 $ hg commit -m 'contentC'
679 $ hg commit -m 'contentC'
559 $ echo W >> a
680 $ echo W >> a
560 $ hg log -G
681 $ hg log -G
561 @ changeset: 4:072f1e8df249
682 @ changeset: 4:072f1e8df249
562 | tag: tip
683 | tag: tip
563 | user: test
684 | user: test
564 | date: Thu Jan 01 00:00:00 1970 +0000
685 | date: Thu Jan 01 00:00:00 1970 +0000
565 | summary: contentC
686 | summary: contentC
566 |
687 |
567 o changeset: 3:ff38df03cc4b
688 o changeset: 3:ff38df03cc4b
568 | user: test
689 | user: test
569 | date: Thu Jan 01 00:00:00 1970 +0000
690 | date: Thu Jan 01 00:00:00 1970 +0000
570 | summary: contentB
691 | summary: contentB
571 |
692 |
572 o changeset: 2:62aaf3f6fc06
693 o changeset: 2:62aaf3f6fc06
573 | parent: 0:f0932f74827e
694 | parent: 0:f0932f74827e
574 | user: test
695 | user: test
575 | date: Thu Jan 01 00:00:00 1970 +0000
696 | date: Thu Jan 01 00:00:00 1970 +0000
576 | summary: unrelated
697 | summary: unrelated
577 |
698 |
578 | o changeset: 1:fd27c222e3e6
699 | o changeset: 1:fd27c222e3e6
579 |/ user: test
700 |/ user: test
580 | date: Thu Jan 01 00:00:00 1970 +0000
701 | date: Thu Jan 01 00:00:00 1970 +0000
581 | summary: contentB
702 | summary: contentB
582 |
703 |
583 o changeset: 0:f0932f74827e
704 o changeset: 0:f0932f74827e
584 user: test
705 user: test
585 date: Thu Jan 01 00:00:00 1970 +0000
706 date: Thu Jan 01 00:00:00 1970 +0000
586 summary: contentA
707 summary: contentA
587
708
588
709
589 Annotate should list ancestor of starting revision only
710 Annotate should list ancestor of starting revision only
590
711
591 $ hg annotate a
712 $ hg annotate a
592 0: A
713 0: A
593 3: B
714 3: B
594 4: C
715 4: C
595
716
596 $ hg annotate a -r 'wdir()'
717 $ hg annotate a -r 'wdir()'
597 0 : A
718 0 : A
598 3 : B
719 3 : B
599 4 : C
720 4 : C
600 4+: W
721 4+: W
601
722
602 Even when the starting revision is the linkrev-shadowed one:
723 Even when the starting revision is the linkrev-shadowed one:
603
724
604 $ hg annotate a -r 3
725 $ hg annotate a -r 3
605 0: A
726 0: A
606 3: B
727 3: B
607
728
608 $ cd ..
729 $ cd ..
609
730
610 Issue5360: Deleted chunk in p1 of a merge changeset
731 Issue5360: Deleted chunk in p1 of a merge changeset
611
732
612 $ hg init repo-5360
733 $ hg init repo-5360
613 $ cd repo-5360
734 $ cd repo-5360
614 $ echo 1 > a
735 $ echo 1 > a
615 $ hg commit -A a -m 1
736 $ hg commit -A a -m 1
616 $ echo 2 >> a
737 $ echo 2 >> a
617 $ hg commit -m 2
738 $ hg commit -m 2
618 $ echo a > a
739 $ echo a > a
619 $ hg commit -m a
740 $ hg commit -m a
620 $ hg update '.^' -q
741 $ hg update '.^' -q
621 $ echo 3 >> a
742 $ echo 3 >> a
622 $ hg commit -m 3 -q
743 $ hg commit -m 3 -q
623 $ hg merge 2 -q
744 $ hg merge 2 -q
624 $ cat > a << EOF
745 $ cat > a << EOF
625 > b
746 > b
626 > 1
747 > 1
627 > 2
748 > 2
628 > 3
749 > 3
629 > a
750 > a
630 > EOF
751 > EOF
631 $ hg resolve --mark -q
752 $ hg resolve --mark -q
632 $ hg commit -m m
753 $ hg commit -m m
633 $ hg annotate a
754 $ hg annotate a
634 4: b
755 4: b
635 0: 1
756 0: 1
636 1: 2
757 1: 2
637 3: 3
758 3: 3
638 2: a
759 2: a
639
760
640 $ cd ..
761 $ cd ..