revset: rename rev argument of followlines() to startrev...
Yuya Nishihara
r30800:cd23879c default
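For context, this changeset renames the optional "rev" argument of the followlines() revset to "startrev". A rough usage sketch (the file and line range are illustrative, not taken from this page, and the syntax shown assumes the 4.1-era form of the predicate):

    hg log -r 'followlines(mercurial/revset.py, 10:30, startrev=tip)'

This follows the history of lines 10-30 of mercurial/revset.py, starting from the revision given as startrev and walking back through its ancestors.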
@@ -1,3895 +1,3895 @@ mercurial/revset.py
# revset.py - revision set queries for mercurial
#
# Copyright 2010 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import heapq
import re
import string

from .i18n import _
from . import (
    destutil,
    encoding,
    error,
    hbisect,
    match as matchmod,
    node,
    obsolete as obsmod,
    parser,
    pathutil,
    phases,
    pycompat,
    registrar,
    repoview,
    util,
)

def _revancestors(repo, revs, followfirst):
    """Like revlog.ancestors(), but supports followfirst."""
    if followfirst:
        cut = 1
    else:
        cut = None
    cl = repo.changelog

    def iterate():
        revs.sort(reverse=True)
        irevs = iter(revs)
        h = []

        inputrev = next(irevs, None)
        if inputrev is not None:
            heapq.heappush(h, -inputrev)

        seen = set()
        while h:
            current = -heapq.heappop(h)
            if current == inputrev:
                inputrev = next(irevs, None)
                if inputrev is not None:
                    heapq.heappush(h, -inputrev)
            if current not in seen:
                seen.add(current)
                yield current
                for parent in cl.parentrevs(current)[:cut]:
                    if parent != node.nullrev:
                        heapq.heappush(h, -parent)

    return generatorset(iterate(), iterasc=False)

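# [Annotation added for this review; not part of the original revset.py]
# The heap in _revancestors() stores negated revision numbers so that heapq's
# min-heap behaves as a max-heap: pushing -5, -3 and -9 pops 9, then 5, then 3,
# yielding ancestors newest-first to match iterasc=False.
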
def _revdescendants(repo, revs, followfirst):
    """Like revlog.descendants() but supports followfirst."""
    if followfirst:
        cut = 1
    else:
        cut = None

    def iterate():
        cl = repo.changelog
        # XXX this should be 'parentset.min()' assuming 'parentset' is a
        # smartset (and if it is not, it should.)
        first = min(revs)
        nullrev = node.nullrev
        if first == nullrev:
            # Are there nodes with a null first parent and a non-null
            # second one? Maybe. Do we care? Probably not.
            for i in cl:
                yield i
        else:
            seen = set(revs)
            for i in cl.revs(first + 1):
                for x in cl.parentrevs(i)[:cut]:
                    if x != nullrev and x in seen:
                        seen.add(i)
                        yield i
                        break

    return generatorset(iterate(), iterasc=True)

def _reachablerootspure(repo, minroot, roots, heads, includepath):
    """return (heads(::<roots> and ::<heads>))

    If includepath is True, return (<roots>::<heads>)."""
    if not roots:
        return []
    parentrevs = repo.changelog.parentrevs
    roots = set(roots)
    visit = list(heads)
    reachable = set()
    seen = {}
    # prefetch all the things! (because python is slow)
    reached = reachable.add
    dovisit = visit.append
    nextvisit = visit.pop
    # open-code the post-order traversal due to the tiny size of
    # sys.getrecursionlimit()
    while visit:
        rev = nextvisit()
        if rev in roots:
            reached(rev)
            if not includepath:
                continue
        parents = parentrevs(rev)
        seen[rev] = parents
        for parent in parents:
            if parent >= minroot and parent not in seen:
                dovisit(parent)
    if not reachable:
        return baseset()
    if not includepath:
        return reachable
    for rev in sorted(seen):
        for parent in seen[rev]:
            if parent in reachable:
                reached(rev)
    return reachable

def reachableroots(repo, roots, heads, includepath=False):
    """return (heads(::<roots> and ::<heads>))

    If includepath is True, return (<roots>::<heads>)."""
    if not roots:
        return baseset()
    minroot = roots.min()
    roots = list(roots)
    heads = list(heads)
    try:
        revs = repo.changelog.reachableroots(minroot, heads, roots, includepath)
    except AttributeError:
        revs = _reachablerootspure(repo, minroot, roots, heads, includepath)
    revs = baseset(revs)
    revs.sort()
    return revs

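# [Annotation added for this review; not part of the original revset.py]
# reachableroots() first tries the accelerated implementation exposed as
# repo.changelog.reachableroots; when the changelog object lacks that
# attribute (pure-Python builds), the AttributeError falls back to
# _reachablerootspure() above.
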
elements = {
    # token-type: binding-strength, primary, prefix, infix, suffix
    "(": (21, None, ("group", 1, ")"), ("func", 1, ")"), None),
    "##": (20, None, None, ("_concat", 20), None),
    "~": (18, None, None, ("ancestor", 18), None),
    "^": (18, None, None, ("parent", 18), "parentpost"),
    "-": (5, None, ("negate", 19), ("minus", 5), None),
    "::": (17, None, ("dagrangepre", 17), ("dagrange", 17), "dagrangepost"),
    "..": (17, None, ("dagrangepre", 17), ("dagrange", 17), "dagrangepost"),
    ":": (15, "rangeall", ("rangepre", 15), ("range", 15), "rangepost"),
    "not": (10, None, ("not", 10), None, None),
    "!": (10, None, ("not", 10), None, None),
    "and": (5, None, None, ("and", 5), None),
    "&": (5, None, None, ("and", 5), None),
    "%": (5, None, None, ("only", 5), "onlypost"),
    "or": (4, None, None, ("or", 4), None),
    "|": (4, None, None, ("or", 4), None),
    "+": (4, None, None, ("or", 4), None),
    "=": (3, None, None, ("keyvalue", 3), None),
    ",": (2, None, None, ("list", 2), None),
    ")": (0, None, None, None, None),
    "symbol": (0, "symbol", None, None, None),
    "string": (0, "string", None, None, None),
    "end": (0, None, None, None, None),
}

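# [Annotation added for this review; not part of the original revset.py]
# Each entry above gives a token's binding strength plus its primary, prefix,
# infix and suffix actions. For example, in "a - b::c" the '::' operator
# (strength 17) binds tighter than infix '-' (5), so the expression parses as
# (minus a (dagrange b c)); prefix '-' ("negate") instead binds at 19.
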
keywords = set(['and', 'or', 'not'])

# default set of valid characters for the initial letter of symbols
_syminitletters = set(
    string.ascii_letters +
    string.digits + pycompat.sysstr('._@')) | set(map(chr, xrange(128, 256)))

# default set of valid characters for non-initial letters of symbols
_symletters = _syminitletters | set(pycompat.sysstr('-/'))

def tokenize(program, lookup=None, syminitletters=None, symletters=None):
    '''
    Parse a revset statement into a stream of tokens

    ``syminitletters`` is the set of valid characters for the initial
    letter of symbols.

    By default, character ``c`` is recognized as valid for initial
    letter of symbols, if ``c.isalnum() or c in '._@' or ord(c) > 127``.

    ``symletters`` is the set of valid characters for non-initial
    letters of symbols.

    By default, character ``c`` is recognized as valid for non-initial
    letters of symbols, if ``c.isalnum() or c in '-._/@' or ord(c) > 127``.

    Check that @ is a valid unquoted token character (issue3686):
    >>> list(tokenize("@::"))
    [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]

    '''
    if syminitletters is None:
        syminitletters = _syminitletters
    if symletters is None:
        symletters = _symletters

    if program and lookup:
        # attempt to parse old-style ranges first to deal with
        # things like old-tag which contain query metacharacters
        parts = program.split(':', 1)
        if all(lookup(sym) for sym in parts if sym):
            if parts[0]:
                yield ('symbol', parts[0], 0)
            if len(parts) > 1:
                s = len(parts[0])
                yield (':', None, s)
                if parts[1]:
                    yield ('symbol', parts[1], s + 1)
            yield ('end', None, len(program))
            return

    pos, l = 0, len(program)
    while pos < l:
        c = program[pos]
        if c.isspace(): # skip inter-token whitespace
            pass
        elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
            yield ('::', None, pos)
            pos += 1 # skip ahead
        elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
            yield ('..', None, pos)
            pos += 1 # skip ahead
        elif c == '#' and program[pos:pos + 2] == '##': # look ahead carefully
            yield ('##', None, pos)
            pos += 1 # skip ahead
        elif c in "():=,-|&+!~^%": # handle simple operators
            yield (c, None, pos)
        elif (c in '"\'' or c == 'r' and
              program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
            if c == 'r':
                pos += 1
                c = program[pos]
                decode = lambda x: x
            else:
                decode = parser.unescapestr
            pos += 1
            s = pos
            while pos < l: # find closing quote
                d = program[pos]
                if d == '\\': # skip over escaped characters
                    pos += 2
                    continue
                if d == c:
                    yield ('string', decode(program[s:pos]), s)
                    break
                pos += 1
            else:
                raise error.ParseError(_("unterminated string"), s)
        # gather up a symbol/keyword
        elif c in syminitletters:
            s = pos
            pos += 1
            while pos < l: # find end of symbol
                d = program[pos]
                if d not in symletters:
                    break
                if d == '.' and program[pos - 1] == '.': # special case for ..
                    pos -= 1
                    break
                pos += 1
            sym = program[s:pos]
            if sym in keywords: # operator keywords
                yield (sym, None, s)
            elif '-' in sym:
                # some jerk gave us foo-bar-baz, try to check if it's a symbol
                if lookup and lookup(sym):
                    # looks like a real symbol
                    yield ('symbol', sym, s)
                else:
                    # looks like an expression
                    parts = sym.split('-')
                    for p in parts[:-1]:
                        if p: # possible consecutive -
                            yield ('symbol', p, s)
                            s += len(p)
                        yield ('-', None, pos)
                        s += 1
                    if parts[-1]: # possible trailing -
                        yield ('symbol', parts[-1], s)
            else:
                yield ('symbol', sym, s)
            pos -= 1
        else:
            raise error.ParseError(_("syntax error in revset '%s'") %
                                   program, pos)
        pos += 1
    yield ('end', None, pos)

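# [Annotation added for this review; not part of the original revset.py]
# A worked example of the tokenizer: tokenize("heads(default)") yields
#   ('symbol', 'heads', 0), ('(', None, 5), ('symbol', 'default', 6),
#   (')', None, 13), ('end', None, 14)
# i.e. symbol tokens carry their text and every token records its start offset.
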
# helpers

def getsymbol(x):
    if x and x[0] == 'symbol':
        return x[1]
    raise error.ParseError(_('not a symbol'))

def getstring(x, err):
    if x and (x[0] == 'string' or x[0] == 'symbol'):
        return x[1]
    raise error.ParseError(err)

def getlist(x):
    if not x:
        return []
    if x[0] == 'list':
        return list(x[1:])
    return [x]

def getargs(x, min, max, err):
    l = getlist(x)
    if len(l) < min or (max >= 0 and len(l) > max):
        raise error.ParseError(err)
    return l

def getargsdict(x, funcname, keys):
    return parser.buildargsdict(getlist(x), funcname, parser.splitargspec(keys),
                                keyvaluenode='keyvalue', keynode='symbol')

def getset(repo, subset, x):
    if not x:
        raise error.ParseError(_("missing argument"))
    s = methods[x[0]](repo, subset, *x[1:])
    if util.safehasattr(s, 'isascending'):
        return s
    # else case should not happen, because all non-func are internal,
    # ignoring for now.
    if x[0] == 'func' and x[1][0] == 'symbol' and x[1][1] in symbols:
        repo.ui.deprecwarn('revset "%s" uses list instead of smartset'
                           % x[1][1],
                           '3.9')
    return baseset(s)

def _getrevsource(repo, r):
    extra = repo[r].extra()
    for label in ('source', 'transplant_source', 'rebase_source'):
        if label in extra:
            try:
                return repo[extra[label]].rev()
            except error.RepoLookupError:
                pass
    return None

# operator methods

def stringset(repo, subset, x):
    x = repo[x].rev()
    if (x in subset
        or x == node.nullrev and isinstance(subset, fullreposet)):
        return baseset([x])
    return baseset()

def rangeset(repo, subset, x, y, order):
    m = getset(repo, fullreposet(repo), x)
    n = getset(repo, fullreposet(repo), y)

    if not m or not n:
        return baseset()
    return _makerangeset(repo, subset, m.first(), n.last(), order)

def rangepre(repo, subset, y, order):
    # ':y' can't be rewritten to '0:y' since '0' may be hidden
    n = getset(repo, fullreposet(repo), y)
    if not n:
        return baseset()
    return _makerangeset(repo, subset, 0, n.last(), order)

def _makerangeset(repo, subset, m, n, order):
    if m == n:
        r = baseset([m])
    elif n == node.wdirrev:
        r = spanset(repo, m, len(repo)) + baseset([n])
    elif m == node.wdirrev:
        r = baseset([m]) + spanset(repo, len(repo) - 1, n - 1)
    elif m < n:
        r = spanset(repo, m, n + 1)
    else:
        r = spanset(repo, m, n - 1)

    if order == defineorder:
        return r & subset
    else:
        # carrying the sorting over when possible would be more efficient
        return subset & r

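# [Annotation added for this review; not part of the original revset.py]
# _makerangeset() preserves the direction implied by its endpoints: for
# example m=5, n=2 builds spanset(repo, 5, 1), i.e. the descending range
# 5, 4, 3, 2, which is then combined with subset according to the requested
# ordering.
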
def dagrange(repo, subset, x, y, order):
    r = fullreposet(repo)
    xs = reachableroots(repo, getset(repo, r, x), getset(repo, r, y),
                        includepath=True)
    return subset & xs

def andset(repo, subset, x, y, order):
    return getset(repo, getset(repo, subset, x), y)

def differenceset(repo, subset, x, y, order):
    return getset(repo, subset, x) - getset(repo, subset, y)

def _orsetlist(repo, subset, xs):
    assert xs
    if len(xs) == 1:
        return getset(repo, subset, xs[0])
    p = len(xs) // 2
    a = _orsetlist(repo, subset, xs[:p])
    b = _orsetlist(repo, subset, xs[p:])
    return a + b

def orset(repo, subset, x, order):
    xs = getlist(x)
    if order == followorder:
        # slow path to take the subset order
        return subset & _orsetlist(repo, fullreposet(repo), xs)
    else:
        return _orsetlist(repo, subset, xs)

def notset(repo, subset, x, order):
    return subset - getset(repo, subset, x)

def listset(repo, subset, *xs):
    raise error.ParseError(_("can't use a list in this context"),
                           hint=_('see hg help "revsets.x or y"'))

def keyvaluepair(repo, subset, k, v):
    raise error.ParseError(_("can't use a key-value pair in this context"))

def func(repo, subset, a, b, order):
    f = getsymbol(a)
    if f in symbols:
        func = symbols[f]
        if getattr(func, '_takeorder', False):
            return func(repo, subset, b, order)
        return func(repo, subset, b)

    keep = lambda fn: getattr(fn, '__doc__', None) is not None

    syms = [s for (s, fn) in symbols.items() if keep(fn)]
    raise error.UnknownIdentifier(f, syms)

# functions

# symbols are callables like:
#   fn(repo, subset, x)
# with:
#   repo - current repository instance
#   subset - of revisions to be examined
#   x - argument in tree form
symbols = {}

# symbols which can't be used for a DoS attack for any given input
# (e.g. those which accept regexes as plain strings shouldn't be included)
# functions that just return a lot of changesets (like all) don't count here
safesymbols = set()

predicate = registrar.revsetpredicate()

@predicate('_destupdate')
def _destupdate(repo, subset, x):
    # experimental revset for update destination
    args = getargsdict(x, 'limit', 'clean check')
    return subset & baseset([destutil.destupdate(repo, **args)[0]])

@predicate('_destmerge')
def _destmerge(repo, subset, x):
    # experimental revset for merge destination
    sourceset = None
    if x is not None:
        sourceset = getset(repo, fullreposet(repo), x)
    return subset & baseset([destutil.destmerge(repo, sourceset=sourceset)])

@predicate('adds(pattern)', safe=True)
def adds(repo, subset, x):
    """Changesets that add a file matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "adds" is a keyword
    pat = getstring(x, _("adds requires a pattern"))
    return checkstatus(repo, subset, pat, 1)

@predicate('ancestor(*changeset)', safe=True)
def ancestor(repo, subset, x):
    """A greatest common ancestor of the changesets.

    Accepts 0 or more changesets.
    Will return empty list when passed no args.
    Greatest common ancestor of a single changeset is that changeset.
    """
    # i18n: "ancestor" is a keyword
    l = getlist(x)
    rl = fullreposet(repo)
    anc = None

    # (getset(repo, rl, i) for i in l) generates a list of lists
    for revs in (getset(repo, rl, i) for i in l):
        for r in revs:
            if anc is None:
                anc = repo[r]
            else:
                anc = anc.ancestor(repo[r])

    if anc is not None and anc.rev() in subset:
        return baseset([anc.rev()])
    return baseset()

def _ancestors(repo, subset, x, followfirst=False):
    heads = getset(repo, fullreposet(repo), x)
    if not heads:
        return baseset()
    s = _revancestors(repo, heads, followfirst)
    return subset & s

@predicate('ancestors(set)', safe=True)
def ancestors(repo, subset, x):
    """Changesets that are ancestors of a changeset in set.
    """
    return _ancestors(repo, subset, x)

@predicate('_firstancestors', safe=True)
def _firstancestors(repo, subset, x):
    # ``_firstancestors(set)``
    # Like ``ancestors(set)`` but follows only the first parents.
    return _ancestors(repo, subset, x, followfirst=True)

def ancestorspec(repo, subset, x, n, order):
    """``set~n``
    Changesets that are the Nth ancestor (first parents only) of a changeset
    in set.
    """
    try:
        n = int(n[1])
    except (TypeError, ValueError):
        raise error.ParseError(_("~ expects a number"))
    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        for i in range(n):
            r = cl.parentrevs(r)[0]
        ps.add(r)
    return subset & ps

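# [Annotation added for this review; not part of the original revset.py]
# ancestorspec() implements the '~' suffix operator: 'tip~2', for instance,
# resolves to the first-parent grandparent of tip (equivalent to tip^1^1),
# and 'x~0' is x itself because the parent loop never runs.
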
@predicate('author(string)', safe=True)
def author(repo, subset, x):
    """Alias for ``user(string)``.
    """
    # i18n: "author" is a keyword
    n = getstring(x, _("author requires a string"))
    kind, pattern, matcher = _substringmatcher(n, casesensitive=False)
    return subset.filter(lambda x: matcher(repo[x].user()),
                         condrepr=('<user %r>', n))

@predicate('bisect(string)', safe=True)
def bisect(repo, subset, x):
    """Changesets marked in the specified bisect status:

    - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
    - ``goods``, ``bads``: csets topologically good/bad
    - ``range``: csets taking part in the bisection
    - ``pruned``: csets that are goods, bads or skipped
    - ``untested``: csets whose fate is yet unknown
    - ``ignored``: csets ignored due to DAG topology
    - ``current``: the cset currently being bisected
    """
    # i18n: "bisect" is a keyword
    status = getstring(x, _("bisect requires a string")).lower()
    state = set(hbisect.get(repo, status))
    return subset & state

# Backward-compatibility
# - no help entry so that we do not advertise it any more
@predicate('bisected', safe=True)
def bisected(repo, subset, x):
    return bisect(repo, subset, x)

@predicate('bookmark([name])', safe=True)
def bookmark(repo, subset, x):
    """The named bookmark or all bookmarks.

    Pattern matching is supported for `name`. See :hg:`help revisions.patterns`.
    """
    # i18n: "bookmark" is a keyword
    args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
    if args:
        bm = getstring(args[0],
                       # i18n: "bookmark" is a keyword
                       _('the argument to bookmark must be a string'))
        kind, pattern, matcher = util.stringmatcher(bm)
        bms = set()
        if kind == 'literal':
            bmrev = repo._bookmarks.get(pattern, None)
            if not bmrev:
                raise error.RepoLookupError(_("bookmark '%s' does not exist")
                                            % pattern)
            bms.add(repo[bmrev].rev())
        else:
            matchrevs = set()
            for name, bmrev in repo._bookmarks.iteritems():
                if matcher(name):
                    matchrevs.add(bmrev)
            if not matchrevs:
                raise error.RepoLookupError(_("no bookmarks exist"
                                              " that match '%s'") % pattern)
            for bmrev in matchrevs:
                bms.add(repo[bmrev].rev())
    else:
        bms = set([repo[r].rev()
                   for r in repo._bookmarks.values()])
    bms -= set([node.nullrev])
    return subset & bms

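# [Annotation added for this review; not part of the original revset.py]
# Because util.stringmatcher() is used, the bookmark name accepts the standard
# pattern prefixes: for example bookmark('re:^release/') selects revisions
# pointed to by any bookmark matching that regular expression, while a plain
# name must exist exactly or a RepoLookupError is raised.
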
@predicate('branch(string or set)', safe=True)
def branch(repo, subset, x):
    """
    All changesets belonging to the given branch or the branches of the given
    changesets.

    Pattern matching is supported for `string`. See
    :hg:`help revisions.patterns`.
    """
    getbi = repo.revbranchcache().branchinfo

    try:
        b = getstring(x, '')
    except error.ParseError:
        # not a string, but another revspec, e.g. tip()
        pass
    else:
        kind, pattern, matcher = util.stringmatcher(b)
        if kind == 'literal':
            # note: falls through to the revspec case if no branch with
            # this name exists and pattern kind is not specified explicitly
            if pattern in repo.branchmap():
                return subset.filter(lambda r: matcher(getbi(r)[0]),
                                     condrepr=('<branch %r>', b))
            if b.startswith('literal:'):
                raise error.RepoLookupError(_("branch '%s' does not exist")
                                            % pattern)
        else:
            return subset.filter(lambda r: matcher(getbi(r)[0]),
                                 condrepr=('<branch %r>', b))

    s = getset(repo, fullreposet(repo), x)
    b = set()
    for r in s:
        b.add(getbi(r)[0])
    c = s.__contains__
    return subset.filter(lambda r: c(r) or getbi(r)[0] in b,
                         condrepr=lambda: '<branch %r>' % sorted(b))

@predicate('bumped()', safe=True)
def bumped(repo, subset, x):
    """Mutable changesets marked as successors of public changesets.

    Only non-public and non-obsolete changesets can be `bumped`.
    """
    # i18n: "bumped" is a keyword
    getargs(x, 0, 0, _("bumped takes no arguments"))
    bumped = obsmod.getrevs(repo, 'bumped')
    return subset & bumped

@predicate('bundle()', safe=True)
def bundle(repo, subset, x):
    """Changesets in the bundle.

    Bundle must be specified by the -R option."""

    try:
        bundlerevs = repo.changelog.bundlerevs
    except AttributeError:
        raise error.Abort(_("no bundle provided - specify with -R"))
    return subset & bundlerevs

def checkstatus(repo, subset, pat, field):
    hasset = matchmod.patkind(pat) == 'set'

    mcache = [None]
    def matches(x):
        c = repo[x]
        if not mcache[0] or hasset:
            mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
        m = mcache[0]
        fname = None
        if not m.anypats() and len(m.files()) == 1:
            fname = m.files()[0]
        if fname is not None:
            if fname not in c.files():
                return False
        else:
            for f in c.files():
                if m(f):
                    break
            else:
                return False
        files = repo.status(c.p1().node(), c.node())[field]
        if fname is not None:
            if fname in files:
                return True
        else:
            for f in files:
                if m(f):
                    return True

    return subset.filter(matches, condrepr=('<status[%r] %r>', field, pat))

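# [Annotation added for this review; not part of the original revset.py]
# The 'field' argument indexes into the tuple returned by repo.status():
# adds() above passes 1, which selects the 'added' slot (0 is modified,
# 2 is removed), so one helper serves the various status-based predicates.
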
def _children(repo, subset, parentset):
    if not parentset:
        return baseset()
    cs = set()
    pr = repo.changelog.parentrevs
    minrev = parentset.min()
    nullrev = node.nullrev
    for r in subset:
        if r <= minrev:
            continue
        p1, p2 = pr(r)
        if p1 in parentset:
            cs.add(r)
        if p2 != nullrev and p2 in parentset:
            cs.add(r)
    return baseset(cs)

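# [Annotation added for this review; not part of the original revset.py]
# _children() can skip every candidate numbered at or below parentset.min(),
# because a child's revision number is always greater than its parents' in
# the changelog.
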
@predicate('children(set)', safe=True)
def children(repo, subset, x):
    """Child changesets of changesets in set.
    """
    s = getset(repo, fullreposet(repo), x)
    cs = _children(repo, subset, s)
    return subset & cs

@predicate('closed()', safe=True)
def closed(repo, subset, x):
    """Changeset is closed.
    """
    # i18n: "closed" is a keyword
    getargs(x, 0, 0, _("closed takes no arguments"))
    return subset.filter(lambda r: repo[r].closesbranch(),
                         condrepr='<branch closed>')

@predicate('contains(pattern)')
def contains(repo, subset, x):
    """The revision's manifest contains a file matching pattern (but might not
    modify it). See :hg:`help patterns` for information about file patterns.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.
    """
    # i18n: "contains" is a keyword
    pat = getstring(x, _("contains requires a pattern"))

    def matches(x):
        if not matchmod.patkind(pat):
            pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
            if pats in repo[x]:
                return True
        else:
            c = repo[x]
            m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
            for f in c.manifest():
                if m(f):
                    return True
        return False

    return subset.filter(matches, condrepr=('<contains %r>', pat))

@predicate('converted([id])', safe=True)
def converted(repo, subset, x):
    """Changesets converted from the given identifier in the old repository if
    present, or all converted changesets if no identifier is specified.
    """

    # There is exactly no chance of resolving the revision, so do a simple
    # string compare and hope for the best

    rev = None
    # i18n: "converted" is a keyword
    l = getargs(x, 0, 1, _('converted takes one or no arguments'))
    if l:
        # i18n: "converted" is a keyword
        rev = getstring(l[0], _('converted requires a revision'))

    def _matchvalue(r):
        source = repo[r].extra().get('convert_revision', None)
        return source is not None and (rev is None or source.startswith(rev))

    return subset.filter(lambda r: _matchvalue(r),
                         condrepr=('<converted %r>', rev))

@predicate('date(interval)', safe=True)
def date(repo, subset, x):
    """Changesets within the interval, see :hg:`help dates`.
    """
    # i18n: "date" is a keyword
    ds = getstring(x, _("date requires a string"))
    dm = util.matchdate(ds)
    return subset.filter(lambda x: dm(repo[x].date()[0]),
                         condrepr=('<date %r>', ds))

@predicate('desc(string)', safe=True)
def desc(repo, subset, x):
    """Search commit message for string. The match is case-insensitive.

    Pattern matching is supported for `string`. See
    :hg:`help revisions.patterns`.
    """
    # i18n: "desc" is a keyword
    ds = getstring(x, _("desc requires a string"))

    kind, pattern, matcher = _substringmatcher(ds, casesensitive=False)

    return subset.filter(lambda r: matcher(repo[r].description()),
                         condrepr=('<desc %r>', ds))

def _descendants(repo, subset, x, followfirst=False):
    roots = getset(repo, fullreposet(repo), x)
    if not roots:
        return baseset()
    s = _revdescendants(repo, roots, followfirst)

    # Both sets need to be ascending in order to lazily return the union
    # in the correct order.
    base = subset & roots
    desc = subset & s
    result = base + desc
    if subset.isascending():
        result.sort()
    elif subset.isdescending():
        result.sort(reverse=True)
    else:
        result = subset & result
    return result

@predicate('descendants(set)', safe=True)
def descendants(repo, subset, x):
    """Changesets which are descendants of changesets in set.
    """
    return _descendants(repo, subset, x)

@predicate('_firstdescendants', safe=True)
def _firstdescendants(repo, subset, x):
    # ``_firstdescendants(set)``
    # Like ``descendants(set)`` but follows only the first parents.
    return _descendants(repo, subset, x, followfirst=True)

@predicate('destination([set])', safe=True)
def destination(repo, subset, x):
    """Changesets that were created by a graft, transplant or rebase operation,
    with the given revisions specified as the source. Omitting the optional set
    is the same as passing all().
    """
    if x is not None:
        sources = getset(repo, fullreposet(repo), x)
    else:
        sources = fullreposet(repo)

    dests = set()

    # subset contains all of the possible destinations that can be returned, so
    # iterate over them and see if their source(s) were provided in the arg set.
    # Even if the immediate src of r is not in the arg set, src's source (or
    # further back) may be. Scanning back further than the immediate src allows
    # transitive transplants and rebases to yield the same results as transitive
    # grafts.
    for r in subset:
        src = _getrevsource(repo, r)
        lineage = None

        while src is not None:
            if lineage is None:
                lineage = list()

            lineage.append(r)

            # The visited lineage is a match if the current source is in the arg
            # set. Since every candidate dest is visited by way of iterating
            # subset, any dests further back in the lineage will be tested by a
            # different iteration over subset. Likewise, if the src was already
            # selected, the current lineage can be selected without going back
            # further.
            if src in sources or src in dests:
                dests.update(lineage)
                break

            r = src
            src = _getrevsource(repo, r)

    return subset.filter(dests.__contains__,
                         condrepr=lambda: '<destination %r>' % sorted(dests))

902 @predicate('divergent()', safe=True)
902 @predicate('divergent()', safe=True)
903 def divergent(repo, subset, x):
903 def divergent(repo, subset, x):
904 """
904 """
905 Final successors of changesets with an alternative set of final successors.
905 Final successors of changesets with an alternative set of final successors.
906 """
906 """
907 # i18n: "divergent" is a keyword
907 # i18n: "divergent" is a keyword
908 getargs(x, 0, 0, _("divergent takes no arguments"))
908 getargs(x, 0, 0, _("divergent takes no arguments"))
909 divergent = obsmod.getrevs(repo, 'divergent')
909 divergent = obsmod.getrevs(repo, 'divergent')
910 return subset & divergent
910 return subset & divergent
911
911
912 @predicate('extinct()', safe=True)
912 @predicate('extinct()', safe=True)
913 def extinct(repo, subset, x):
913 def extinct(repo, subset, x):
914 """Obsolete changesets with obsolete descendants only.
914 """Obsolete changesets with obsolete descendants only.
915 """
915 """
916 # i18n: "extinct" is a keyword
916 # i18n: "extinct" is a keyword
917 getargs(x, 0, 0, _("extinct takes no arguments"))
917 getargs(x, 0, 0, _("extinct takes no arguments"))
918 extincts = obsmod.getrevs(repo, 'extinct')
918 extincts = obsmod.getrevs(repo, 'extinct')
919 return subset & extincts
919 return subset & extincts
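# Illustrative queries for the obsolescence predicates above (hypothetical;
# they only return revisions when obsolescence markers exist, e.g. with the
# evolve extension enabled):
#
#   hg log -r 'divergent()'   # successors competing with other successors
#   hg log -r 'extinct()'     # obsolete changesets with only obsolete descendants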
920
920
921 @predicate('extra(label, [value])', safe=True)
921 @predicate('extra(label, [value])', safe=True)
922 def extra(repo, subset, x):
922 def extra(repo, subset, x):
923 """Changesets with the given label in the extra metadata, with the given
923 """Changesets with the given label in the extra metadata, with the given
924 optional value.
924 optional value.
925
925
926 Pattern matching is supported for `value`. See
926 Pattern matching is supported for `value`. See
927 :hg:`help revisions.patterns`.
927 :hg:`help revisions.patterns`.
928 """
928 """
929 args = getargsdict(x, 'extra', 'label value')
929 args = getargsdict(x, 'extra', 'label value')
930 if 'label' not in args:
930 if 'label' not in args:
931 # i18n: "extra" is a keyword
931 # i18n: "extra" is a keyword
932 raise error.ParseError(_('extra takes at least 1 argument'))
932 raise error.ParseError(_('extra takes at least 1 argument'))
933 # i18n: "extra" is a keyword
933 # i18n: "extra" is a keyword
934 label = getstring(args['label'], _('first argument to extra must be '
934 label = getstring(args['label'], _('first argument to extra must be '
935 'a string'))
935 'a string'))
936 value = None
936 value = None
937
937
938 if 'value' in args:
938 if 'value' in args:
939 # i18n: "extra" is a keyword
939 # i18n: "extra" is a keyword
940 value = getstring(args['value'], _('second argument to extra must be '
940 value = getstring(args['value'], _('second argument to extra must be '
941 'a string'))
941 'a string'))
942 kind, value, matcher = util.stringmatcher(value)
942 kind, value, matcher = util.stringmatcher(value)
943
943
944 def _matchvalue(r):
944 def _matchvalue(r):
945 extra = repo[r].extra()
945 extra = repo[r].extra()
946 return label in extra and (value is None or matcher(extra[label]))
946 return label in extra and (value is None or matcher(extra[label]))
947
947
948 return subset.filter(lambda r: _matchvalue(r),
948 return subset.filter(lambda r: _matchvalue(r),
949 condrepr=('<extra[%r] %r>', label, value))
949 condrepr=('<extra[%r] %r>', label, value))
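# Illustrative usage (hypothetical key and value): the optional second
# argument is matched with the pattern syntax mentioned in the docstring.
#
#   hg log -r "extra(source)"               # changesets carrying a 'source' extra
#   hg log -r "extra(source, 're:^1f')"     # value matched as a regular expression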
950
950
951 @predicate('filelog(pattern)', safe=True)
951 @predicate('filelog(pattern)', safe=True)
952 def filelog(repo, subset, x):
952 def filelog(repo, subset, x):
953 """Changesets connected to the specified filelog.
953 """Changesets connected to the specified filelog.
954
954
955 For performance reasons, visits only revisions mentioned in the file-level
955 For performance reasons, visits only revisions mentioned in the file-level
956 filelog, rather than filtering through all changesets (much faster, but
956 filelog, rather than filtering through all changesets (much faster, but
957 doesn't include deletes or duplicate changes). For a slower, more accurate
957 doesn't include deletes or duplicate changes). For a slower, more accurate
958 result, use ``file()``.
958 result, use ``file()``.
959
959
960 The pattern without explicit kind like ``glob:`` is expected to be
960 The pattern without explicit kind like ``glob:`` is expected to be
961 relative to the current directory and match against a file exactly
961 relative to the current directory and match against a file exactly
962 for efficiency.
962 for efficiency.
963
963
964 If some linkrev points to revisions filtered by the current repoview, we'll
964 If some linkrev points to revisions filtered by the current repoview, we'll
965 work around it to return a non-filtered value.
965 work around it to return a non-filtered value.
966 """
966 """
967
967
968 # i18n: "filelog" is a keyword
968 # i18n: "filelog" is a keyword
969 pat = getstring(x, _("filelog requires a pattern"))
969 pat = getstring(x, _("filelog requires a pattern"))
970 s = set()
970 s = set()
971 cl = repo.changelog
971 cl = repo.changelog
972
972
973 if not matchmod.patkind(pat):
973 if not matchmod.patkind(pat):
974 f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
974 f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
975 files = [f]
975 files = [f]
976 else:
976 else:
977 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
977 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
978 files = (f for f in repo[None] if m(f))
978 files = (f for f in repo[None] if m(f))
979
979
980 for f in files:
980 for f in files:
981 fl = repo.file(f)
981 fl = repo.file(f)
982 known = {}
982 known = {}
983 scanpos = 0
983 scanpos = 0
984 for fr in list(fl):
984 for fr in list(fl):
985 fn = fl.node(fr)
985 fn = fl.node(fr)
986 if fn in known:
986 if fn in known:
987 s.add(known[fn])
987 s.add(known[fn])
988 continue
988 continue
989
989
990 lr = fl.linkrev(fr)
990 lr = fl.linkrev(fr)
991 if lr in cl:
991 if lr in cl:
992 s.add(lr)
992 s.add(lr)
993 elif scanpos is not None:
993 elif scanpos is not None:
994 # lowest matching changeset is filtered, scan further
994 # lowest matching changeset is filtered, scan further
995 # ahead in changelog
995 # ahead in changelog
996 start = max(lr, scanpos) + 1
996 start = max(lr, scanpos) + 1
997 scanpos = None
997 scanpos = None
998 for r in cl.revs(start):
998 for r in cl.revs(start):
999 # minimize parsing of non-matching entries
999 # minimize parsing of non-matching entries
1000 if f in cl.revision(r) and f in cl.readfiles(r):
1000 if f in cl.revision(r) and f in cl.readfiles(r):
1001 try:
1001 try:
1002 # try to use manifest delta fastpath
1002 # try to use manifest delta fastpath
1003 n = repo[r].filenode(f)
1003 n = repo[r].filenode(f)
1004 if n not in known:
1004 if n not in known:
1005 if n == fn:
1005 if n == fn:
1006 s.add(r)
1006 s.add(r)
1007 scanpos = r
1007 scanpos = r
1008 break
1008 break
1009 else:
1009 else:
1010 known[n] = r
1010 known[n] = r
1011 except error.ManifestLookupError:
1011 except error.ManifestLookupError:
1012 # deletion in changelog
1012 # deletion in changelog
1013 continue
1013 continue
1014
1014
1015 return subset & s
1015 return subset & s
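# Illustrative comparison (hypothetical file name), matching the trade-off
# described in the docstring above:
#
#   hg log -r 'filelog(mercurial/revset.py)'   # fast, walks only the filelog
#   hg log -r 'file(mercurial/revset.py)'      # slower, but also sees deletions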
1016
1016
1017 @predicate('first(set, [n])', safe=True)
1017 @predicate('first(set, [n])', safe=True)
1018 def first(repo, subset, x):
1018 def first(repo, subset, x):
1019 """An alias for limit().
1019 """An alias for limit().
1020 """
1020 """
1021 return limit(repo, subset, x)
1021 return limit(repo, subset, x)
1022
1022
1023 def _follow(repo, subset, x, name, followfirst=False):
1023 def _follow(repo, subset, x, name, followfirst=False):
1024 l = getargs(x, 0, 2, _("%s takes no arguments or a pattern "
1024 l = getargs(x, 0, 2, _("%s takes no arguments or a pattern "
1025 "and an optional revset") % name)
1025 "and an optional revset") % name)
1026 c = repo['.']
1026 c = repo['.']
1027 if l:
1027 if l:
1028 x = getstring(l[0], _("%s expected a pattern") % name)
1028 x = getstring(l[0], _("%s expected a pattern") % name)
1029 rev = None
1029 rev = None
1030 if len(l) >= 2:
1030 if len(l) >= 2:
1031 revs = getset(repo, fullreposet(repo), l[1])
1031 revs = getset(repo, fullreposet(repo), l[1])
1032 if len(revs) != 1:
1032 if len(revs) != 1:
1033 raise error.RepoLookupError(
1033 raise error.RepoLookupError(
1034 _("%s expected one starting revision") % name)
1034 _("%s expected one starting revision") % name)
1035 rev = revs.last()
1035 rev = revs.last()
1036 c = repo[rev]
1036 c = repo[rev]
1037 matcher = matchmod.match(repo.root, repo.getcwd(), [x],
1037 matcher = matchmod.match(repo.root, repo.getcwd(), [x],
1038 ctx=repo[rev], default='path')
1038 ctx=repo[rev], default='path')
1039
1039
1040 files = c.manifest().walk(matcher)
1040 files = c.manifest().walk(matcher)
1041
1041
1042 s = set()
1042 s = set()
1043 for fname in files:
1043 for fname in files:
1044 fctx = c[fname]
1044 fctx = c[fname]
1045 s = s.union(set(c.rev() for c in fctx.ancestors(followfirst)))
1045 s = s.union(set(c.rev() for c in fctx.ancestors(followfirst)))
1046 # include the revision responsible for the most recent version
1046 # include the revision responsible for the most recent version
1047 s.add(fctx.introrev())
1047 s.add(fctx.introrev())
1048 else:
1048 else:
1049 s = _revancestors(repo, baseset([c.rev()]), followfirst)
1049 s = _revancestors(repo, baseset([c.rev()]), followfirst)
1050
1050
1051 return subset & s
1051 return subset & s
1052
1052
1053 @predicate('follow([pattern[, startrev]])', safe=True)
1053 @predicate('follow([pattern[, startrev]])', safe=True)
1054 def follow(repo, subset, x):
1054 def follow(repo, subset, x):
1055 """
1055 """
1056 An alias for ``::.`` (ancestors of the working directory's first parent).
1056 An alias for ``::.`` (ancestors of the working directory's first parent).
1057 If pattern is specified, the histories of files matching given
1057 If pattern is specified, the histories of files matching given
1058 pattern in the revision given by startrev are followed, including copies.
1058 pattern in the revision given by startrev are followed, including copies.
1059 """
1059 """
1060 return _follow(repo, subset, x, 'follow')
1060 return _follow(repo, subset, x, 'follow')
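# Illustrative usage (hypothetical arguments): with no arguments follow() is
# just the ancestors of the working directory's first parent; with a pattern
# and an optional starting revision it follows file history, including copies.
#
#   hg log -r 'follow()'
#   hg log -r 'follow(mercurial/revset.py, tip)'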
1061
1061
1062 @predicate('_followfirst', safe=True)
1062 @predicate('_followfirst', safe=True)
1063 def _followfirst(repo, subset, x):
1063 def _followfirst(repo, subset, x):
1064 # ``followfirst([pattern[, startrev]])``
1064 # ``followfirst([pattern[, startrev]])``
1065 # Like ``follow([pattern[, startrev]])`` but follows only the first parent
1065 # Like ``follow([pattern[, startrev]])`` but follows only the first parent
1066 # of every revision or file revision.
1066 # of every revision or file revision.
1067 return _follow(repo, subset, x, '_followfirst', followfirst=True)
1067 return _follow(repo, subset, x, '_followfirst', followfirst=True)
1068
1068
1069 @predicate('followlines(file, fromline, toline[, rev=.])', safe=True)
1069 @predicate('followlines(file, fromline, toline[, startrev=.])', safe=True)
1070 def followlines(repo, subset, x):
1070 def followlines(repo, subset, x):
1071 """Changesets modifying `file` in line range ('fromline', 'toline').
1071 """Changesets modifying `file` in line range ('fromline', 'toline').
1072
1072
1073 Line range corresponds to 'file' content at 'rev' and should hence be
1073 Line range corresponds to 'file' content at 'startrev' and should hence be
1074 consistent with file size. If rev is not specified, working directory's
1074 consistent with file size. If startrev is not specified, working directory's
1075 parent is used.
1075 parent is used.
1076 """
1076 """
1077 from . import context # avoid circular import issues
1077 from . import context # avoid circular import issues
1078
1078
1079 args = getargsdict(x, 'followlines', 'file *lines rev')
1079 args = getargsdict(x, 'followlines', 'file *lines startrev')
1080 if len(args['lines']) != 2:
1080 if len(args['lines']) != 2:
1081 raise error.ParseError(_("followlines takes at least three arguments"))
1081 raise error.ParseError(_("followlines takes at least three arguments"))
1082
1082
1083 rev = '.'
1083 rev = '.'
1084 if 'rev' in args:
1084 if 'startrev' in args:
1085 revs = getset(repo, fullreposet(repo), args['rev'])
1085 revs = getset(repo, fullreposet(repo), args['startrev'])
1086 if len(revs) != 1:
1086 if len(revs) != 1:
1087 raise error.ParseError(
1087 raise error.ParseError(
1088 _("followlines expects exactly one revision"))
1088 _("followlines expects exactly one revision"))
1089 rev = revs.last()
1089 rev = revs.last()
1090
1090
1091 pat = getstring(args['file'], _("followlines requires a pattern"))
1091 pat = getstring(args['file'], _("followlines requires a pattern"))
1092 if not matchmod.patkind(pat):
1092 if not matchmod.patkind(pat):
1093 fname = pathutil.canonpath(repo.root, repo.getcwd(), pat)
1093 fname = pathutil.canonpath(repo.root, repo.getcwd(), pat)
1094 else:
1094 else:
1095 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[rev])
1095 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[rev])
1096 files = [f for f in repo[rev] if m(f)]
1096 files = [f for f in repo[rev] if m(f)]
1097 if len(files) != 1:
1097 if len(files) != 1:
1098 raise error.ParseError(_("followlines expects exactly one file"))
1098 raise error.ParseError(_("followlines expects exactly one file"))
1099 fname = files[0]
1099 fname = files[0]
1100
1100
1101 try:
1101 try:
1102 fromline, toline = [int(getsymbol(a)) for a in args['lines']]
1102 fromline, toline = [int(getsymbol(a)) for a in args['lines']]
1103 except ValueError:
1103 except ValueError:
1104 raise error.ParseError(_("line range bounds must be integers"))
1104 raise error.ParseError(_("line range bounds must be integers"))
1105 if toline - fromline < 0:
1105 if toline - fromline < 0:
1106 raise error.ParseError(_("line range must be positive"))
1106 raise error.ParseError(_("line range must be positive"))
1107 if fromline < 1:
1107 if fromline < 1:
1108 raise error.ParseError(_("fromline must be strictly positive"))
1108 raise error.ParseError(_("fromline must be strictly positive"))
1109 fromline -= 1
1109 fromline -= 1
1110
1110
1111 fctx = repo[rev].filectx(fname)
1111 fctx = repo[rev].filectx(fname)
1112 revs = (c.rev() for c in context.blockancestors(fctx, fromline, toline))
1112 revs = (c.rev() for c in context.blockancestors(fctx, fromline, toline))
1113 return subset & generatorset(revs, iterasc=False)
1113 return subset & generatorset(revs, iterasc=False)
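# Illustrative usage of the renamed keyword argument (hypothetical file and
# line numbers):
#
#   hg log -r 'followlines(mercurial/revset.py, 10, 20)'
#   hg log -r 'followlines(mercurial/revset.py, 10, 20, startrev=tip)'
#
# The second form pins the line range to the file content at 'tip' rather
# than at the working directory's parent.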
1114
1114
1115 @predicate('all()', safe=True)
1115 @predicate('all()', safe=True)
1116 def getall(repo, subset, x):
1116 def getall(repo, subset, x):
1117 """All changesets, the same as ``0:tip``.
1117 """All changesets, the same as ``0:tip``.
1118 """
1118 """
1119 # i18n: "all" is a keyword
1119 # i18n: "all" is a keyword
1120 getargs(x, 0, 0, _("all takes no arguments"))
1120 getargs(x, 0, 0, _("all takes no arguments"))
1121 return subset & spanset(repo) # drop "null" if any
1121 return subset & spanset(repo) # drop "null" if any
1122
1122
1123 @predicate('grep(regex)')
1123 @predicate('grep(regex)')
1124 def grep(repo, subset, x):
1124 def grep(repo, subset, x):
1125 """Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
1125 """Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
1126 to ensure special escape characters are handled correctly. Unlike
1126 to ensure special escape characters are handled correctly. Unlike
1127 ``keyword(string)``, the match is case-sensitive.
1127 ``keyword(string)``, the match is case-sensitive.
1128 """
1128 """
1129 try:
1129 try:
1130 # i18n: "grep" is a keyword
1130 # i18n: "grep" is a keyword
1131 gr = re.compile(getstring(x, _("grep requires a string")))
1131 gr = re.compile(getstring(x, _("grep requires a string")))
1132 except re.error as e:
1132 except re.error as e:
1133 raise error.ParseError(_('invalid match pattern: %s') % e)
1133 raise error.ParseError(_('invalid match pattern: %s') % e)
1134
1134
1135 def matches(x):
1135 def matches(x):
1136 c = repo[x]
1136 c = repo[x]
1137 for e in c.files() + [c.user(), c.description()]:
1137 for e in c.files() + [c.user(), c.description()]:
1138 if gr.search(e):
1138 if gr.search(e):
1139 return True
1139 return True
1140 return False
1140 return False
1141
1141
1142 return subset.filter(matches, condrepr=('<grep %r>', gr.pattern))
1142 return subset.filter(matches, condrepr=('<grep %r>', gr.pattern))
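# Illustrative usage (hypothetical pattern): unlike keyword(), grep() takes a
# regular expression and matches case-sensitively.
#
#   hg log -r "grep(r'issue\d+')"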
1143
1143
1144 @predicate('_matchfiles', safe=True)
1144 @predicate('_matchfiles', safe=True)
1145 def _matchfiles(repo, subset, x):
1145 def _matchfiles(repo, subset, x):
1146 # _matchfiles takes a revset list of prefixed arguments:
1146 # _matchfiles takes a revset list of prefixed arguments:
1147 #
1147 #
1148 # [p:foo, i:bar, x:baz]
1148 # [p:foo, i:bar, x:baz]
1149 #
1149 #
1150 # builds a match object from them and filters subset. Allowed
1150 # builds a match object from them and filters subset. Allowed
1151 # prefixes are 'p:' for regular patterns, 'i:' for include
1151 # prefixes are 'p:' for regular patterns, 'i:' for include
1152 # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
1152 # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
1153 # a revision identifier, or the empty string to reference the
1153 # a revision identifier, or the empty string to reference the
1154 # working directory, from which the match object is
1154 # working directory, from which the match object is
1155 # initialized. Use 'd:' to set the default matching mode, default
1155 # initialized. Use 'd:' to set the default matching mode, default
1156 # to 'glob'. At most one 'r:' and 'd:' argument can be passed.
1156 # to 'glob'. At most one 'r:' and 'd:' argument can be passed.
1157
1157
1158 l = getargs(x, 1, -1, "_matchfiles requires at least one argument")
1158 l = getargs(x, 1, -1, "_matchfiles requires at least one argument")
1159 pats, inc, exc = [], [], []
1159 pats, inc, exc = [], [], []
1160 rev, default = None, None
1160 rev, default = None, None
1161 for arg in l:
1161 for arg in l:
1162 s = getstring(arg, "_matchfiles requires string arguments")
1162 s = getstring(arg, "_matchfiles requires string arguments")
1163 prefix, value = s[:2], s[2:]
1163 prefix, value = s[:2], s[2:]
1164 if prefix == 'p:':
1164 if prefix == 'p:':
1165 pats.append(value)
1165 pats.append(value)
1166 elif prefix == 'i:':
1166 elif prefix == 'i:':
1167 inc.append(value)
1167 inc.append(value)
1168 elif prefix == 'x:':
1168 elif prefix == 'x:':
1169 exc.append(value)
1169 exc.append(value)
1170 elif prefix == 'r:':
1170 elif prefix == 'r:':
1171 if rev is not None:
1171 if rev is not None:
1172 raise error.ParseError('_matchfiles expected at most one '
1172 raise error.ParseError('_matchfiles expected at most one '
1173 'revision')
1173 'revision')
1174 if value != '': # empty means working directory; leave rev as None
1174 if value != '': # empty means working directory; leave rev as None
1175 rev = value
1175 rev = value
1176 elif prefix == 'd:':
1176 elif prefix == 'd:':
1177 if default is not None:
1177 if default is not None:
1178 raise error.ParseError('_matchfiles expected at most one '
1178 raise error.ParseError('_matchfiles expected at most one '
1179 'default mode')
1179 'default mode')
1180 default = value
1180 default = value
1181 else:
1181 else:
1182 raise error.ParseError('invalid _matchfiles prefix: %s' % prefix)
1182 raise error.ParseError('invalid _matchfiles prefix: %s' % prefix)
1183 if not default:
1183 if not default:
1184 default = 'glob'
1184 default = 'glob'
1185
1185
1186 m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
1186 m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
1187 exclude=exc, ctx=repo[rev], default=default)
1187 exclude=exc, ctx=repo[rev], default=default)
1188
1188
1189 # This directly reads the changelog data, as creating a changectx for all
1189 # This directly reads the changelog data, as creating a changectx for all
1190 # revisions is quite expensive.
1190 # revisions is quite expensive.
1191 getfiles = repo.changelog.readfiles
1191 getfiles = repo.changelog.readfiles
1192 wdirrev = node.wdirrev
1192 wdirrev = node.wdirrev
1193 def matches(x):
1193 def matches(x):
1194 if x == wdirrev:
1194 if x == wdirrev:
1195 files = repo[x].files()
1195 files = repo[x].files()
1196 else:
1196 else:
1197 files = getfiles(x)
1197 files = getfiles(x)
1198 for f in files:
1198 for f in files:
1199 if m(f):
1199 if m(f):
1200 return True
1200 return True
1201 return False
1201 return False
1202
1202
1203 return subset.filter(matches,
1203 return subset.filter(matches,
1204 condrepr=('<matchfiles patterns=%r, include=%r '
1204 condrepr=('<matchfiles patterns=%r, include=%r '
1205 'exclude=%r, default=%r, rev=%r>',
1205 'exclude=%r, default=%r, rev=%r>',
1206 pats, inc, exc, default, rev))
1206 pats, inc, exc, default, rev))
1207
1207
1208 @predicate('file(pattern)', safe=True)
1208 @predicate('file(pattern)', safe=True)
1209 def hasfile(repo, subset, x):
1209 def hasfile(repo, subset, x):
1210 """Changesets affecting files matched by pattern.
1210 """Changesets affecting files matched by pattern.
1211
1211
1212 For a faster but less accurate result, consider using ``filelog()``
1212 For a faster but less accurate result, consider using ``filelog()``
1213 instead.
1213 instead.
1214
1214
1215 This predicate uses ``glob:`` as the default kind of pattern.
1215 This predicate uses ``glob:`` as the default kind of pattern.
1216 """
1216 """
1217 # i18n: "file" is a keyword
1217 # i18n: "file" is a keyword
1218 pat = getstring(x, _("file requires a pattern"))
1218 pat = getstring(x, _("file requires a pattern"))
1219 return _matchfiles(repo, subset, ('string', 'p:' + pat))
1219 return _matchfiles(repo, subset, ('string', 'p:' + pat))
1220
1220
1221 @predicate('head()', safe=True)
1221 @predicate('head()', safe=True)
1222 def head(repo, subset, x):
1222 def head(repo, subset, x):
1223 """Changeset is a named branch head.
1223 """Changeset is a named branch head.
1224 """
1224 """
1225 # i18n: "head" is a keyword
1225 # i18n: "head" is a keyword
1226 getargs(x, 0, 0, _("head takes no arguments"))
1226 getargs(x, 0, 0, _("head takes no arguments"))
1227 hs = set()
1227 hs = set()
1228 cl = repo.changelog
1228 cl = repo.changelog
1229 for ls in repo.branchmap().itervalues():
1229 for ls in repo.branchmap().itervalues():
1230 hs.update(cl.rev(h) for h in ls)
1230 hs.update(cl.rev(h) for h in ls)
1231 return subset & baseset(hs)
1231 return subset & baseset(hs)
1232
1232
1233 @predicate('heads(set)', safe=True)
1233 @predicate('heads(set)', safe=True)
1234 def heads(repo, subset, x):
1234 def heads(repo, subset, x):
1235 """Members of set with no children in set.
1235 """Members of set with no children in set.
1236 """
1236 """
1237 s = getset(repo, subset, x)
1237 s = getset(repo, subset, x)
1238 ps = parents(repo, subset, x)
1238 ps = parents(repo, subset, x)
1239 return s - ps
1239 return s - ps
1240
1240
1241 @predicate('hidden()', safe=True)
1241 @predicate('hidden()', safe=True)
1242 def hidden(repo, subset, x):
1242 def hidden(repo, subset, x):
1243 """Hidden changesets.
1243 """Hidden changesets.
1244 """
1244 """
1245 # i18n: "hidden" is a keyword
1245 # i18n: "hidden" is a keyword
1246 getargs(x, 0, 0, _("hidden takes no arguments"))
1246 getargs(x, 0, 0, _("hidden takes no arguments"))
1247 hiddenrevs = repoview.filterrevs(repo, 'visible')
1247 hiddenrevs = repoview.filterrevs(repo, 'visible')
1248 return subset & hiddenrevs
1248 return subset & hiddenrevs
1249
1249
1250 @predicate('keyword(string)', safe=True)
1250 @predicate('keyword(string)', safe=True)
1251 def keyword(repo, subset, x):
1251 def keyword(repo, subset, x):
1252 """Search commit message, user name, and names of changed files for
1252 """Search commit message, user name, and names of changed files for
1253 string. The match is case-insensitive.
1253 string. The match is case-insensitive.
1254
1254
1255 For a regular expression or case sensitive search of these fields, use
1255 For a regular expression or case sensitive search of these fields, use
1256 ``grep(regex)``.
1256 ``grep(regex)``.
1257 """
1257 """
1258 # i18n: "keyword" is a keyword
1258 # i18n: "keyword" is a keyword
1259 kw = encoding.lower(getstring(x, _("keyword requires a string")))
1259 kw = encoding.lower(getstring(x, _("keyword requires a string")))
1260
1260
1261 def matches(r):
1261 def matches(r):
1262 c = repo[r]
1262 c = repo[r]
1263 return any(kw in encoding.lower(t)
1263 return any(kw in encoding.lower(t)
1264 for t in c.files() + [c.user(), c.description()])
1264 for t in c.files() + [c.user(), c.description()])
1265
1265
1266 return subset.filter(matches, condrepr=('<keyword %r>', kw))
1266 return subset.filter(matches, condrepr=('<keyword %r>', kw))
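# Illustrative usage (hypothetical string): keyword() is a case-insensitive
# substring search over the description, user name and changed file names.
#
#   hg log -r 'keyword(typo) and not merge()'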
1267
1267
1268 @predicate('limit(set[, n[, offset]])', safe=True)
1268 @predicate('limit(set[, n[, offset]])', safe=True)
1269 def limit(repo, subset, x):
1269 def limit(repo, subset, x):
1270 """First n members of set, defaulting to 1, starting from offset.
1270 """First n members of set, defaulting to 1, starting from offset.
1271 """
1271 """
1272 args = getargsdict(x, 'limit', 'set n offset')
1272 args = getargsdict(x, 'limit', 'set n offset')
1273 if 'set' not in args:
1273 if 'set' not in args:
1274 # i18n: "limit" is a keyword
1274 # i18n: "limit" is a keyword
1275 raise error.ParseError(_("limit requires one to three arguments"))
1275 raise error.ParseError(_("limit requires one to three arguments"))
1276 try:
1276 try:
1277 lim, ofs = 1, 0
1277 lim, ofs = 1, 0
1278 if 'n' in args:
1278 if 'n' in args:
1279 # i18n: "limit" is a keyword
1279 # i18n: "limit" is a keyword
1280 lim = int(getstring(args['n'], _("limit requires a number")))
1280 lim = int(getstring(args['n'], _("limit requires a number")))
1281 if 'offset' in args:
1281 if 'offset' in args:
1282 # i18n: "limit" is a keyword
1282 # i18n: "limit" is a keyword
1283 ofs = int(getstring(args['offset'], _("limit requires a number")))
1283 ofs = int(getstring(args['offset'], _("limit requires a number")))
1284 if ofs < 0:
1284 if ofs < 0:
1285 raise error.ParseError(_("negative offset"))
1285 raise error.ParseError(_("negative offset"))
1286 except (TypeError, ValueError):
1286 except (TypeError, ValueError):
1287 # i18n: "limit" is a keyword
1287 # i18n: "limit" is a keyword
1288 raise error.ParseError(_("limit expects a number"))
1288 raise error.ParseError(_("limit expects a number"))
1289 os = getset(repo, fullreposet(repo), args['set'])
1289 os = getset(repo, fullreposet(repo), args['set'])
1290 result = []
1290 result = []
1291 it = iter(os)
1291 it = iter(os)
1292 for x in xrange(ofs):
1292 for x in xrange(ofs):
1293 y = next(it, None)
1293 y = next(it, None)
1294 if y is None:
1294 if y is None:
1295 break
1295 break
1296 for x in xrange(lim):
1296 for x in xrange(lim):
1297 y = next(it, None)
1297 y = next(it, None)
1298 if y is None:
1298 if y is None:
1299 break
1299 break
1300 elif y in subset:
1300 elif y in subset:
1301 result.append(y)
1301 result.append(y)
1302 return baseset(result, datarepr=('<limit n=%d, offset=%d, %r, %r>',
1302 return baseset(result, datarepr=('<limit n=%d, offset=%d, %r, %r>',
1303 lim, ofs, subset, os))
1303 lim, ofs, subset, os))
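# Illustrative usage (hypothetical set); first() above is an alias for this
# predicate:
#
#   hg log -r 'limit(branch(default), 5)'       # first five members of the set
#   hg log -r 'limit(branch(default), 5, 10)'   # five more, skipping the first ten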
1304
1304
1305 @predicate('last(set, [n])', safe=True)
1305 @predicate('last(set, [n])', safe=True)
1306 def last(repo, subset, x):
1306 def last(repo, subset, x):
1307 """Last n members of set, defaulting to 1.
1307 """Last n members of set, defaulting to 1.
1308 """
1308 """
1309 # i18n: "last" is a keyword
1309 # i18n: "last" is a keyword
1310 l = getargs(x, 1, 2, _("last requires one or two arguments"))
1310 l = getargs(x, 1, 2, _("last requires one or two arguments"))
1311 try:
1311 try:
1312 lim = 1
1312 lim = 1
1313 if len(l) == 2:
1313 if len(l) == 2:
1314 # i18n: "last" is a keyword
1314 # i18n: "last" is a keyword
1315 lim = int(getstring(l[1], _("last requires a number")))
1315 lim = int(getstring(l[1], _("last requires a number")))
1316 except (TypeError, ValueError):
1316 except (TypeError, ValueError):
1317 # i18n: "last" is a keyword
1317 # i18n: "last" is a keyword
1318 raise error.ParseError(_("last expects a number"))
1318 raise error.ParseError(_("last expects a number"))
1319 os = getset(repo, fullreposet(repo), l[0])
1319 os = getset(repo, fullreposet(repo), l[0])
1320 os.reverse()
1320 os.reverse()
1321 result = []
1321 result = []
1322 it = iter(os)
1322 it = iter(os)
1323 for x in xrange(lim):
1323 for x in xrange(lim):
1324 y = next(it, None)
1324 y = next(it, None)
1325 if y is None:
1325 if y is None:
1326 break
1326 break
1327 elif y in subset:
1327 elif y in subset:
1328 result.append(y)
1328 result.append(y)
1329 return baseset(result, datarepr=('<last n=%d, %r, %r>', lim, subset, os))
1329 return baseset(result, datarepr=('<last n=%d, %r, %r>', lim, subset, os))
1330
1330
1331 @predicate('max(set)', safe=True)
1331 @predicate('max(set)', safe=True)
1332 def maxrev(repo, subset, x):
1332 def maxrev(repo, subset, x):
1333 """Changeset with highest revision number in set.
1333 """Changeset with highest revision number in set.
1334 """
1334 """
1335 os = getset(repo, fullreposet(repo), x)
1335 os = getset(repo, fullreposet(repo), x)
1336 try:
1336 try:
1337 m = os.max()
1337 m = os.max()
1338 if m in subset:
1338 if m in subset:
1339 return baseset([m], datarepr=('<max %r, %r>', subset, os))
1339 return baseset([m], datarepr=('<max %r, %r>', subset, os))
1340 except ValueError:
1340 except ValueError:
1341 # os.max() throws a ValueError when the collection is empty.
1341 # os.max() throws a ValueError when the collection is empty.
1342 # Same as python's max().
1342 # Same as python's max().
1343 pass
1343 pass
1344 return baseset(datarepr=('<max %r, %r>', subset, os))
1344 return baseset(datarepr=('<max %r, %r>', subset, os))
1345
1345
1346 @predicate('merge()', safe=True)
1346 @predicate('merge()', safe=True)
1347 def merge(repo, subset, x):
1347 def merge(repo, subset, x):
1348 """Changeset is a merge changeset.
1348 """Changeset is a merge changeset.
1349 """
1349 """
1350 # i18n: "merge" is a keyword
1350 # i18n: "merge" is a keyword
1351 getargs(x, 0, 0, _("merge takes no arguments"))
1351 getargs(x, 0, 0, _("merge takes no arguments"))
1352 cl = repo.changelog
1352 cl = repo.changelog
1353 return subset.filter(lambda r: cl.parentrevs(r)[1] != -1,
1353 return subset.filter(lambda r: cl.parentrevs(r)[1] != -1,
1354 condrepr='<merge>')
1354 condrepr='<merge>')
1355
1355
1356 @predicate('branchpoint()', safe=True)
1356 @predicate('branchpoint()', safe=True)
1357 def branchpoint(repo, subset, x):
1357 def branchpoint(repo, subset, x):
1358 """Changesets with more than one child.
1358 """Changesets with more than one child.
1359 """
1359 """
1360 # i18n: "branchpoint" is a keyword
1360 # i18n: "branchpoint" is a keyword
1361 getargs(x, 0, 0, _("branchpoint takes no arguments"))
1361 getargs(x, 0, 0, _("branchpoint takes no arguments"))
1362 cl = repo.changelog
1362 cl = repo.changelog
1363 if not subset:
1363 if not subset:
1364 return baseset()
1364 return baseset()
1365 # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
1365 # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
1366 # (and if it is not, it should.)
1366 # (and if it is not, it should.)
1367 baserev = min(subset)
1367 baserev = min(subset)
1368 parentscount = [0]*(len(repo) - baserev)
1368 parentscount = [0]*(len(repo) - baserev)
1369 for r in cl.revs(start=baserev + 1):
1369 for r in cl.revs(start=baserev + 1):
1370 for p in cl.parentrevs(r):
1370 for p in cl.parentrevs(r):
1371 if p >= baserev:
1371 if p >= baserev:
1372 parentscount[p - baserev] += 1
1372 parentscount[p - baserev] += 1
1373 return subset.filter(lambda r: parentscount[r - baserev] > 1,
1373 return subset.filter(lambda r: parentscount[r - baserev] > 1,
1374 condrepr='<branchpoint>')
1374 condrepr='<branchpoint>')
1375
1375
1376 @predicate('min(set)', safe=True)
1376 @predicate('min(set)', safe=True)
1377 def minrev(repo, subset, x):
1377 def minrev(repo, subset, x):
1378 """Changeset with lowest revision number in set.
1378 """Changeset with lowest revision number in set.
1379 """
1379 """
1380 os = getset(repo, fullreposet(repo), x)
1380 os = getset(repo, fullreposet(repo), x)
1381 try:
1381 try:
1382 m = os.min()
1382 m = os.min()
1383 if m in subset:
1383 if m in subset:
1384 return baseset([m], datarepr=('<min %r, %r>', subset, os))
1384 return baseset([m], datarepr=('<min %r, %r>', subset, os))
1385 except ValueError:
1385 except ValueError:
1386 # os.min() throws a ValueError when the collection is empty.
1386 # os.min() throws a ValueError when the collection is empty.
1387 # Same as python's min().
1387 # Same as python's min().
1388 pass
1388 pass
1389 return baseset(datarepr=('<min %r, %r>', subset, os))
1389 return baseset(datarepr=('<min %r, %r>', subset, os))
1390
1390
1391 @predicate('modifies(pattern)', safe=True)
1391 @predicate('modifies(pattern)', safe=True)
1392 def modifies(repo, subset, x):
1392 def modifies(repo, subset, x):
1393 """Changesets modifying files matched by pattern.
1393 """Changesets modifying files matched by pattern.
1394
1394
1395 The pattern without explicit kind like ``glob:`` is expected to be
1395 The pattern without explicit kind like ``glob:`` is expected to be
1396 relative to the current directory and match against a file or a
1396 relative to the current directory and match against a file or a
1397 directory.
1397 directory.
1398 """
1398 """
1399 # i18n: "modifies" is a keyword
1399 # i18n: "modifies" is a keyword
1400 pat = getstring(x, _("modifies requires a pattern"))
1400 pat = getstring(x, _("modifies requires a pattern"))
1401 return checkstatus(repo, subset, pat, 0)
1401 return checkstatus(repo, subset, pat, 0)
1402
1402
1403 @predicate('named(namespace)')
1403 @predicate('named(namespace)')
1404 def named(repo, subset, x):
1404 def named(repo, subset, x):
1405 """The changesets in a given namespace.
1405 """The changesets in a given namespace.
1406
1406
1407 Pattern matching is supported for `namespace`. See
1407 Pattern matching is supported for `namespace`. See
1408 :hg:`help revisions.patterns`.
1408 :hg:`help revisions.patterns`.
1409 """
1409 """
1410 # i18n: "named" is a keyword
1410 # i18n: "named" is a keyword
1411 args = getargs(x, 1, 1, _('named requires a namespace argument'))
1411 args = getargs(x, 1, 1, _('named requires a namespace argument'))
1412
1412
1413 ns = getstring(args[0],
1413 ns = getstring(args[0],
1414 # i18n: "named" is a keyword
1414 # i18n: "named" is a keyword
1415 _('the argument to named must be a string'))
1415 _('the argument to named must be a string'))
1416 kind, pattern, matcher = util.stringmatcher(ns)
1416 kind, pattern, matcher = util.stringmatcher(ns)
1417 namespaces = set()
1417 namespaces = set()
1418 if kind == 'literal':
1418 if kind == 'literal':
1419 if pattern not in repo.names:
1419 if pattern not in repo.names:
1420 raise error.RepoLookupError(_("namespace '%s' does not exist")
1420 raise error.RepoLookupError(_("namespace '%s' does not exist")
1421 % ns)
1421 % ns)
1422 namespaces.add(repo.names[pattern])
1422 namespaces.add(repo.names[pattern])
1423 else:
1423 else:
1424 for name, ns in repo.names.iteritems():
1424 for name, ns in repo.names.iteritems():
1425 if matcher(name):
1425 if matcher(name):
1426 namespaces.add(ns)
1426 namespaces.add(ns)
1427 if not namespaces:
1427 if not namespaces:
1428 raise error.RepoLookupError(_("no namespace exists"
1428 raise error.RepoLookupError(_("no namespace exists"
1429 " that match '%s'") % pattern)
1429 " that match '%s'") % pattern)
1430
1430
1431 names = set()
1431 names = set()
1432 for ns in namespaces:
1432 for ns in namespaces:
1433 for name in ns.listnames(repo):
1433 for name in ns.listnames(repo):
1434 if name not in ns.deprecated:
1434 if name not in ns.deprecated:
1435 names.update(repo[n].rev() for n in ns.nodes(repo, name))
1435 names.update(repo[n].rev() for n in ns.nodes(repo, name))
1436
1436
1437 names -= set([node.nullrev])
1437 names -= set([node.nullrev])
1438 return subset & names
1438 return subset & names
1439
1439
1440 @predicate('id(string)', safe=True)
1440 @predicate('id(string)', safe=True)
1441 def node_(repo, subset, x):
1441 def node_(repo, subset, x):
1442 """Revision non-ambiguously specified by the given hex string prefix.
1442 """Revision non-ambiguously specified by the given hex string prefix.
1443 """
1443 """
1444 # i18n: "id" is a keyword
1444 # i18n: "id" is a keyword
1445 l = getargs(x, 1, 1, _("id requires one argument"))
1445 l = getargs(x, 1, 1, _("id requires one argument"))
1446 # i18n: "id" is a keyword
1446 # i18n: "id" is a keyword
1447 n = getstring(l[0], _("id requires a string"))
1447 n = getstring(l[0], _("id requires a string"))
1448 if len(n) == 40:
1448 if len(n) == 40:
1449 try:
1449 try:
1450 rn = repo.changelog.rev(node.bin(n))
1450 rn = repo.changelog.rev(node.bin(n))
1451 except (LookupError, TypeError):
1451 except (LookupError, TypeError):
1452 rn = None
1452 rn = None
1453 else:
1453 else:
1454 rn = None
1454 rn = None
1455 pm = repo.changelog._partialmatch(n)
1455 pm = repo.changelog._partialmatch(n)
1456 if pm is not None:
1456 if pm is not None:
1457 rn = repo.changelog.rev(pm)
1457 rn = repo.changelog.rev(pm)
1458
1458
1459 if rn is None:
1459 if rn is None:
1460 return baseset()
1460 return baseset()
1461 result = baseset([rn])
1461 result = baseset([rn])
1462 return result & subset
1462 return result & subset
1463
1463
1464 @predicate('obsolete()', safe=True)
1464 @predicate('obsolete()', safe=True)
1465 def obsolete(repo, subset, x):
1465 def obsolete(repo, subset, x):
1466 """Mutable changeset with a newer version."""
1466 """Mutable changeset with a newer version."""
1467 # i18n: "obsolete" is a keyword
1467 # i18n: "obsolete" is a keyword
1468 getargs(x, 0, 0, _("obsolete takes no arguments"))
1468 getargs(x, 0, 0, _("obsolete takes no arguments"))
1469 obsoletes = obsmod.getrevs(repo, 'obsolete')
1469 obsoletes = obsmod.getrevs(repo, 'obsolete')
1470 return subset & obsoletes
1470 return subset & obsoletes
1471
1471
1472 @predicate('only(set, [set])', safe=True)
1472 @predicate('only(set, [set])', safe=True)
1473 def only(repo, subset, x):
1473 def only(repo, subset, x):
1474 """Changesets that are ancestors of the first set that are not ancestors
1474 """Changesets that are ancestors of the first set that are not ancestors
1475 of any other head in the repo. If a second set is specified, the result
1475 of any other head in the repo. If a second set is specified, the result
1476 is ancestors of the first set that are not ancestors of the second set
1476 is ancestors of the first set that are not ancestors of the second set
1477 (i.e. ::<set1> - ::<set2>).
1477 (i.e. ::<set1> - ::<set2>).
1478 """
1478 """
1479 cl = repo.changelog
1479 cl = repo.changelog
1480 # i18n: "only" is a keyword
1480 # i18n: "only" is a keyword
1481 args = getargs(x, 1, 2, _('only takes one or two arguments'))
1481 args = getargs(x, 1, 2, _('only takes one or two arguments'))
1482 include = getset(repo, fullreposet(repo), args[0])
1482 include = getset(repo, fullreposet(repo), args[0])
1483 if len(args) == 1:
1483 if len(args) == 1:
1484 if not include:
1484 if not include:
1485 return baseset()
1485 return baseset()
1486
1486
1487 descendants = set(_revdescendants(repo, include, False))
1487 descendants = set(_revdescendants(repo, include, False))
1488 exclude = [rev for rev in cl.headrevs()
1488 exclude = [rev for rev in cl.headrevs()
1489 if not rev in descendants and not rev in include]
1489 if not rev in descendants and not rev in include]
1490 else:
1490 else:
1491 exclude = getset(repo, fullreposet(repo), args[1])
1491 exclude = getset(repo, fullreposet(repo), args[1])
1492
1492
1493 results = set(cl.findmissingrevs(common=exclude, heads=include))
1493 results = set(cl.findmissingrevs(common=exclude, heads=include))
1494 # XXX we should turn this into a baseset instead of a set, smartset may do
1494 # XXX we should turn this into a baseset instead of a set, smartset may do
1495 # some optimizations from the fact this is a baseset.
1495 # some optimizations from the fact this is a baseset.
1496 return subset & results
1496 return subset & results
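# Illustrative usage (hypothetical branch names), mirroring the
# ``::<set1> - ::<set2>`` equivalence noted in the docstring:
#
#   hg log -r 'only(feature, default)'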
1497
1497
1498 @predicate('origin([set])', safe=True)
1498 @predicate('origin([set])', safe=True)
1499 def origin(repo, subset, x):
1499 def origin(repo, subset, x):
1500 """
1500 """
1501 Changesets that were specified as a source for the grafts, transplants or
1501 Changesets that were specified as a source for the grafts, transplants or
1502 rebases that created the given revisions. Omitting the optional set is the
1502 rebases that created the given revisions. Omitting the optional set is the
1503 same as passing all(). If a changeset created by these operations is itself
1503 same as passing all(). If a changeset created by these operations is itself
1504 specified as a source for one of these operations, only the source changeset
1504 specified as a source for one of these operations, only the source changeset
1505 for the first operation is selected.
1505 for the first operation is selected.
1506 """
1506 """
1507 if x is not None:
1507 if x is not None:
1508 dests = getset(repo, fullreposet(repo), x)
1508 dests = getset(repo, fullreposet(repo), x)
1509 else:
1509 else:
1510 dests = fullreposet(repo)
1510 dests = fullreposet(repo)
1511
1511
1512 def _firstsrc(rev):
1512 def _firstsrc(rev):
1513 src = _getrevsource(repo, rev)
1513 src = _getrevsource(repo, rev)
1514 if src is None:
1514 if src is None:
1515 return None
1515 return None
1516
1516
1517 while True:
1517 while True:
1518 prev = _getrevsource(repo, src)
1518 prev = _getrevsource(repo, src)
1519
1519
1520 if prev is None:
1520 if prev is None:
1521 return src
1521 return src
1522 src = prev
1522 src = prev
1523
1523
1524 o = set([_firstsrc(r) for r in dests])
1524 o = set([_firstsrc(r) for r in dests])
1525 o -= set([None])
1525 o -= set([None])
1526 # XXX we should turn this into a baseset instead of a set, smartset may do
1526 # XXX we should turn this into a baseset instead of a set, smartset may do
1527 # some optimizations from the fact this is a baseset.
1527 # some optimizations from the fact this is a baseset.
1528 return subset & o
1528 return subset & o
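# Illustrative usage (hypothetical set): origin() maps grafted, transplanted
# or rebased changesets back to the first source changeset in the chain.
#
#   hg log -r 'origin(branch(stable))'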
1529
1529
1530 @predicate('outgoing([path])', safe=True)
1530 @predicate('outgoing([path])', safe=True)
1531 def outgoing(repo, subset, x):
1531 def outgoing(repo, subset, x):
1532 """Changesets not found in the specified destination repository, or the
1532 """Changesets not found in the specified destination repository, or the
1533 default push location.
1533 default push location.
1534 """
1534 """
1535 # Avoid cycles.
1535 # Avoid cycles.
1536 from . import (
1536 from . import (
1537 discovery,
1537 discovery,
1538 hg,
1538 hg,
1539 )
1539 )
1540 # i18n: "outgoing" is a keyword
1540 # i18n: "outgoing" is a keyword
1541 l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
1541 l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
1542 # i18n: "outgoing" is a keyword
1542 # i18n: "outgoing" is a keyword
1543 dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
1543 dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
1544 dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
1544 dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
1545 dest, branches = hg.parseurl(dest)
1545 dest, branches = hg.parseurl(dest)
1546 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1546 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1547 if revs:
1547 if revs:
1548 revs = [repo.lookup(rev) for rev in revs]
1548 revs = [repo.lookup(rev) for rev in revs]
1549 other = hg.peer(repo, {}, dest)
1549 other = hg.peer(repo, {}, dest)
1550 repo.ui.pushbuffer()
1550 repo.ui.pushbuffer()
1551 outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
1551 outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
1552 repo.ui.popbuffer()
1552 repo.ui.popbuffer()
1553 cl = repo.changelog
1553 cl = repo.changelog
1554 o = set([cl.rev(r) for r in outgoing.missing])
1554 o = set([cl.rev(r) for r in outgoing.missing])
1555 return subset & o
1555 return subset & o
1556
1556
1557 @predicate('p1([set])', safe=True)
1557 @predicate('p1([set])', safe=True)
1558 def p1(repo, subset, x):
1558 def p1(repo, subset, x):
1559 """First parent of changesets in set, or the working directory.
1559 """First parent of changesets in set, or the working directory.
1560 """
1560 """
1561 if x is None:
1561 if x is None:
1562 p = repo[x].p1().rev()
1562 p = repo[x].p1().rev()
1563 if p >= 0:
1563 if p >= 0:
1564 return subset & baseset([p])
1564 return subset & baseset([p])
1565 return baseset()
1565 return baseset()
1566
1566
1567 ps = set()
1567 ps = set()
1568 cl = repo.changelog
1568 cl = repo.changelog
1569 for r in getset(repo, fullreposet(repo), x):
1569 for r in getset(repo, fullreposet(repo), x):
1570 ps.add(cl.parentrevs(r)[0])
1570 ps.add(cl.parentrevs(r)[0])
1571 ps -= set([node.nullrev])
1571 ps -= set([node.nullrev])
1572 # XXX we should turn this into a baseset instead of a set, smartset may do
1572 # XXX we should turn this into a baseset instead of a set, smartset may do
1573 # some optimizations from the fact this is a baseset.
1573 # some optimizations from the fact this is a baseset.
1574 return subset & ps
1574 return subset & ps
1575
1575
1576 @predicate('p2([set])', safe=True)
1576 @predicate('p2([set])', safe=True)
1577 def p2(repo, subset, x):
1577 def p2(repo, subset, x):
1578 """Second parent of changesets in set, or the working directory.
1578 """Second parent of changesets in set, or the working directory.
1579 """
1579 """
1580 if x is None:
1580 if x is None:
1581 ps = repo[x].parents()
1581 ps = repo[x].parents()
1582 try:
1582 try:
1583 p = ps[1].rev()
1583 p = ps[1].rev()
1584 if p >= 0:
1584 if p >= 0:
1585 return subset & baseset([p])
1585 return subset & baseset([p])
1586 return baseset()
1586 return baseset()
1587 except IndexError:
1587 except IndexError:
1588 return baseset()
1588 return baseset()
1589
1589
1590 ps = set()
1590 ps = set()
1591 cl = repo.changelog
1591 cl = repo.changelog
1592 for r in getset(repo, fullreposet(repo), x):
1592 for r in getset(repo, fullreposet(repo), x):
1593 ps.add(cl.parentrevs(r)[1])
1593 ps.add(cl.parentrevs(r)[1])
1594 ps -= set([node.nullrev])
1594 ps -= set([node.nullrev])
1595 # XXX we should turn this into a baseset instead of a set, smartset may do
1595 # XXX we should turn this into a baseset instead of a set, smartset may do
1596 # some optimizations from the fact this is a baseset.
1596 # some optimizations from the fact this is a baseset.
1597 return subset & ps
1597 return subset & ps
1598
1598
1599 def parentpost(repo, subset, x, order):
1599 def parentpost(repo, subset, x, order):
1600 return p1(repo, subset, x)
1600 return p1(repo, subset, x)
1601
1601
1602 @predicate('parents([set])', safe=True)
1602 @predicate('parents([set])', safe=True)
1603 def parents(repo, subset, x):
1603 def parents(repo, subset, x):
1604 """
1604 """
1605 The set of all parents for all changesets in set, or the working directory.
1605 The set of all parents for all changesets in set, or the working directory.
1606 """
1606 """
1607 if x is None:
1607 if x is None:
1608 ps = set(p.rev() for p in repo[x].parents())
1608 ps = set(p.rev() for p in repo[x].parents())
1609 else:
1609 else:
1610 ps = set()
1610 ps = set()
1611 cl = repo.changelog
1611 cl = repo.changelog
1612 up = ps.update
1612 up = ps.update
1613 parentrevs = cl.parentrevs
1613 parentrevs = cl.parentrevs
1614 for r in getset(repo, fullreposet(repo), x):
1614 for r in getset(repo, fullreposet(repo), x):
1615 if r == node.wdirrev:
1615 if r == node.wdirrev:
1616 up(p.rev() for p in repo[r].parents())
1616 up(p.rev() for p in repo[r].parents())
1617 else:
1617 else:
1618 up(parentrevs(r))
1618 up(parentrevs(r))
1619 ps -= set([node.nullrev])
1619 ps -= set([node.nullrev])
1620 return subset & ps
1620 return subset & ps
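# Illustrative usage (hypothetical queries): p1()/p2() select only the first
# or second parent, while parents() yields both.
#
#   hg log -r 'parents(merge())'   # both parents of every merge changeset
#   hg log -r 'p2(merge())'        # only the merged-in (second) parents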
1621
1621
1622 def _phase(repo, subset, target):
1622 def _phase(repo, subset, target):
1623 """helper to select all rev in phase <target>"""
1623 """helper to select all rev in phase <target>"""
1624 repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
1624 repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
1625 if repo._phasecache._phasesets:
1625 if repo._phasecache._phasesets:
1626 s = repo._phasecache._phasesets[target] - repo.changelog.filteredrevs
1626 s = repo._phasecache._phasesets[target] - repo.changelog.filteredrevs
1627 s = baseset(s)
1627 s = baseset(s)
1628 s.sort() # sets are unordered, so we enforce ascending order
1628 s.sort() # sets are unordered, so we enforce ascending order
1629 return subset & s
1629 return subset & s
1630 else:
1630 else:
1631 phase = repo._phasecache.phase
1631 phase = repo._phasecache.phase
1632 condition = lambda r: phase(repo, r) == target
1632 condition = lambda r: phase(repo, r) == target
1633 return subset.filter(condition, condrepr=('<phase %r>', target),
1633 return subset.filter(condition, condrepr=('<phase %r>', target),
1634 cache=False)
1634 cache=False)
1635
1635
1636 @predicate('draft()', safe=True)
1636 @predicate('draft()', safe=True)
1637 def draft(repo, subset, x):
1637 def draft(repo, subset, x):
1638 """Changeset in draft phase."""
1638 """Changeset in draft phase."""
1639 # i18n: "draft" is a keyword
1639 # i18n: "draft" is a keyword
1640 getargs(x, 0, 0, _("draft takes no arguments"))
1640 getargs(x, 0, 0, _("draft takes no arguments"))
1641 target = phases.draft
1641 target = phases.draft
1642 return _phase(repo, subset, target)
1642 return _phase(repo, subset, target)
1643
1643
1644 @predicate('secret()', safe=True)
1644 @predicate('secret()', safe=True)
1645 def secret(repo, subset, x):
1645 def secret(repo, subset, x):
1646 """Changeset in secret phase."""
1646 """Changeset in secret phase."""
1647 # i18n: "secret" is a keyword
1647 # i18n: "secret" is a keyword
1648 getargs(x, 0, 0, _("secret takes no arguments"))
1648 getargs(x, 0, 0, _("secret takes no arguments"))
1649 target = phases.secret
1649 target = phases.secret
1650 return _phase(repo, subset, target)
1650 return _phase(repo, subset, target)
1651
1651
1652 def parentspec(repo, subset, x, n, order):
1652 def parentspec(repo, subset, x, n, order):
1653 """``set^0``
1653 """``set^0``
1654 The set.
1654 The set.
1655 ``set^1`` (or ``set^``), ``set^2``
1655 ``set^1`` (or ``set^``), ``set^2``
1656 First or second parent, respectively, of all changesets in set.
1656 First or second parent, respectively, of all changesets in set.
1657 """
1657 """
1658 try:
1658 try:
1659 n = int(n[1])
1659 n = int(n[1])
1660 if n not in (0, 1, 2):
1660 if n not in (0, 1, 2):
1661 raise ValueError
1661 raise ValueError
1662 except (TypeError, ValueError):
1662 except (TypeError, ValueError):
1663 raise error.ParseError(_("^ expects a number 0, 1, or 2"))
1663 raise error.ParseError(_("^ expects a number 0, 1, or 2"))
1664 ps = set()
1664 ps = set()
1665 cl = repo.changelog
1665 cl = repo.changelog
1666 for r in getset(repo, fullreposet(repo), x):
1666 for r in getset(repo, fullreposet(repo), x):
1667 if n == 0:
1667 if n == 0:
1668 ps.add(r)
1668 ps.add(r)
1669 elif n == 1:
1669 elif n == 1:
1670 ps.add(cl.parentrevs(r)[0])
1670 ps.add(cl.parentrevs(r)[0])
1671 elif n == 2:
1671 elif n == 2:
1672 parents = cl.parentrevs(r)
1672 parents = cl.parentrevs(r)
1673 if parents[1] != node.nullrev:
1673 if parents[1] != node.nullrev:
1674 ps.add(parents[1])
1674 ps.add(parents[1])
1675 return subset & ps
1675 return subset & ps
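# Illustrative usage of the ^ operator handled above (hypothetical revisions):
#
#   hg log -r 'tip^'    # first parent of tip (same as tip^1)
#   hg log -r '.^2'     # second parent of the working directory's parent;
#                       # empty when it is not a merge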
1676
1676
1677 @predicate('present(set)', safe=True)
1677 @predicate('present(set)', safe=True)
1678 def present(repo, subset, x):
1678 def present(repo, subset, x):
1679 """An empty set, if any revision in set isn't found; otherwise,
1679 """An empty set, if any revision in set isn't found; otherwise,
1680 all revisions in set.
1680 all revisions in set.
1681
1681
1682 If any of specified revisions is not present in the local repository,
1682 If any of specified revisions is not present in the local repository,
1683 the query is normally aborted. But this predicate allows the query
1683 the query is normally aborted. But this predicate allows the query
1684 to continue even in such cases.
1684 to continue even in such cases.
1685 """
1685 """
1686 try:
1686 try:
1687 return getset(repo, subset, x)
1687 return getset(repo, subset, x)
1688 except error.RepoLookupError:
1688 except error.RepoLookupError:
1689 return baseset()
1689 return baseset()
1690
1690
1691 # for internal use
1691 # for internal use
1692 @predicate('_notpublic', safe=True)
1692 @predicate('_notpublic', safe=True)
1693 def _notpublic(repo, subset, x):
1693 def _notpublic(repo, subset, x):
1694 getargs(x, 0, 0, "_notpublic takes no arguments")
1694 getargs(x, 0, 0, "_notpublic takes no arguments")
1695 repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
1695 repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
1696 if repo._phasecache._phasesets:
1696 if repo._phasecache._phasesets:
1697 s = set()
1697 s = set()
1698 for u in repo._phasecache._phasesets[1:]:
1698 for u in repo._phasecache._phasesets[1:]:
1699 s.update(u)
1699 s.update(u)
1700 s = baseset(s - repo.changelog.filteredrevs)
1700 s = baseset(s - repo.changelog.filteredrevs)
1701 s.sort()
1701 s.sort()
1702 return subset & s
1702 return subset & s
1703 else:
1703 else:
1704 phase = repo._phasecache.phase
1704 phase = repo._phasecache.phase
1705 target = phases.public
1705 target = phases.public
1706 condition = lambda r: phase(repo, r) != target
1706 condition = lambda r: phase(repo, r) != target
1707 return subset.filter(condition, condrepr=('<phase %r>', target),
1707 return subset.filter(condition, condrepr=('<phase %r>', target),
1708 cache=False)
1708 cache=False)
1709
1709
1710 @predicate('public()', safe=True)
1710 @predicate('public()', safe=True)
1711 def public(repo, subset, x):
1711 def public(repo, subset, x):
1712 """Changeset in public phase."""
1712 """Changeset in public phase."""
1713 # i18n: "public" is a keyword
1713 # i18n: "public" is a keyword
1714 getargs(x, 0, 0, _("public takes no arguments"))
1714 getargs(x, 0, 0, _("public takes no arguments"))
1715 phase = repo._phasecache.phase
1715 phase = repo._phasecache.phase
1716 target = phases.public
1716 target = phases.public
1717 condition = lambda r: phase(repo, r) == target
1717 condition = lambda r: phase(repo, r) == target
1718 return subset.filter(condition, condrepr=('<phase %r>', target),
1718 return subset.filter(condition, condrepr=('<phase %r>', target),
1719 cache=False)
1719 cache=False)
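# Illustrative phase queries built from the predicates above:
#
#   hg log -r 'not public()'              # draft and secret changesets
#   hg log -r 'draft() and ancestors(.)'  # unpublished ancestors of the parent
#
# ancestors() is defined elsewhere in this module.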
1720
1720
1721 @predicate('remote([id [,path]])', safe=True)
1721 @predicate('remote([id [,path]])', safe=True)
1722 def remote(repo, subset, x):
1722 def remote(repo, subset, x):
1723 """Local revision that corresponds to the given identifier in a
1723 """Local revision that corresponds to the given identifier in a
1724 remote repository, if present. Here, the '.' identifier is a
1724 remote repository, if present. Here, the '.' identifier is a
1725 synonym for the current local branch.
1725 synonym for the current local branch.
1726 """
1726 """
1727
1727
1728 from . import hg # avoid start-up nasties
1728 from . import hg # avoid start-up nasties
1729 # i18n: "remote" is a keyword
1729 # i18n: "remote" is a keyword
1730 l = getargs(x, 0, 2, _("remote takes zero, one, or two arguments"))
1730 l = getargs(x, 0, 2, _("remote takes zero, one, or two arguments"))
1731
1731
1732 q = '.'
1732 q = '.'
1733 if len(l) > 0:
1733 if len(l) > 0:
1734 # i18n: "remote" is a keyword
1734 # i18n: "remote" is a keyword
1735 q = getstring(l[0], _("remote requires a string id"))
1735 q = getstring(l[0], _("remote requires a string id"))
1736 if q == '.':
1736 if q == '.':
1737 q = repo['.'].branch()
1737 q = repo['.'].branch()
1738
1738
1739 dest = ''
1739 dest = ''
1740 if len(l) > 1:
1740 if len(l) > 1:
1741 # i18n: "remote" is a keyword
1741 # i18n: "remote" is a keyword
1742 dest = getstring(l[1], _("remote requires a repository path"))
1742 dest = getstring(l[1], _("remote requires a repository path"))
1743 dest = repo.ui.expandpath(dest or 'default')
1743 dest = repo.ui.expandpath(dest or 'default')
1744 dest, branches = hg.parseurl(dest)
1744 dest, branches = hg.parseurl(dest)
1745 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1745 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1746 if revs:
1746 if revs:
1747 revs = [repo.lookup(rev) for rev in revs]
1747 revs = [repo.lookup(rev) for rev in revs]
1748 other = hg.peer(repo, {}, dest)
1748 other = hg.peer(repo, {}, dest)
1749 n = other.lookup(q)
1749 n = other.lookup(q)
1750 if n in repo:
1750 if n in repo:
1751 r = repo[n].rev()
1751 r = repo[n].rev()
1752 if r in subset:
1752 if r in subset:
1753 return baseset([r])
1753 return baseset([r])
1754 return baseset()
1754 return baseset()
1755
1755
@predicate('removes(pattern)', safe=True)
def removes(repo, subset, x):
    """Changesets which remove files matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "removes" is a keyword
    pat = getstring(x, _("removes requires a pattern"))
    return checkstatus(repo, subset, pat, 2)

@predicate('rev(number)', safe=True)
def rev(repo, subset, x):
    """Revision with the given numeric identifier.
    """
    # i18n: "rev" is a keyword
    l = getargs(x, 1, 1, _("rev requires one argument"))
    try:
        # i18n: "rev" is a keyword
        l = int(getstring(l[0], _("rev requires a number")))
    except (TypeError, ValueError):
        # i18n: "rev" is a keyword
        raise error.ParseError(_("rev expects a number"))
    if l not in repo.changelog and l != node.nullrev:
        return baseset()
    return subset & baseset([l])

@predicate('matching(revision [, field])', safe=True)
def matching(repo, subset, x):
    """Changesets in which a given set of fields match the set of fields in the
    selected revision or set.

    To match more than one field, pass the list of fields to match separated
    by spaces (e.g. ``author description``).

    Valid fields are most regular revision fields and some special fields.

    Regular revision fields are ``description``, ``author``, ``branch``,
    ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
    and ``diff``.
    Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
    contents of the revision. Two revisions matching their ``diff`` will
    also match their ``files``.

    Special fields are ``summary`` and ``metadata``:
    ``summary`` matches the first line of the description.
    ``metadata`` is equivalent to matching ``description user date``
    (i.e. it matches the main metadata fields).

    ``metadata`` is the default field which is used when no fields are
    specified. You can match more than one field at a time.
    """
    # i18n: "matching" is a keyword
    l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))

    revs = getset(repo, fullreposet(repo), l[0])

    fieldlist = ['metadata']
    if len(l) > 1:
        fieldlist = getstring(l[1],
                              # i18n: "matching" is a keyword
                              _("matching requires a string "
                                "as its second argument")).split()

    # Make sure that there are no repeated fields,
    # expand the 'special' 'metadata' field type
    # and check the 'files' whenever we check the 'diff'
    fields = []
    for field in fieldlist:
        if field == 'metadata':
            fields += ['user', 'description', 'date']
        elif field == 'diff':
            # a revision matching the diff must also match the files;
            # since matching the diff is very costly, make sure to
            # also match the files first
            fields += ['files', 'diff']
        else:
            if field == 'author':
                field = 'user'
            fields.append(field)
    fields = set(fields)
    if 'summary' in fields and 'description' in fields:
        # If a revision matches its description it also matches its summary
        fields.discard('summary')

    # We may want to match more than one field.
    # Not all fields take the same amount of time to be matched.
    # Sort the selected fields in order of increasing matching cost.
    fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
                  'files', 'description', 'substate', 'diff']
    def fieldkeyfunc(f):
        try:
            return fieldorder.index(f)
        except ValueError:
            # assume an unknown field is very costly
            return len(fieldorder)
    fields = list(fields)
    fields.sort(key=fieldkeyfunc)

    # Each field will be matched with its own "getfield" function
    # which will be added to the getfieldfuncs array of functions
    getfieldfuncs = []
    _funcs = {
        'user': lambda r: repo[r].user(),
        'branch': lambda r: repo[r].branch(),
        'date': lambda r: repo[r].date(),
        'description': lambda r: repo[r].description(),
        'files': lambda r: repo[r].files(),
        'parents': lambda r: repo[r].parents(),
        'phase': lambda r: repo[r].phase(),
        'substate': lambda r: repo[r].substate,
        'summary': lambda r: repo[r].description().splitlines()[0],
        'diff': lambda r: list(repo[r].diff(git=True)),
    }
    for info in fields:
        getfield = _funcs.get(info, None)
        if getfield is None:
            raise error.ParseError(
                # i18n: "matching" is a keyword
                _("unexpected field name passed to matching: %s") % info)
        getfieldfuncs.append(getfield)
    # convert the getfield array of functions into a "getinfo" function
    # which returns an array of field values (or a single value if there
    # is only one field to match)
    getinfo = lambda r: [f(r) for f in getfieldfuncs]

    def matches(x):
        for rev in revs:
            target = getinfo(rev)
            match = True
            for n, f in enumerate(getfieldfuncs):
                if target[n] != f(x):
                    match = False
            if match:
                return True
        return False

    return subset.filter(matches, condrepr=('<matching%r %r>', fields, revs))

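# Illustrative usage (not part of the original module); the revision number is
# arbitrary and assumed to exist in the repository:
#
#   hg log -r 'matching(42, "author date")'
#
# selects changesets whose author and date equal those of revision 42. With no
# second argument the default ``metadata`` fields (user, description, date)
# are compared, and fields are tested cheapest-first as arranged above.
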
@predicate('reverse(set)', safe=True, takeorder=True)
def reverse(repo, subset, x, order):
    """Reverse order of set.
    """
    l = getset(repo, subset, x)
    if order == defineorder:
        l.reverse()
    return l

@predicate('roots(set)', safe=True)
def roots(repo, subset, x):
    """Changesets in set with no parent changeset in set.
    """
    s = getset(repo, fullreposet(repo), x)
    parents = repo.changelog.parentrevs
    def filter(r):
        for p in parents(r):
            if 0 <= p and p in s:
                return False
        return True
    return subset & s.filter(filter, condrepr='<roots>')

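# Illustrative usage (not part of the original module); the branch and tag
# names are hypothetical:
#
#   hg log -r 'roots(branch(default))'   # where the 'default' branch starts
#   hg log -r 'roots(1.0::2.0)'          # first changesets of a release range
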
_sortkeyfuncs = {
    'rev': lambda c: c.rev(),
    'branch': lambda c: c.branch(),
    'desc': lambda c: c.description(),
    'user': lambda c: c.user(),
    'author': lambda c: c.user(),
    'date': lambda c: c.date()[0],
}

def _getsortargs(x):
    """Parse sort options into (set, [(key, reverse)], opts)"""
    args = getargsdict(x, 'sort', 'set keys topo.firstbranch')
    if 'set' not in args:
        # i18n: "sort" is a keyword
        raise error.ParseError(_('sort requires one or two arguments'))
    keys = "rev"
    if 'keys' in args:
        # i18n: "sort" is a keyword
        keys = getstring(args['keys'], _("sort spec must be a string"))

    keyflags = []
    for k in keys.split():
        fk = k
        reverse = (k[0] == '-')
        if reverse:
            k = k[1:]
        if k not in _sortkeyfuncs and k != 'topo':
            raise error.ParseError(_("unknown sort key %r") % fk)
        keyflags.append((k, reverse))

    if len(keyflags) > 1 and any(k == 'topo' for k, reverse in keyflags):
        # i18n: "topo" is a keyword
        raise error.ParseError(_('topo sort order cannot be combined '
                                 'with other sort keys'))

    opts = {}
    if 'topo.firstbranch' in args:
        if any(k == 'topo' for k, reverse in keyflags):
            opts['topo.firstbranch'] = args['topo.firstbranch']
        else:
            # i18n: "topo" and "topo.firstbranch" are keywords
            raise error.ParseError(_('topo.firstbranch can only be used '
                                     'when using the topo sort key'))

    return args['set'], keyflags, opts

@predicate('sort(set[, [-]key... [, ...]])', safe=True, takeorder=True)
def sort(repo, subset, x, order):
    """Sort set by keys. The default sort order is ascending, specify a key
    as ``-key`` to sort in descending order.

    The keys can be:

    - ``rev`` for the revision number,
    - ``branch`` for the branch name,
    - ``desc`` for the commit message (description),
    - ``user`` for user name (``author`` can be used as an alias),
    - ``date`` for the commit date
    - ``topo`` for a reverse topological sort

    The ``topo`` sort order cannot be combined with other sort keys. This sort
    takes one optional argument, ``topo.firstbranch``, which takes a revset that
    specifies what topological branches to prioritize in the sort.

    """
    s, keyflags, opts = _getsortargs(x)
    revs = getset(repo, subset, s)

    if not keyflags or order != defineorder:
        return revs
    if len(keyflags) == 1 and keyflags[0][0] == "rev":
        revs.sort(reverse=keyflags[0][1])
        return revs
    elif keyflags[0][0] == "topo":
        firstbranch = ()
        if 'topo.firstbranch' in opts:
            firstbranch = getset(repo, subset, opts['topo.firstbranch'])
        revs = baseset(_toposort(revs, repo.changelog.parentrevs, firstbranch),
                       istopo=True)
        if keyflags[0][1]:
            revs.reverse()
        return revs

    # sort() is guaranteed to be stable
    ctxs = [repo[r] for r in revs]
    for k, reverse in reversed(keyflags):
        ctxs.sort(key=_sortkeyfuncs[k], reverse=reverse)
    return baseset([c.rev() for c in ctxs])

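# Illustrative usage (not part of the original module); these queries are
# sketches that assume nothing beyond a repository with a 'default' branch:
#
#   hg log -r 'sort(branch(default), "-date user")'
#   hg log -r 'sort(all(), topo)'
#
# The first sorts by descending date and then by user (each key applied as a
# stable sort, as above); the second delegates to _toposort() below.
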
def _toposort(revs, parentsfunc, firstbranch=()):
    """Yield revisions from heads to roots one (topo) branch at a time.

    This function aims to be used by a graph generator that wishes to minimize
    the number of parallel branches and their interleaving.

    Example iteration order (numbers show the "true" order in a changelog):

      o 4
      |
      o 1
      |
      | o 3
      | |
      | o 2
      |/
      o 0

    Note that the ancestors of merges are understood by the current
    algorithm to be on the same branch. This means no reordering will
    occur behind a merge.
    """

    ### Quick summary of the algorithm
    #
    # This function is based around a "retention" principle. We keep revisions
    # in memory until we are ready to emit a whole branch that immediately
    # "merges" into an existing one. This reduces the number of parallel
    # branches with interleaved revisions.
    #
    # During iteration revs are split into two groups:
    # A) revisions already emitted
    # B) revisions in "retention". They are stored as different subgroups.
    #
    # for each REV, we do the following logic:
    #
    #   1) if REV is a parent of (A), we will emit it. If there is a
    #   retention group ((B) above) that is blocked on REV being
    #   available, we emit all the revisions out of that retention
    #   group first.
    #
    #   2) else, we'll search for a subgroup in (B) waiting for REV to become
    #   available; if such a subgroup exists, we add REV to it and the
    #   subgroup is now waiting for REV.parents() to be available.
    #
    #   3) finally, if no such subgroup exists in (B), we create a new one.
    #
    #
    # To bootstrap the algorithm, we emit the tipmost revision (which
    # puts it in group (A) from above).

    revs.sort(reverse=True)

    # Set of parents of revisions that have been emitted. They can be
    # considered unblocked as the graph generator is already aware of them so
    # there is no need to delay the revisions that reference them.
    #
    # If someone wants to prioritize a branch over the others, pre-filling this
    # set will force all other branches to wait until this branch is ready to
    # be emitted.
    unblocked = set(firstbranch)

    # list of groups waiting to be displayed, each group is defined by:
    #
    #   (revs:    lists of revs waiting to be displayed,
    #    blocked: set of revs that cannot be displayed before those in 'revs')
    #
    # The second value ('blocked') corresponds to the parents of any revision
    # in the group ('revs') that is not itself contained in the group. The
    # main idea of this algorithm is to delay as much as possible the emission
    # of any revision. This means waiting for the moment we are about to
    # display these parents to display the revs in a group.
    #
    # This first implementation is smart until it encounters a merge: it will
    # emit revs as soon as any parent is about to be emitted and can grow an
    # arbitrary number of revs in 'blocked'. In practice this means we
    # properly retain new branches but give up on any special ordering for
    # ancestors of merges. The implementation can be improved to handle this
    # better.
    #
    # The first subgroup is special. It corresponds to all the revisions that
    # were already emitted. The 'revs' list is expected to be empty and the
    # 'blocked' set contains the parent revisions of already emitted
    # revisions.
    #
    # You could pre-seed the <parents> set of groups[0] to specific
    # changesets to select what the first emitted branch should be.
    groups = [([], unblocked)]
    pendingheap = []
    pendingset = set()

    heapq.heapify(pendingheap)
    heappop = heapq.heappop
    heappush = heapq.heappush
    for currentrev in revs:
        # The heap pops the smallest element; we want the highest, so we
        # invert.
        if currentrev not in pendingset:
            heappush(pendingheap, -currentrev)
            pendingset.add(currentrev)
        # iterate on pending revs until the current rev has been processed
        rev = None
        while rev != currentrev:
            rev = -heappop(pendingheap)
            pendingset.remove(rev)

            # Look for a subgroup that is blocked, waiting for the current
            # revision.
            matching = [i for i, g in enumerate(groups) if rev in g[1]]

            if matching:
                # The main idea is to gather together all sets that are blocked
                # on the same revision.
                #
                # Groups are merged when a common blocking ancestor is
                # observed. For example, given two groups:
                #
                #   revs [5, 4] waiting for 1
                #   revs [3, 2] waiting for 1
                #
                # these two groups will be merged when we process revision 1.
                # In theory, we could have merged the groups when we added 2
                # to the group it is now in (we could have noticed the groups
                # were both blocked on 1 then), but the way it works now makes
                # the algorithm simpler.
                #
                # We also always keep the oldest subgroup first. We can
                # probably improve the behavior by having the longest set
                # first. That way, graph algorithms could minimise the length
                # of the parallel lines in their drawing. This is currently
                # not done.
                targetidx = matching.pop(0)
                trevs, tparents = groups[targetidx]
                for i in matching:
                    gr = groups[i]
                    trevs.extend(gr[0])
                    tparents |= gr[1]
                # delete all merged subgroups (except the one we kept)
                # (starting from the last subgroup for performance and
                # sanity reasons)
                for i in reversed(matching):
                    del groups[i]
            else:
                # This is a new head. We create a new subgroup for it.
                targetidx = len(groups)
                groups.append(([], set([rev])))

            gr = groups[targetidx]

            # We now add the current nodes to this subgroup. This is done
            # after the subgroup merging because all elements from a subgroup
            # that relied on this rev must precede it.
            #
            # We also update the <parents> set to include the parents of the
            # new nodes.
            if rev == currentrev: # only display stuff in rev
                gr[0].append(rev)
            gr[1].remove(rev)
            parents = [p for p in parentsfunc(rev) if p > node.nullrev]
            gr[1].update(parents)
            for p in parents:
                if p not in pendingset:
                    pendingset.add(p)
                    heappush(pendingheap, -p)

            # Look for a subgroup to display
            #
            # When unblocked is empty (if clause), we were not waiting for any
            # revisions during the first iteration (if no priority was given)
            # or we emitted a whole disconnected set of the graph (reached a
            # root). In that case we arbitrarily take the oldest known
            # subgroup. The heuristic could probably be better.
            #
            # Otherwise (elif clause), if the subgroup is blocked on
            # a revision we just emitted, we can safely emit it as
            # well.
            if not unblocked:
                if len(groups) > 1: # display other subset
                    targetidx = 1
                    gr = groups[1]
            elif not gr[1] & unblocked:
                gr = None

            if gr is not None:
                # update the set of awaited revisions with the one from the
                # subgroup
                unblocked |= gr[1]
                # output all revisions in the subgroup
                for r in gr[0]:
                    yield r
                # delete the subgroup that you just output
                # unless it is groups[0] in which case you just empty it.
                if targetidx:
                    del groups[targetidx]
                else:
                    gr[0][:] = []
    # Check if we have some subgroup waiting for revisions we are not going to
    # iterate over
    for g in groups:
        for r in g[0]:
            yield r

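# Minimal sketch of driving _toposort() directly (not part of the original
# module). 'parents' is a stand-in for repo.changelog.parentrevs and the tiny
# DAG mirrors the docstring above (two branches meeting at revision 0):
#
#   parents = {0: (-1, -1), 1: (0, -1), 2: (0, -1), 3: (2, -1), 4: (1, -1)}
#   list(_toposort([0, 1, 2, 3, 4], lambda r: parents[r]))
#
# This should yield 4, 1, 3, 2, 0: each branch comes out as a contiguous run,
# heads first, instead of the two branches being interleaved.
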
@predicate('subrepo([pattern])')
def subrepo(repo, subset, x):
    """Changesets that add, modify or remove the given subrepo. If no subrepo
    pattern is named, any subrepo changes are returned.
    """
    # i18n: "subrepo" is a keyword
    args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
    pat = None
    if len(args) != 0:
        pat = getstring(args[0], _("subrepo requires a pattern"))

    m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])

    def submatches(names):
        k, p, m = util.stringmatcher(pat)
        for name in names:
            if m(name):
                yield name

    def matches(x):
        c = repo[x]
        s = repo.status(c.p1().node(), c.node(), match=m)

        if pat is None:
            return s.added or s.modified or s.removed

        if s.added:
            return any(submatches(c.substate.keys()))

        if s.modified:
            subs = set(c.p1().substate.keys())
            subs.update(c.substate.keys())

            for path in submatches(subs):
                if c.p1().substate.get(path) != c.substate.get(path):
                    return True

        if s.removed:
            return any(submatches(c.p1().substate.keys()))

        return False

    return subset.filter(matches, condrepr=('<subrepo %r>', pat))

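# Illustrative usage (not part of the original module), assuming a repository
# that tracks a subrepo under a hypothetical 'libs/vendor' path:
#
#   hg log -r 'subrepo()'                # any .hgsubstate change
#   hg log -r 'subrepo("glob:libs/*")'   # changes to matching subrepos only
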
def _substringmatcher(pattern, casesensitive=True):
    kind, pattern, matcher = util.stringmatcher(pattern,
                                                casesensitive=casesensitive)
    if kind == 'literal':
        if not casesensitive:
            pattern = encoding.lower(pattern)
            matcher = lambda s: pattern in encoding.lower(s)
        else:
            matcher = lambda s: pattern in s
    return kind, pattern, matcher

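# Minimal sketch of the helper above (not part of the original module): for a
# literal pattern it degrades to substring containment, while 're:' patterns
# keep the regex matcher returned by util.stringmatcher.
#
#   kind, pat, m = _substringmatcher('Alice', casesensitive=False)
#   m('alice@example.com')      # expected True (case-insensitive containment)
#   m('Bob <bob@example.com>')  # expected False
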
@predicate('tag([name])', safe=True)
def tag(repo, subset, x):
    """The specified tag by name, or all tagged revisions if no name is given.

    Pattern matching is supported for `name`. See
    :hg:`help revisions.patterns`.
    """
    # i18n: "tag" is a keyword
    args = getargs(x, 0, 1, _("tag takes one or no arguments"))
    cl = repo.changelog
    if args:
        pattern = getstring(args[0],
                            # i18n: "tag" is a keyword
                            _('the argument to tag must be a string'))
        kind, pattern, matcher = util.stringmatcher(pattern)
        if kind == 'literal':
            # avoid resolving all tags
            tn = repo._tagscache.tags.get(pattern, None)
            if tn is None:
                raise error.RepoLookupError(_("tag '%s' does not exist")
                                            % pattern)
            s = set([repo[tn].rev()])
        else:
            s = set([cl.rev(n) for t, n in repo.tagslist() if matcher(t)])
    else:
        s = set([cl.rev(n) for t, n in repo.tagslist() if t != 'tip'])
    return subset & s

@predicate('tagged', safe=True)
def tagged(repo, subset, x):
    return tag(repo, subset, x)

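# Illustrative usage (not part of the original module); the tag names are
# hypothetical:
#
#   hg log -r 'tag()'            # every tagged revision except 'tip'
#   hg log -r 'tag("v1.0")'      # literal form: single lookup in the tag cache
#   hg log -r 'tag("re:^v1\.")'  # pattern form: scans repo.tagslist()
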
@predicate('unstable()', safe=True)
def unstable(repo, subset, x):
    """Non-obsolete changesets with obsolete ancestors.
    """
    # i18n: "unstable" is a keyword
    getargs(x, 0, 0, _("unstable takes no arguments"))
    unstables = obsmod.getrevs(repo, 'unstable')
    return subset & unstables


@predicate('user(string)', safe=True)
def user(repo, subset, x):
    """User name contains string. The match is case-insensitive.

    Pattern matching is supported for `string`. See
    :hg:`help revisions.patterns`.
    """
    return author(repo, subset, x)

@predicate('wdir', safe=True)
def wdir(repo, subset, x):
    """Working directory. (EXPERIMENTAL)"""
    # i18n: "wdir" is a keyword
    getargs(x, 0, 0, _("wdir takes no arguments"))
    if node.wdirrev in subset or isinstance(subset, fullreposet):
        return baseset([node.wdirrev])
    return baseset()

def _orderedlist(repo, subset, x):
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    # remove duplicates here. it's difficult for caller to deduplicate sets
    # because different symbols can point to the same rev.
    cl = repo.changelog
    ls = []
    seen = set()
    for t in s.split('\0'):
        try:
            # fast path for integer revision
            r = int(t)
            if str(r) != t or r not in cl:
                raise ValueError
            revs = [r]
        except ValueError:
            revs = stringset(repo, subset, t)

        for r in revs:
            if r in seen:
                continue
            if (r in subset
                or r == node.nullrev and isinstance(subset, fullreposet)):
                ls.append(r)
            seen.add(r)
    return baseset(ls)

# for internal use
@predicate('_list', safe=True, takeorder=True)
def _list(repo, subset, x, order):
    if order == followorder:
        # slow path to take the subset order
        return subset & _orderedlist(repo, fullreposet(repo), x)
    else:
        return _orderedlist(repo, subset, x)

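# Hedged sketch of how the internal '_list' predicate is typically fed (not
# part of the original module): machine-generated unions of simple symbols,
# such as several '-r' options on the command line, are compiled by the
# optimizer below into a single \0-separated string, e.g.
#
#   ('func', ('symbol', '_list'), ('string', 'tip\x00.^'))
#
# so _orderedlist() resolves each symbol once, drops duplicate revs that
# different symbols point to, and preserves the order the caller gave.
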
def _orderedintlist(repo, subset, x):
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    ls = [int(r) for r in s.split('\0')]
    s = subset
    return baseset([r for r in ls if r in s])

# for internal use
@predicate('_intlist', safe=True, takeorder=True)
def _intlist(repo, subset, x, order):
    if order == followorder:
        # slow path to take the subset order
        return subset & _orderedintlist(repo, fullreposet(repo), x)
    else:
        return _orderedintlist(repo, subset, x)

def _orderedhexlist(repo, subset, x):
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    cl = repo.changelog
    ls = [cl.rev(node.bin(r)) for r in s.split('\0')]
    s = subset
    return baseset([r for r in ls if r in s])

# for internal use
@predicate('_hexlist', safe=True, takeorder=True)
def _hexlist(repo, subset, x, order):
    if order == followorder:
        # slow path to take the subset order
        return subset & _orderedhexlist(repo, fullreposet(repo), x)
    else:
        return _orderedhexlist(repo, subset, x)

methods = {
    "range": rangeset,
    "rangepre": rangepre,
    "dagrange": dagrange,
    "string": stringset,
    "symbol": stringset,
    "and": andset,
    "or": orset,
    "not": notset,
    "difference": differenceset,
    "list": listset,
    "keyvalue": keyvaluepair,
    "func": func,
    "ancestor": ancestorspec,
    "parent": parentspec,
    "parentpost": parentpost,
}

# Constants for ordering requirement, used in _analyze():
#
# If 'define', any nested functions and operations can change the ordering of
# the entries in the set. If 'follow', any nested functions and operations
# should take the ordering specified by the first operand to the '&' operator.
#
# For instance,
#
#   X & (Y | Z)
#   ^   ^^^^^^^
#   |   follow
#   define
#
# will be evaluated as 'or(y(x()), z(x()))', where 'x()' can change the order
# of the entries in the set, but 'y()', 'z()' and 'or()' shouldn't.
#
# 'any' means the order doesn't matter. For instance,
#
#   X & !Y
#       ^
#       any
#
# 'y()' can either enforce its ordering requirement or take the ordering
# specified by 'x()' because 'not()' doesn't care about the order.
#
# Transition of ordering requirement:
#
# 1. starts with 'define'
# 2. shifts to 'follow' by 'x & y'
# 3. changes back to 'define' on function call 'f(x)' or function-like
#    operation 'x (f) y' because 'f' may have its own ordering requirement
#    for 'x' and 'y' (e.g. 'first(x)')
#
anyorder = 'any'        # don't care about the order
defineorder = 'define'  # should define the order
followorder = 'follow'  # must follow the current order

# transition table for 'x & y', from the current expression 'x' to 'y'
_tofolloworder = {
    anyorder: anyorder,
    defineorder: followorder,
    followorder: followorder,
}

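# Minimal sketch of how a takeorder predicate consumes these constants (not
# part of the original module; it simply mirrors reverse() above, and
# 'example' is a hypothetical predicate name):
#
#   @predicate('example(set)', safe=True, takeorder=True)
#   def example(repo, subset, x, order):
#       l = getset(repo, subset, x)
#       if order == defineorder:  # this expression owns the ordering
#           l.reverse()
#       return l                  # under followorder, keep the subset's order
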
def _matchonly(revs, bases):
    """
    >>> f = lambda *args: _matchonly(*map(parse, args))
    >>> f('ancestors(A)', 'not ancestors(B)')
    ('list', ('symbol', 'A'), ('symbol', 'B'))
    """
    if (revs is not None
        and revs[0] == 'func'
        and getsymbol(revs[1]) == 'ancestors'
        and bases is not None
        and bases[0] == 'not'
        and bases[1][0] == 'func'
        and getsymbol(bases[1][1]) == 'ancestors'):
        return ('list', revs[2], bases[1][2])

def _fixops(x):
    """Rewrite raw parsed tree to resolve ambiguous syntax which cannot be
    handled well by our simple top-down parser"""
    if not isinstance(x, tuple):
        return x

    op = x[0]
    if op == 'parent':
        # x^:y means (x^) : y, not x ^ (:y)
        # x^: means (x^) :, not x ^ (:)
        post = ('parentpost', x[1])
        if x[2][0] == 'dagrangepre':
            return _fixops(('dagrange', post, x[2][1]))
        elif x[2][0] == 'rangepre':
            return _fixops(('range', post, x[2][1]))
        elif x[2][0] == 'rangeall':
            return _fixops(('rangepost', post))
    elif op == 'or':
        # make number of arguments deterministic:
        # x + y + z -> (or x y z) -> (or (list x y z))
        return (op, _fixops(('list',) + x[1:]))

    return (op,) + tuple(_fixops(y) for y in x[1:])

def _analyze(x, order):
    if x is None:
        return x

    op = x[0]
    if op == 'minus':
        return _analyze(('and', x[1], ('not', x[2])), order)
    elif op == 'only':
        t = ('func', ('symbol', 'only'), ('list', x[1], x[2]))
        return _analyze(t, order)
    elif op == 'onlypost':
        return _analyze(('func', ('symbol', 'only'), x[1]), order)
    elif op == 'dagrangepre':
        return _analyze(('func', ('symbol', 'ancestors'), x[1]), order)
    elif op == 'dagrangepost':
        return _analyze(('func', ('symbol', 'descendants'), x[1]), order)
    elif op == 'rangeall':
        return _analyze(('rangepre', ('string', 'tip')), order)
    elif op == 'rangepost':
        return _analyze(('range', x[1], ('string', 'tip')), order)
    elif op == 'negate':
        s = getstring(x[1], _("can't negate that"))
        return _analyze(('string', '-' + s), order)
    elif op in ('string', 'symbol'):
        return x
    elif op == 'and':
        ta = _analyze(x[1], order)
        tb = _analyze(x[2], _tofolloworder[order])
        return (op, ta, tb, order)
    elif op == 'or':
        return (op, _analyze(x[1], order), order)
    elif op == 'not':
        return (op, _analyze(x[1], anyorder), order)
    elif op in ('rangepre', 'parentpost'):
        return (op, _analyze(x[1], defineorder), order)
    elif op == 'group':
        return _analyze(x[1], order)
    elif op in ('dagrange', 'range', 'parent', 'ancestor'):
        ta = _analyze(x[1], defineorder)
        tb = _analyze(x[2], defineorder)
        return (op, ta, tb, order)
    elif op == 'list':
        return (op,) + tuple(_analyze(y, order) for y in x[1:])
    elif op == 'keyvalue':
        return (op, x[1], _analyze(x[2], order))
    elif op == 'func':
        f = getsymbol(x[1])
        d = defineorder
        if f == 'present':
            # 'present(set)' is known to return the argument set with no
            # modification, so forward the current order to its argument
            d = order
        return (op, x[1], _analyze(x[2], d), order)
    raise ValueError('invalid operator %r' % op)

def analyze(x, order=defineorder):
    """Transform raw parsed tree to evaluatable tree which can be fed to
    optimize() or getset()

    All pseudo operations should be mapped to real operations or functions
    defined in methods or symbols table respectively.

    'order' specifies how the current expression 'x' is ordered (see the
    constants defined above.)
    """
    return _analyze(x, order)

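# Minimal sketch of driving the analysis stage (not part of the original
# module; the revset string is arbitrary):
#
#   tree = analyze(parse('(tip^ and public())'))
#
# analyze() unwraps 'group' nodes, rewrites pseudo ops such as 'minus',
# 'rangeall' or 'dagrangepre' into real operations or function calls, and
# appends an ordering requirement ('define', 'follow' or 'any') to each
# 'and'/'or'/'not'/'func' node before the tree reaches optimize()/getset().
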
2560 def _optimize(x, small):
2560 def _optimize(x, small):
2561 if x is None:
2561 if x is None:
2562 return 0, x
2562 return 0, x
2563
2563
2564 smallbonus = 1
2564 smallbonus = 1
2565 if small:
2565 if small:
2566 smallbonus = .5
2566 smallbonus = .5
2567
2567
2568 op = x[0]
2568 op = x[0]
2569 if op in ('string', 'symbol'):
2569 if op in ('string', 'symbol'):
2570 return smallbonus, x # single revisions are small
2570 return smallbonus, x # single revisions are small
2571 elif op == 'and':
2571 elif op == 'and':
2572 wa, ta = _optimize(x[1], True)
2572 wa, ta = _optimize(x[1], True)
2573 wb, tb = _optimize(x[2], True)
2573 wb, tb = _optimize(x[2], True)
2574 order = x[3]
2574 order = x[3]
2575 w = min(wa, wb)
2575 w = min(wa, wb)
2576
2576
2577 # (::x and not ::y)/(not ::y and ::x) have a fast path
2577 # (::x and not ::y)/(not ::y and ::x) have a fast path
2578 tm = _matchonly(ta, tb) or _matchonly(tb, ta)
2578 tm = _matchonly(ta, tb) or _matchonly(tb, ta)
2579 if tm:
2579 if tm:
2580 return w, ('func', ('symbol', 'only'), tm, order)
2580 return w, ('func', ('symbol', 'only'), tm, order)
2581
2581
2582 if tb is not None and tb[0] == 'not':
2582 if tb is not None and tb[0] == 'not':
2583 return wa, ('difference', ta, tb[1], order)
2583 return wa, ('difference', ta, tb[1], order)
2584
2584
2585 if wa > wb:
2585 if wa > wb:
2586 return w, (op, tb, ta, order)
2586 return w, (op, tb, ta, order)
2587 return w, (op, ta, tb, order)
2587 return w, (op, ta, tb, order)
2588 elif op == 'or':
2588 elif op == 'or':
2589 # fast path for machine-generated expressions, which are likely to have
2589 # fast path for machine-generated expressions, which are likely to have
2590 # lots of trivial revisions: 'a + b + c()' to '_list(a b) + c()'
2590 # lots of trivial revisions: 'a + b + c()' to '_list(a b) + c()'
2591 order = x[2]
2591 order = x[2]
2592 ws, ts, ss = [], [], []
2592 ws, ts, ss = [], [], []
2593 def flushss():
2593 def flushss():
2594 if not ss:
2594 if not ss:
2595 return
2595 return
2596 if len(ss) == 1:
2596 if len(ss) == 1:
2597 w, t = ss[0]
2597 w, t = ss[0]
2598 else:
2598 else:
2599 s = '\0'.join(t[1] for w, t in ss)
2599 s = '\0'.join(t[1] for w, t in ss)
2600 y = ('func', ('symbol', '_list'), ('string', s), order)
2600 y = ('func', ('symbol', '_list'), ('string', s), order)
2601 w, t = _optimize(y, False)
2601 w, t = _optimize(y, False)
2602 ws.append(w)
2602 ws.append(w)
2603 ts.append(t)
2603 ts.append(t)
2604 del ss[:]
2604 del ss[:]
2605 for y in getlist(x[1]):
2605 for y in getlist(x[1]):
2606 w, t = _optimize(y, False)
2606 w, t = _optimize(y, False)
2607 if t is not None and (t[0] == 'string' or t[0] == 'symbol'):
2607 if t is not None and (t[0] == 'string' or t[0] == 'symbol'):
2608 ss.append((w, t))
2608 ss.append((w, t))
2609 continue
2609 continue
2610 flushss()
2610 flushss()
2611 ws.append(w)
2611 ws.append(w)
2612 ts.append(t)
2612 ts.append(t)
2613 flushss()
2613 flushss()
2614 if len(ts) == 1:
2614 if len(ts) == 1:
2615 return ws[0], ts[0] # 'or' operation is fully optimized out
2615 return ws[0], ts[0] # 'or' operation is fully optimized out
2616 # we can't reorder trees by weight because it would change the order.
2616 # we can't reorder trees by weight because it would change the order.
2617 # ("sort(a + b)" == "sort(b + a)", but "a + b" != "b + a")
2617 # ("sort(a + b)" == "sort(b + a)", but "a + b" != "b + a")
2618 # ts = tuple(t for w, t in sorted(zip(ws, ts), key=lambda wt: wt[0]))
2618 # ts = tuple(t for w, t in sorted(zip(ws, ts), key=lambda wt: wt[0]))
2619 return max(ws), (op, ('list',) + tuple(ts), order)
2619 return max(ws), (op, ('list',) + tuple(ts), order)
2620 elif op == 'not':
2620 elif op == 'not':
2621 # Optimize not public() to _notpublic() because we have a fast version
2621 # Optimize not public() to _notpublic() because we have a fast version
2622 if x[1][:3] == ('func', ('symbol', 'public'), None):
2622 if x[1][:3] == ('func', ('symbol', 'public'), None):
2623 order = x[1][3]
2623 order = x[1][3]
2624 newsym = ('func', ('symbol', '_notpublic'), None, order)
2624 newsym = ('func', ('symbol', '_notpublic'), None, order)
2625 o = _optimize(newsym, not small)
2625 o = _optimize(newsym, not small)
2626 return o[0], o[1]
2626 return o[0], o[1]
2627 else:
2627 else:
2628 o = _optimize(x[1], not small)
2628 o = _optimize(x[1], not small)
2629 order = x[2]
2629 order = x[2]
2630 return o[0], (op, o[1], order)
2630 return o[0], (op, o[1], order)
2631 elif op in ('rangepre', 'parentpost'):
2631 elif op in ('rangepre', 'parentpost'):
2632 o = _optimize(x[1], small)
2632 o = _optimize(x[1], small)
2633 order = x[2]
2633 order = x[2]
2634 return o[0], (op, o[1], order)
2634 return o[0], (op, o[1], order)
2635 elif op in ('dagrange', 'range', 'parent', 'ancestor'):
2635 elif op in ('dagrange', 'range', 'parent', 'ancestor'):
2636 wa, ta = _optimize(x[1], small)
2636 wa, ta = _optimize(x[1], small)
2637 wb, tb = _optimize(x[2], small)
2637 wb, tb = _optimize(x[2], small)
2638 order = x[3]
2638 order = x[3]
2639 return wa + wb, (op, ta, tb, order)
2639 return wa + wb, (op, ta, tb, order)
2640 elif op == 'list':
2640 elif op == 'list':
2641 ws, ts = zip(*(_optimize(y, small) for y in x[1:]))
2641 ws, ts = zip(*(_optimize(y, small) for y in x[1:]))
2642 return sum(ws), (op,) + ts
2642 return sum(ws), (op,) + ts
2643 elif op == 'keyvalue':
2643 elif op == 'keyvalue':
2644 w, t = _optimize(x[2], small)
2644 w, t = _optimize(x[2], small)
2645 return w, (op, x[1], t)
2645 return w, (op, x[1], t)
2646 elif op == 'func':
2646 elif op == 'func':
2647 f = getsymbol(x[1])
2647 f = getsymbol(x[1])
2648 wa, ta = _optimize(x[2], small)
2648 wa, ta = _optimize(x[2], small)
2649 if f in ('author', 'branch', 'closed', 'date', 'desc', 'file', 'grep',
2649 if f in ('author', 'branch', 'closed', 'date', 'desc', 'file', 'grep',
2650 'keyword', 'outgoing', 'user', 'destination'):
2650 'keyword', 'outgoing', 'user', 'destination'):
2651 w = 10 # slow
2651 w = 10 # slow
2652 elif f in ('modifies', 'adds', 'removes'):
2652 elif f in ('modifies', 'adds', 'removes'):
2653 w = 30 # slower
2653 w = 30 # slower
2654 elif f == "contains":
2654 elif f == "contains":
2655 w = 100 # very slow
2655 w = 100 # very slow
2656 elif f == "ancestor":
2656 elif f == "ancestor":
2657 w = 1 * smallbonus
2657 w = 1 * smallbonus
2658 elif f in ('reverse', 'limit', 'first', 'wdir', '_intlist'):
2658 elif f in ('reverse', 'limit', 'first', 'wdir', '_intlist'):
2659 w = 0
2659 w = 0
2660 elif f == "sort":
2660 elif f == "sort":
2661 w = 10 # assume most sorts look at changelog
2661 w = 10 # assume most sorts look at changelog
2662 else:
2662 else:
2663 w = 1
2663 w = 1
2664 order = x[3]
2664 order = x[3]
2665 return w + wa, (op, x[1], ta, order)
2665 return w + wa, (op, x[1], ta, order)
2666 raise ValueError('invalid operator %r' % op)
2666 raise ValueError('invalid operator %r' % op)
2667
2667
2668 def optimize(tree):
2668 def optimize(tree):
2669 """Optimize evaluatable tree
2669 """Optimize evaluatable tree
2670
2670
2671 All pseudo operations should be transformed beforehand.
2671 All pseudo operations should be transformed beforehand.
2672 """
2672 """
2673 _weight, newtree = _optimize(tree, small=True)
2673 _weight, newtree = _optimize(tree, small=True)
2674 return newtree
2674 return newtree
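# A rough sketch of how the weights above come into play (illustrative only;
# the exact tree shapes are an internal detail of analyze()/optimize()):
#
#   tree = optimize(analyze(parse('contains("x") and author("bob")')))
#
# author() has weight 10 while contains() has weight 100, so the 'and' branch
# swaps the operands: the cheaper author() predicate narrows the set before
# the expensive contains() scan runs.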
2675
2675
2676 # the set of valid characters for the initial letter of symbols in
2676 # the set of valid characters for the initial letter of symbols in
2677 # alias declarations and definitions
2677 # alias declarations and definitions
2678 _aliassyminitletters = _syminitletters | set(pycompat.sysstr('$'))
2678 _aliassyminitletters = _syminitletters | set(pycompat.sysstr('$'))
2679
2679
2680 def _parsewith(spec, lookup=None, syminitletters=None):
2680 def _parsewith(spec, lookup=None, syminitletters=None):
2681 """Generate a parse tree of given spec with given tokenizing options
2681 """Generate a parse tree of given spec with given tokenizing options
2682
2682
2683 >>> _parsewith('foo($1)', syminitletters=_aliassyminitletters)
2683 >>> _parsewith('foo($1)', syminitletters=_aliassyminitletters)
2684 ('func', ('symbol', 'foo'), ('symbol', '$1'))
2684 ('func', ('symbol', 'foo'), ('symbol', '$1'))
2685 >>> _parsewith('$1')
2685 >>> _parsewith('$1')
2686 Traceback (most recent call last):
2686 Traceback (most recent call last):
2687 ...
2687 ...
2688 ParseError: ("syntax error in revset '$1'", 0)
2688 ParseError: ("syntax error in revset '$1'", 0)
2689 >>> _parsewith('foo bar')
2689 >>> _parsewith('foo bar')
2690 Traceback (most recent call last):
2690 Traceback (most recent call last):
2691 ...
2691 ...
2692 ParseError: ('invalid token', 4)
2692 ParseError: ('invalid token', 4)
2693 """
2693 """
2694 p = parser.parser(elements)
2694 p = parser.parser(elements)
2695 tree, pos = p.parse(tokenize(spec, lookup=lookup,
2695 tree, pos = p.parse(tokenize(spec, lookup=lookup,
2696 syminitletters=syminitletters))
2696 syminitletters=syminitletters))
2697 if pos != len(spec):
2697 if pos != len(spec):
2698 raise error.ParseError(_('invalid token'), pos)
2698 raise error.ParseError(_('invalid token'), pos)
2699 return _fixops(parser.simplifyinfixops(tree, ('list', 'or')))
2699 return _fixops(parser.simplifyinfixops(tree, ('list', 'or')))
2700
2700
2701 class _aliasrules(parser.basealiasrules):
2701 class _aliasrules(parser.basealiasrules):
2702 """Parsing and expansion rule set of revset aliases"""
2702 """Parsing and expansion rule set of revset aliases"""
2703 _section = _('revset alias')
2703 _section = _('revset alias')
2704
2704
2705 @staticmethod
2705 @staticmethod
2706 def _parse(spec):
2706 def _parse(spec):
2707 """Parse alias declaration/definition ``spec``
2707 """Parse alias declaration/definition ``spec``
2708
2708
2709 This allows symbol names to use also ``$`` as an initial letter
2709 This allows symbol names to use also ``$`` as an initial letter
2710 (for backward compatibility), and callers of this function should
2710 (for backward compatibility), and callers of this function should
2711 examine whether ``$`` is used also for unexpected symbols or not.
2711 examine whether ``$`` is used also for unexpected symbols or not.
2712 """
2712 """
2713 return _parsewith(spec, syminitletters=_aliassyminitletters)
2713 return _parsewith(spec, syminitletters=_aliassyminitletters)
2714
2714
2715 @staticmethod
2715 @staticmethod
2716 def _trygetfunc(tree):
2716 def _trygetfunc(tree):
2717 if tree[0] == 'func' and tree[1][0] == 'symbol':
2717 if tree[0] == 'func' and tree[1][0] == 'symbol':
2718 return tree[1][1], getlist(tree[2])
2718 return tree[1][1], getlist(tree[2])
2719
2719
2720 def expandaliases(ui, tree):
2720 def expandaliases(ui, tree):
2721 aliases = _aliasrules.buildmap(ui.configitems('revsetalias'))
2721 aliases = _aliasrules.buildmap(ui.configitems('revsetalias'))
2722 tree = _aliasrules.expand(aliases, tree)
2722 tree = _aliasrules.expand(aliases, tree)
2723 # warn about problematic (but not referred) aliases
2723 # warn about problematic (but not referred) aliases
2724 for name, alias in sorted(aliases.iteritems()):
2724 for name, alias in sorted(aliases.iteritems()):
2725 if alias.error and not alias.warned:
2725 if alias.error and not alias.warned:
2726 ui.warn(_('warning: %s\n') % (alias.error))
2726 ui.warn(_('warning: %s\n') % (alias.error))
2727 alias.warned = True
2727 alias.warned = True
2728 return tree
2728 return tree
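# Aliases are read from the [revsetalias] configuration section; for instance
# (a sketch of the documented syntax, including positional $n arguments):
#
#   [revsetalias]
#   h = heads(default)
#   d($1) = sort($1, date)
#
# With these defined, 'd(h)' expands to 'sort(heads(default), date)' before
# the tree is analyzed and optimized.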
2729
2729
2730 def foldconcat(tree):
2730 def foldconcat(tree):
2731 """Fold elements to be concatenated by `##`
2731 """Fold elements to be concatenated by `##`
2732 """
2732 """
2733 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2733 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2734 return tree
2734 return tree
2735 if tree[0] == '_concat':
2735 if tree[0] == '_concat':
2736 pending = [tree]
2736 pending = [tree]
2737 l = []
2737 l = []
2738 while pending:
2738 while pending:
2739 e = pending.pop()
2739 e = pending.pop()
2740 if e[0] == '_concat':
2740 if e[0] == '_concat':
2741 pending.extend(reversed(e[1:]))
2741 pending.extend(reversed(e[1:]))
2742 elif e[0] in ('string', 'symbol'):
2742 elif e[0] in ('string', 'symbol'):
2743 l.append(e[1])
2743 l.append(e[1])
2744 else:
2744 else:
2745 msg = _("\"##\" can't concatenate \"%s\" element") % (e[0])
2745 msg = _("\"##\" can't concatenate \"%s\" element") % (e[0])
2746 raise error.ParseError(msg)
2746 raise error.ParseError(msg)
2747 return ('string', ''.join(l))
2747 return ('string', ''.join(l))
2748 else:
2748 else:
2749 return tuple(foldconcat(t) for t in tree)
2749 return tuple(foldconcat(t) for t in tree)
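# Folding is purely textual; for example (illustrative):
#
#   >>> foldconcat(('_concat', ('string', 'foo'), ('symbol', 'bar')))
#   ('string', 'foobar')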
2750
2750
2751 def parse(spec, lookup=None):
2751 def parse(spec, lookup=None):
2752 return _parsewith(spec, lookup=lookup)
2752 return _parsewith(spec, lookup=lookup)
2753
2753
2754 def posttreebuilthook(tree, repo):
2754 def posttreebuilthook(tree, repo):
2755 # hook for extensions to execute code on the optimized tree
2755 # hook for extensions to execute code on the optimized tree
2756 pass
2756 pass
2757
2757
2758 def match(ui, spec, repo=None, order=defineorder):
2758 def match(ui, spec, repo=None, order=defineorder):
2759 """Create a matcher for a single revision spec
2759 """Create a matcher for a single revision spec
2760
2760
2761 If order=followorder, a matcher takes the ordering specified by the input
2761 If order=followorder, a matcher takes the ordering specified by the input
2762 set.
2762 set.
2763 """
2763 """
2764 return matchany(ui, [spec], repo=repo, order=order)
2764 return matchany(ui, [spec], repo=repo, order=order)
2765
2765
2766 def matchany(ui, specs, repo=None, order=defineorder):
2766 def matchany(ui, specs, repo=None, order=defineorder):
2767 """Create a matcher that will include any revisions matching one of the
2767 """Create a matcher that will include any revisions matching one of the
2768 given specs
2768 given specs
2769
2769
2770 If order=followorder, a matcher takes the ordering specified by the input
2770 If order=followorder, a matcher takes the ordering specified by the input
2771 set.
2771 set.
2772 """
2772 """
2773 if not specs:
2773 if not specs:
2774 def mfunc(repo, subset=None):
2774 def mfunc(repo, subset=None):
2775 return baseset()
2775 return baseset()
2776 return mfunc
2776 return mfunc
2777 if not all(specs):
2777 if not all(specs):
2778 raise error.ParseError(_("empty query"))
2778 raise error.ParseError(_("empty query"))
2779 lookup = None
2779 lookup = None
2780 if repo:
2780 if repo:
2781 lookup = repo.__contains__
2781 lookup = repo.__contains__
2782 if len(specs) == 1:
2782 if len(specs) == 1:
2783 tree = parse(specs[0], lookup)
2783 tree = parse(specs[0], lookup)
2784 else:
2784 else:
2785 tree = ('or', ('list',) + tuple(parse(s, lookup) for s in specs))
2785 tree = ('or', ('list',) + tuple(parse(s, lookup) for s in specs))
2786
2786
2787 if ui:
2787 if ui:
2788 tree = expandaliases(ui, tree)
2788 tree = expandaliases(ui, tree)
2789 tree = foldconcat(tree)
2789 tree = foldconcat(tree)
2790 tree = analyze(tree, order)
2790 tree = analyze(tree, order)
2791 tree = optimize(tree)
2791 tree = optimize(tree)
2792 posttreebuilthook(tree, repo)
2792 posttreebuilthook(tree, repo)
2793 return makematcher(tree)
2793 return makematcher(tree)
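# A minimal usage sketch (assumes 'ui' and 'repo' objects are available):
#
#   m = match(ui, 'head() and not closed()', repo)
#   for rev in m(repo):      # evaluates against the full repo by default
#       ...
#
# Passing an explicit subset to the matcher restricts evaluation to it.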
2794
2794
2795 def makematcher(tree):
2795 def makematcher(tree):
2796 """Create a matcher from an evaluatable tree"""
2796 """Create a matcher from an evaluatable tree"""
2797 def mfunc(repo, subset=None):
2797 def mfunc(repo, subset=None):
2798 if subset is None:
2798 if subset is None:
2799 subset = fullreposet(repo)
2799 subset = fullreposet(repo)
2800 if util.safehasattr(subset, 'isascending'):
2800 if util.safehasattr(subset, 'isascending'):
2801 result = getset(repo, subset, tree)
2801 result = getset(repo, subset, tree)
2802 else:
2802 else:
2803 result = getset(repo, baseset(subset), tree)
2803 result = getset(repo, baseset(subset), tree)
2804 return result
2804 return result
2805 return mfunc
2805 return mfunc
2806
2806
2807 def formatspec(expr, *args):
2807 def formatspec(expr, *args):
2808 '''
2808 '''
2809 This is a convenience function for using revsets internally, and
2809 This is a convenience function for using revsets internally, and
2810 escapes arguments appropriately. Aliases are intentionally ignored
2810 escapes arguments appropriately. Aliases are intentionally ignored
2811 so that intended expression behavior isn't accidentally subverted.
2811 so that intended expression behavior isn't accidentally subverted.
2812
2812
2813 Supported arguments:
2813 Supported arguments:
2814
2814
2815 %r = revset expression, parenthesized
2815 %r = revset expression, parenthesized
2816 %d = int(arg), no quoting
2816 %d = int(arg), no quoting
2817 %s = string(arg), escaped and single-quoted
2817 %s = string(arg), escaped and single-quoted
2818 %b = arg.branch(), escaped and single-quoted
2818 %b = arg.branch(), escaped and single-quoted
2819 %n = hex(arg), single-quoted
2819 %n = hex(arg), single-quoted
2820 %% = a literal '%'
2820 %% = a literal '%'
2821
2821
2822 Prefixing the type with 'l' specifies a parenthesized list of that type.
2822 Prefixing the type with 'l' specifies a parenthesized list of that type.
2823
2823
2824 >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
2824 >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
2825 '(10 or 11):: and ((this()) or (that()))'
2825 '(10 or 11):: and ((this()) or (that()))'
2826 >>> formatspec('%d:: and not %d::', 10, 20)
2826 >>> formatspec('%d:: and not %d::', 10, 20)
2827 '10:: and not 20::'
2827 '10:: and not 20::'
2828 >>> formatspec('%ld or %ld', [], [1])
2828 >>> formatspec('%ld or %ld', [], [1])
2829 "_list('') or 1"
2829 "_list('') or 1"
2830 >>> formatspec('keyword(%s)', 'foo\\xe9')
2830 >>> formatspec('keyword(%s)', 'foo\\xe9')
2831 "keyword('foo\\\\xe9')"
2831 "keyword('foo\\\\xe9')"
2832 >>> b = lambda: 'default'
2832 >>> b = lambda: 'default'
2833 >>> b.branch = b
2833 >>> b.branch = b
2834 >>> formatspec('branch(%b)', b)
2834 >>> formatspec('branch(%b)', b)
2835 "branch('default')"
2835 "branch('default')"
2836 >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
2836 >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
2837 "root(_list('a\\x00b\\x00c\\x00d'))"
2837 "root(_list('a\\x00b\\x00c\\x00d'))"
2838 '''
2838 '''
2839
2839
2840 def quote(s):
2840 def quote(s):
2841 return repr(str(s))
2841 return repr(str(s))
2842
2842
2843 def argtype(c, arg):
2843 def argtype(c, arg):
2844 if c == 'd':
2844 if c == 'd':
2845 return str(int(arg))
2845 return str(int(arg))
2846 elif c == 's':
2846 elif c == 's':
2847 return quote(arg)
2847 return quote(arg)
2848 elif c == 'r':
2848 elif c == 'r':
2849 parse(arg) # make sure syntax errors are confined
2849 parse(arg) # make sure syntax errors are confined
2850 return '(%s)' % arg
2850 return '(%s)' % arg
2851 elif c == 'n':
2851 elif c == 'n':
2852 return quote(node.hex(arg))
2852 return quote(node.hex(arg))
2853 elif c == 'b':
2853 elif c == 'b':
2854 return quote(arg.branch())
2854 return quote(arg.branch())
2855
2855
2856 def listexp(s, t):
2856 def listexp(s, t):
2857 l = len(s)
2857 l = len(s)
2858 if l == 0:
2858 if l == 0:
2859 return "_list('')"
2859 return "_list('')"
2860 elif l == 1:
2860 elif l == 1:
2861 return argtype(t, s[0])
2861 return argtype(t, s[0])
2862 elif t == 'd':
2862 elif t == 'd':
2863 return "_intlist('%s')" % "\0".join(str(int(a)) for a in s)
2863 return "_intlist('%s')" % "\0".join(str(int(a)) for a in s)
2864 elif t == 's':
2864 elif t == 's':
2865 return "_list('%s')" % "\0".join(s)
2865 return "_list('%s')" % "\0".join(s)
2866 elif t == 'n':
2866 elif t == 'n':
2867 return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
2867 return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
2868 elif t == 'b':
2868 elif t == 'b':
2869 return "_list('%s')" % "\0".join(a.branch() for a in s)
2869 return "_list('%s')" % "\0".join(a.branch() for a in s)
2870
2870
2871 m = l // 2
2871 m = l // 2
2872 return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))
2872 return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))
2873
2873
2874 ret = ''
2874 ret = ''
2875 pos = 0
2875 pos = 0
2876 arg = 0
2876 arg = 0
2877 while pos < len(expr):
2877 while pos < len(expr):
2878 c = expr[pos]
2878 c = expr[pos]
2879 if c == '%':
2879 if c == '%':
2880 pos += 1
2880 pos += 1
2881 d = expr[pos]
2881 d = expr[pos]
2882 if d == '%':
2882 if d == '%':
2883 ret += d
2883 ret += d
2884 elif d in 'dsnbr':
2884 elif d in 'dsnbr':
2885 ret += argtype(d, args[arg])
2885 ret += argtype(d, args[arg])
2886 arg += 1
2886 arg += 1
2887 elif d == 'l':
2887 elif d == 'l':
2888 # a list of some type
2888 # a list of some type
2889 pos += 1
2889 pos += 1
2890 d = expr[pos]
2890 d = expr[pos]
2891 ret += listexp(list(args[arg]), d)
2891 ret += listexp(list(args[arg]), d)
2892 arg += 1
2892 arg += 1
2893 else:
2893 else:
2894 raise error.Abort(_('unexpected revspec format character %s')
2894 raise error.Abort(_('unexpected revspec format character %s')
2895 % d)
2895 % d)
2896 else:
2896 else:
2897 ret += c
2897 ret += c
2898 pos += 1
2898 pos += 1
2899
2899
2900 return ret
2900 return ret
2901
2901
2902 def prettyformat(tree):
2902 def prettyformat(tree):
2903 return parser.prettyformat(tree, ('string', 'symbol'))
2903 return parser.prettyformat(tree, ('string', 'symbol'))
2904
2904
2905 def depth(tree):
2905 def depth(tree):
2906 if isinstance(tree, tuple):
2906 if isinstance(tree, tuple):
2907 return max(map(depth, tree)) + 1
2907 return max(map(depth, tree)) + 1
2908 else:
2908 else:
2909 return 0
2909 return 0
2910
2910
2911 def funcsused(tree):
2911 def funcsused(tree):
2912 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2912 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2913 return set()
2913 return set()
2914 else:
2914 else:
2915 funcs = set()
2915 funcs = set()
2916 for s in tree[1:]:
2916 for s in tree[1:]:
2917 funcs |= funcsused(s)
2917 funcs |= funcsused(s)
2918 if tree[0] == 'func':
2918 if tree[0] == 'func':
2919 funcs.add(tree[1][1])
2919 funcs.add(tree[1][1])
2920 return funcs
2920 return funcs
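# For example (illustrative):
#
#   >>> sorted(funcsused(parse('keyword(bug) and head()')))
#   ['head', 'keyword']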
2921
2921
2922 def _formatsetrepr(r):
2922 def _formatsetrepr(r):
2923 """Format an optional printable representation of a set
2923 """Format an optional printable representation of a set
2924
2924
2925 ======== =================================
2925 ======== =================================
2926 type(r) example
2926 type(r) example
2927 ======== =================================
2927 ======== =================================
2928 tuple ('<not %r>', other)
2928 tuple ('<not %r>', other)
2929 str '<branch closed>'
2929 str '<branch closed>'
2930 callable lambda: '<branch %r>' % sorted(b)
2930 callable lambda: '<branch %r>' % sorted(b)
2931 object other
2931 object other
2932 ======== =================================
2932 ======== =================================
2933 """
2933 """
2934 if r is None:
2934 if r is None:
2935 return ''
2935 return ''
2936 elif isinstance(r, tuple):
2936 elif isinstance(r, tuple):
2937 return r[0] % r[1:]
2937 return r[0] % r[1:]
2938 elif isinstance(r, str):
2938 elif isinstance(r, str):
2939 return r
2939 return r
2940 elif callable(r):
2940 elif callable(r):
2941 return r()
2941 return r()
2942 else:
2942 else:
2943 return repr(r)
2943 return repr(r)
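# For example, the tuple form interpolates like a format string (illustrative):
#
#   >>> _formatsetrepr(('<not %r>', [1, 2]))
#   '<not [1, 2]>'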
2944
2944
2945 class abstractsmartset(object):
2945 class abstractsmartset(object):
2946
2946
2947 def __nonzero__(self):
2947 def __nonzero__(self):
2948 """True if the smartset is not empty"""
2948 """True if the smartset is not empty"""
2949 raise NotImplementedError()
2949 raise NotImplementedError()
2950
2950
2951 def __contains__(self, rev):
2951 def __contains__(self, rev):
2952 """provide fast membership testing"""
2952 """provide fast membership testing"""
2953 raise NotImplementedError()
2953 raise NotImplementedError()
2954
2954
2955 def __iter__(self):
2955 def __iter__(self):
2956 """iterate the set in the order it is supposed to be iterated"""
2956 """iterate the set in the order it is supposed to be iterated"""
2957 raise NotImplementedError()
2957 raise NotImplementedError()
2958
2958
2959 # Attributes containing a function to perform a fast iteration in a given
2959 # Attributes containing a function to perform a fast iteration in a given
2960 # direction. A smartset can have none, one, or both defined.
2960 # direction. A smartset can have none, one, or both defined.
2961 #
2961 #
2962 # Default value is None instead of a function returning None to avoid
2962 # Default value is None instead of a function returning None to avoid
2963 # initializing an iterator just for testing if a fast method exists.
2963 # initializing an iterator just for testing if a fast method exists.
2964 fastasc = None
2964 fastasc = None
2965 fastdesc = None
2965 fastdesc = None
2966
2966
2967 def isascending(self):
2967 def isascending(self):
2968 """True if the set will iterate in ascending order"""
2968 """True if the set will iterate in ascending order"""
2969 raise NotImplementedError()
2969 raise NotImplementedError()
2970
2970
2971 def isdescending(self):
2971 def isdescending(self):
2972 """True if the set will iterate in descending order"""
2972 """True if the set will iterate in descending order"""
2973 raise NotImplementedError()
2973 raise NotImplementedError()
2974
2974
2975 def istopo(self):
2975 def istopo(self):
2976 """True if the set will iterate in topographical order"""
2976 """True if the set will iterate in topographical order"""
2977 raise NotImplementedError()
2977 raise NotImplementedError()
2978
2978
2979 def min(self):
2979 def min(self):
2980 """return the minimum element in the set"""
2980 """return the minimum element in the set"""
2981 if self.fastasc is None:
2981 if self.fastasc is None:
2982 v = min(self)
2982 v = min(self)
2983 else:
2983 else:
2984 for v in self.fastasc():
2984 for v in self.fastasc():
2985 break
2985 break
2986 else:
2986 else:
2987 raise ValueError('arg is an empty sequence')
2987 raise ValueError('arg is an empty sequence')
2988 self.min = lambda: v
2988 self.min = lambda: v
2989 return v
2989 return v
2990
2990
2991 def max(self):
2991 def max(self):
2992 """return the maximum element in the set"""
2992 """return the maximum element in the set"""
2993 if self.fastdesc is None:
2993 if self.fastdesc is None:
2994 return max(self)
2994 return max(self)
2995 else:
2995 else:
2996 for v in self.fastdesc():
2996 for v in self.fastdesc():
2997 break
2997 break
2998 else:
2998 else:
2999 raise ValueError('arg is an empty sequence')
2999 raise ValueError('arg is an empty sequence')
3000 self.max = lambda: v
3000 self.max = lambda: v
3001 return v
3001 return v
3002
3002
3003 def first(self):
3003 def first(self):
3004 """return the first element in the set (user iteration perspective)
3004 """return the first element in the set (user iteration perspective)
3005
3005
3006 Return None if the set is empty"""
3006 Return None if the set is empty"""
3007 raise NotImplementedError()
3007 raise NotImplementedError()
3008
3008
3009 def last(self):
3009 def last(self):
3010 """return the last element in the set (user iteration perspective)
3010 """return the last element in the set (user iteration perspective)
3011
3011
3012 Return None if the set is empty"""
3012 Return None if the set is empty"""
3013 raise NotImplementedError()
3013 raise NotImplementedError()
3014
3014
3015 def __len__(self):
3015 def __len__(self):
3016 """return the length of the smartsets
3016 """return the length of the smartsets
3017
3017
3018 This can be expensive on smartset that could be lazy otherwise."""
3018 This can be expensive on smartset that could be lazy otherwise."""
3019 raise NotImplementedError()
3019 raise NotImplementedError()
3020
3020
3021 def reverse(self):
3021 def reverse(self):
3022 """reverse the expected iteration order"""
3022 """reverse the expected iteration order"""
3023 raise NotImplementedError()
3023 raise NotImplementedError()
3024
3024
3025 def sort(self, reverse=True):
3025 def sort(self, reverse=True):
3026 """get the set to iterate in an ascending or descending order"""
3026 """get the set to iterate in an ascending or descending order"""
3027 raise NotImplementedError()
3027 raise NotImplementedError()
3028
3028
3029 def __and__(self, other):
3029 def __and__(self, other):
3030 """Returns a new object with the intersection of the two collections.
3030 """Returns a new object with the intersection of the two collections.
3031
3031
3032 This is part of the mandatory API for smartset."""
3032 This is part of the mandatory API for smartset."""
3033 if isinstance(other, fullreposet):
3033 if isinstance(other, fullreposet):
3034 return self
3034 return self
3035 return self.filter(other.__contains__, condrepr=other, cache=False)
3035 return self.filter(other.__contains__, condrepr=other, cache=False)
3036
3036
3037 def __add__(self, other):
3037 def __add__(self, other):
3038 """Returns a new object with the union of the two collections.
3038 """Returns a new object with the union of the two collections.
3039
3039
3040 This is part of the mandatory API for smartset."""
3040 This is part of the mandatory API for smartset."""
3041 return addset(self, other)
3041 return addset(self, other)
3042
3042
3043 def __sub__(self, other):
3043 def __sub__(self, other):
3044 """Returns a new object with the substraction of the two collections.
3044 """Returns a new object with the substraction of the two collections.
3045
3045
3046 This is part of the mandatory API for smartset."""
3046 This is part of the mandatory API for smartset."""
3047 c = other.__contains__
3047 c = other.__contains__
3048 return self.filter(lambda r: not c(r), condrepr=('<not %r>', other),
3048 return self.filter(lambda r: not c(r), condrepr=('<not %r>', other),
3049 cache=False)
3049 cache=False)
3050
3050
3051 def filter(self, condition, condrepr=None, cache=True):
3051 def filter(self, condition, condrepr=None, cache=True):
3052 """Returns this smartset filtered by condition as a new smartset.
3052 """Returns this smartset filtered by condition as a new smartset.
3053
3053
3054 `condition` is a callable which takes a revision number and returns a
3054 `condition` is a callable which takes a revision number and returns a
3055 boolean. Optional `condrepr` provides a printable representation of
3055 boolean. Optional `condrepr` provides a printable representation of
3056 the given `condition`.
3056 the given `condition`.
3057
3057
3058 This is part of the mandatory API for smartset."""
3058 This is part of the mandatory API for smartset."""
3059 # builtins cannot be cached, but they do not need to be
3059 # builtins cannot be cached, but they do not need to be
3060 if cache and util.safehasattr(condition, 'func_code'):
3060 if cache and util.safehasattr(condition, 'func_code'):
3061 condition = util.cachefunc(condition)
3061 condition = util.cachefunc(condition)
3062 return filteredset(self, condition, condrepr)
3062 return filteredset(self, condition, condrepr)
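# Composition sketch (illustrative): given two smartsets xs and ys,
#
#   xs & ys    # filteredset of xs, keeping revs contained in ys
#   xs + ys    # addset lazily wrapping both sets
#   xs - ys    # filteredset of xs, keeping revs not contained in ys
#
# none of these force the underlying sets to be fully evaluated up front.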
3063
3063
3064 class baseset(abstractsmartset):
3064 class baseset(abstractsmartset):
3065 """Basic data structure that represents a revset and contains the basic
3065 """Basic data structure that represents a revset and contains the basic
3066 operations that it should be able to perform.
3066 operations that it should be able to perform.
3067
3067
3068 Every method in this class should be implemented by any smartset class.
3068 Every method in this class should be implemented by any smartset class.
3069 """
3069 """
3070 def __init__(self, data=(), datarepr=None, istopo=False):
3070 def __init__(self, data=(), datarepr=None, istopo=False):
3071 """
3071 """
3072 datarepr: a tuple of (format, obj, ...), a function or an object that
3072 datarepr: a tuple of (format, obj, ...), a function or an object that
3073 provides a printable representation of the given data.
3073 provides a printable representation of the given data.
3074 """
3074 """
3075 self._ascending = None
3075 self._ascending = None
3076 self._istopo = istopo
3076 self._istopo = istopo
3077 if not isinstance(data, list):
3077 if not isinstance(data, list):
3078 if isinstance(data, set):
3078 if isinstance(data, set):
3079 self._set = data
3079 self._set = data
3080 # a set has no order; we pick one for stability purposes
3080 # a set has no order; we pick one for stability purposes
3081 self._ascending = True
3081 self._ascending = True
3082 data = list(data)
3082 data = list(data)
3083 self._list = data
3083 self._list = data
3084 self._datarepr = datarepr
3084 self._datarepr = datarepr
3085
3085
3086 @util.propertycache
3086 @util.propertycache
3087 def _set(self):
3087 def _set(self):
3088 return set(self._list)
3088 return set(self._list)
3089
3089
3090 @util.propertycache
3090 @util.propertycache
3091 def _asclist(self):
3091 def _asclist(self):
3092 asclist = self._list[:]
3092 asclist = self._list[:]
3093 asclist.sort()
3093 asclist.sort()
3094 return asclist
3094 return asclist
3095
3095
3096 def __iter__(self):
3096 def __iter__(self):
3097 if self._ascending is None:
3097 if self._ascending is None:
3098 return iter(self._list)
3098 return iter(self._list)
3099 elif self._ascending:
3099 elif self._ascending:
3100 return iter(self._asclist)
3100 return iter(self._asclist)
3101 else:
3101 else:
3102 return reversed(self._asclist)
3102 return reversed(self._asclist)
3103
3103
3104 def fastasc(self):
3104 def fastasc(self):
3105 return iter(self._asclist)
3105 return iter(self._asclist)
3106
3106
3107 def fastdesc(self):
3107 def fastdesc(self):
3108 return reversed(self._asclist)
3108 return reversed(self._asclist)
3109
3109
3110 @util.propertycache
3110 @util.propertycache
3111 def __contains__(self):
3111 def __contains__(self):
3112 return self._set.__contains__
3112 return self._set.__contains__
3113
3113
3114 def __nonzero__(self):
3114 def __nonzero__(self):
3115 return bool(self._list)
3115 return bool(self._list)
3116
3116
3117 def sort(self, reverse=False):
3117 def sort(self, reverse=False):
3118 self._ascending = not bool(reverse)
3118 self._ascending = not bool(reverse)
3119 self._istopo = False
3119 self._istopo = False
3120
3120
3121 def reverse(self):
3121 def reverse(self):
3122 if self._ascending is None:
3122 if self._ascending is None:
3123 self._list.reverse()
3123 self._list.reverse()
3124 else:
3124 else:
3125 self._ascending = not self._ascending
3125 self._ascending = not self._ascending
3126 self._istopo = False
3126 self._istopo = False
3127
3127
3128 def __len__(self):
3128 def __len__(self):
3129 return len(self._list)
3129 return len(self._list)
3130
3130
3131 def isascending(self):
3131 def isascending(self):
3132 """Returns True if the collection is ascending order, False if not.
3132 """Returns True if the collection is ascending order, False if not.
3133
3133
3134 This is part of the mandatory API for smartset."""
3134 This is part of the mandatory API for smartset."""
3135 if len(self) <= 1:
3135 if len(self) <= 1:
3136 return True
3136 return True
3137 return self._ascending is not None and self._ascending
3137 return self._ascending is not None and self._ascending
3138
3138
3139 def isdescending(self):
3139 def isdescending(self):
3140 """Returns True if the collection is descending order, False if not.
3140 """Returns True if the collection is descending order, False if not.
3141
3141
3142 This is part of the mandatory API for smartset."""
3142 This is part of the mandatory API for smartset."""
3143 if len(self) <= 1:
3143 if len(self) <= 1:
3144 return True
3144 return True
3145 return self._ascending is not None and not self._ascending
3145 return self._ascending is not None and not self._ascending
3146
3146
3147 def istopo(self):
3147 def istopo(self):
3148 """Is the collection is in topographical order or not.
3148 """Is the collection is in topographical order or not.
3149
3149
3150 This is part of the mandatory API for smartset."""
3150 This is part of the mandatory API for smartset."""
3151 if len(self) <= 1:
3151 if len(self) <= 1:
3152 return True
3152 return True
3153 return self._istopo
3153 return self._istopo
3154
3154
3155 def first(self):
3155 def first(self):
3156 if self:
3156 if self:
3157 if self._ascending is None:
3157 if self._ascending is None:
3158 return self._list[0]
3158 return self._list[0]
3159 elif self._ascending:
3159 elif self._ascending:
3160 return self._asclist[0]
3160 return self._asclist[0]
3161 else:
3161 else:
3162 return self._asclist[-1]
3162 return self._asclist[-1]
3163 return None
3163 return None
3164
3164
3165 def last(self):
3165 def last(self):
3166 if self:
3166 if self:
3167 if self._ascending is None:
3167 if self._ascending is None:
3168 return self._list[-1]
3168 return self._list[-1]
3169 elif self._ascending:
3169 elif self._ascending:
3170 return self._asclist[-1]
3170 return self._asclist[-1]
3171 else:
3171 else:
3172 return self._asclist[0]
3172 return self._asclist[0]
3173 return None
3173 return None
3174
3174
3175 def __repr__(self):
3175 def __repr__(self):
3176 d = {None: '', False: '-', True: '+'}[self._ascending]
3176 d = {None: '', False: '-', True: '+'}[self._ascending]
3177 s = _formatsetrepr(self._datarepr)
3177 s = _formatsetrepr(self._datarepr)
3178 if not s:
3178 if not s:
3179 l = self._list
3179 l = self._list
3180 # if _list has been built from a set, it might have a different
3180 # if _list has been built from a set, it might have a different
3181 # order from one python implementation to another.
3181 # order from one python implementation to another.
3182 # We fall back to the sorted version for stable output.
3182 # We fall back to the sorted version for stable output.
3183 if self._ascending is not None:
3183 if self._ascending is not None:
3184 l = self._asclist
3184 l = self._asclist
3185 s = repr(l)
3185 s = repr(l)
3186 return '<%s%s %s>' % (type(self).__name__, d, s)
3186 return '<%s%s %s>' % (type(self).__name__, d, s)
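# Ordering semantics in a nutshell (illustrative):
#
#   >>> xs = baseset([3, 0, 2])
#   >>> list(xs), xs.first(), xs.last()    # insertion order until sorted
#   ([3, 0, 2], 3, 2)
#   >>> xs.sort()
#   >>> list(xs), xs.first(), xs.last()
#   ([0, 2, 3], 0, 3)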
3187
3187
3188 class filteredset(abstractsmartset):
3188 class filteredset(abstractsmartset):
3189 """Duck type for baseset class which iterates lazily over the revisions in
3189 """Duck type for baseset class which iterates lazily over the revisions in
3190 the subset and contains a function which tests for membership in the
3190 the subset and contains a function which tests for membership in the
3191 revset
3191 revset
3192 """
3192 """
3193 def __init__(self, subset, condition=lambda x: True, condrepr=None):
3193 def __init__(self, subset, condition=lambda x: True, condrepr=None):
3194 """
3194 """
3195 condition: a function that decides whether a revision in the subset
3195 condition: a function that decides whether a revision in the subset
3196 belongs to the revset or not.
3196 belongs to the revset or not.
3197 condrepr: a tuple of (format, obj, ...), a function or an object that
3197 condrepr: a tuple of (format, obj, ...), a function or an object that
3198 provides a printable representation of the given condition.
3198 provides a printable representation of the given condition.
3199 """
3199 """
3200 self._subset = subset
3200 self._subset = subset
3201 self._condition = condition
3201 self._condition = condition
3202 self._condrepr = condrepr
3202 self._condrepr = condrepr
3203
3203
3204 def __contains__(self, x):
3204 def __contains__(self, x):
3205 return x in self._subset and self._condition(x)
3205 return x in self._subset and self._condition(x)
3206
3206
3207 def __iter__(self):
3207 def __iter__(self):
3208 return self._iterfilter(self._subset)
3208 return self._iterfilter(self._subset)
3209
3209
3210 def _iterfilter(self, it):
3210 def _iterfilter(self, it):
3211 cond = self._condition
3211 cond = self._condition
3212 for x in it:
3212 for x in it:
3213 if cond(x):
3213 if cond(x):
3214 yield x
3214 yield x
3215
3215
3216 @property
3216 @property
3217 def fastasc(self):
3217 def fastasc(self):
3218 it = self._subset.fastasc
3218 it = self._subset.fastasc
3219 if it is None:
3219 if it is None:
3220 return None
3220 return None
3221 return lambda: self._iterfilter(it())
3221 return lambda: self._iterfilter(it())
3222
3222
3223 @property
3223 @property
3224 def fastdesc(self):
3224 def fastdesc(self):
3225 it = self._subset.fastdesc
3225 it = self._subset.fastdesc
3226 if it is None:
3226 if it is None:
3227 return None
3227 return None
3228 return lambda: self._iterfilter(it())
3228 return lambda: self._iterfilter(it())
3229
3229
3230 def __nonzero__(self):
3230 def __nonzero__(self):
3231 fast = None
3231 fast = None
3232 candidates = [self.fastasc if self.isascending() else None,
3232 candidates = [self.fastasc if self.isascending() else None,
3233 self.fastdesc if self.isdescending() else None,
3233 self.fastdesc if self.isdescending() else None,
3234 self.fastasc,
3234 self.fastasc,
3235 self.fastdesc]
3235 self.fastdesc]
3236 for candidate in candidates:
3236 for candidate in candidates:
3237 if candidate is not None:
3237 if candidate is not None:
3238 fast = candidate
3238 fast = candidate
3239 break
3239 break
3240
3240
3241 if fast is not None:
3241 if fast is not None:
3242 it = fast()
3242 it = fast()
3243 else:
3243 else:
3244 it = self
3244 it = self
3245
3245
3246 for r in it:
3246 for r in it:
3247 return True
3247 return True
3248 return False
3248 return False
3249
3249
3250 def __len__(self):
3250 def __len__(self):
3251 # Basic implementation to be changed in future patches.
3251 # Basic implementation to be changed in future patches.
3252 # Until this gets improved, we use a generator expression
3252 # Until this gets improved, we use a generator expression
3253 # here, since list comprehensions are free to call __len__ again,
3253 # here, since list comprehensions are free to call __len__ again,
3254 # causing infinite recursion.
3254 # causing infinite recursion.
3255 l = baseset(r for r in self)
3255 l = baseset(r for r in self)
3256 return len(l)
3256 return len(l)
3257
3257
3258 def sort(self, reverse=False):
3258 def sort(self, reverse=False):
3259 self._subset.sort(reverse=reverse)
3259 self._subset.sort(reverse=reverse)
3260
3260
3261 def reverse(self):
3261 def reverse(self):
3262 self._subset.reverse()
3262 self._subset.reverse()
3263
3263
3264 def isascending(self):
3264 def isascending(self):
3265 return self._subset.isascending()
3265 return self._subset.isascending()
3266
3266
3267 def isdescending(self):
3267 def isdescending(self):
3268 return self._subset.isdescending()
3268 return self._subset.isdescending()
3269
3269
3270 def istopo(self):
3270 def istopo(self):
3271 return self._subset.istopo()
3271 return self._subset.istopo()
3272
3272
3273 def first(self):
3273 def first(self):
3274 for x in self:
3274 for x in self:
3275 return x
3275 return x
3276 return None
3276 return None
3277
3277
3278 def last(self):
3278 def last(self):
3279 it = None
3279 it = None
3280 if self.isascending():
3280 if self.isascending():
3281 it = self.fastdesc
3281 it = self.fastdesc
3282 elif self.isdescending():
3282 elif self.isdescending():
3283 it = self.fastasc
3283 it = self.fastasc
3284 if it is not None:
3284 if it is not None:
3285 for x in it():
3285 for x in it():
3286 return x
3286 return x
3287 return None #empty case
3287 return None #empty case
3288 else:
3288 else:
3289 x = None
3289 x = None
3290 for x in self:
3290 for x in self:
3291 pass
3291 pass
3292 return x
3292 return x
3293
3293
3294 def __repr__(self):
3294 def __repr__(self):
3295 xs = [repr(self._subset)]
3295 xs = [repr(self._subset)]
3296 s = _formatsetrepr(self._condrepr)
3296 s = _formatsetrepr(self._condrepr)
3297 if s:
3297 if s:
3298 xs.append(s)
3298 xs.append(s)
3299 return '<%s %s>' % (type(self).__name__, ', '.join(xs))
3299 return '<%s %s>' % (type(self).__name__, ', '.join(xs))
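# A small sketch of lazy filtering (illustrative):
#
#   >>> fs = filteredset(baseset([0, 1, 2, 3, 4]), lambda r: r % 2 == 0)
#   >>> list(fs), 3 in fs, fs.first(), fs.last()
#   ([0, 2, 4], False, 0, 4)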
3300
3300
3301 def _iterordered(ascending, iter1, iter2):
3301 def _iterordered(ascending, iter1, iter2):
3302 """produce an ordered iteration from two iterators with the same order
3302 """produce an ordered iteration from two iterators with the same order
3303
3303
3304 The 'ascending' argument is used to indicate the iteration direction.
3304 The 'ascending' argument is used to indicate the iteration direction.
3305 """
3305 """
3306 choice = max
3306 choice = max
3307 if ascending:
3307 if ascending:
3308 choice = min
3308 choice = min
3309
3309
3310 val1 = None
3310 val1 = None
3311 val2 = None
3311 val2 = None
3312 try:
3312 try:
3313 # Consume both iterators in an ordered way until one is empty
3313 # Consume both iterators in an ordered way until one is empty
3314 while True:
3314 while True:
3315 if val1 is None:
3315 if val1 is None:
3316 val1 = next(iter1)
3316 val1 = next(iter1)
3317 if val2 is None:
3317 if val2 is None:
3318 val2 = next(iter2)
3318 val2 = next(iter2)
3319 n = choice(val1, val2)
3319 n = choice(val1, val2)
3320 yield n
3320 yield n
3321 if val1 == n:
3321 if val1 == n:
3322 val1 = None
3322 val1 = None
3323 if val2 == n:
3323 if val2 == n:
3324 val2 = None
3324 val2 = None
3325 except StopIteration:
3325 except StopIteration:
3326 # Flush any remaining values and consume the other one
3326 # Flush any remaining values and consume the other one
3327 it = iter2
3327 it = iter2
3328 if val1 is not None:
3328 if val1 is not None:
3329 yield val1
3329 yield val1
3330 it = iter1
3330 it = iter1
3331 elif val2 is not None:
3331 elif val2 is not None:
3332 # might have been equality and both are empty
3332 # might have been equality and both are empty
3333 yield val2
3333 yield val2
3334 for val in it:
3334 for val in it:
3335 yield val
3335 yield val
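# For example, merging two ascending iterators drops duplicates (illustrative):
#
#   >>> list(_iterordered(True, iter([0, 2, 3]), iter([2, 4, 5])))
#   [0, 2, 3, 4, 5]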
3336
3336
3337 class addset(abstractsmartset):
3337 class addset(abstractsmartset):
3338 """Represent the addition of two sets
3338 """Represent the addition of two sets
3339
3339
3340 Wrapper structure for lazily adding two structures without losing much
3340 Wrapper structure for lazily adding two structures without losing much
3341 performance on the __contains__ method
3341 performance on the __contains__ method
3342
3342
3343 If the ascending attribute is set, that means the two structures are
3343 If the ascending attribute is set, that means the two structures are
3344 ordered in either an ascending or descending way. Therefore, we can add
3344 ordered in either an ascending or descending way. Therefore, we can add
3345 them while maintaining the order by iterating over both at the same time
3345 them while maintaining the order by iterating over both at the same time
3346
3346
3347 >>> xs = baseset([0, 3, 2])
3347 >>> xs = baseset([0, 3, 2])
3348 >>> ys = baseset([5, 2, 4])
3348 >>> ys = baseset([5, 2, 4])
3349
3349
3350 >>> rs = addset(xs, ys)
3350 >>> rs = addset(xs, ys)
3351 >>> bool(rs), 0 in rs, 1 in rs, 5 in rs, rs.first(), rs.last()
3351 >>> bool(rs), 0 in rs, 1 in rs, 5 in rs, rs.first(), rs.last()
3352 (True, True, False, True, 0, 4)
3352 (True, True, False, True, 0, 4)
3353 >>> rs = addset(xs, baseset([]))
3353 >>> rs = addset(xs, baseset([]))
3354 >>> bool(rs), 0 in rs, 1 in rs, rs.first(), rs.last()
3354 >>> bool(rs), 0 in rs, 1 in rs, rs.first(), rs.last()
3355 (True, True, False, 0, 2)
3355 (True, True, False, 0, 2)
3356 >>> rs = addset(baseset([]), baseset([]))
3356 >>> rs = addset(baseset([]), baseset([]))
3357 >>> bool(rs), 0 in rs, rs.first(), rs.last()
3357 >>> bool(rs), 0 in rs, rs.first(), rs.last()
3358 (False, False, None, None)
3358 (False, False, None, None)
3359
3359
3360 iterate unsorted:
3360 iterate unsorted:
3361 >>> rs = addset(xs, ys)
3361 >>> rs = addset(xs, ys)
3362 >>> # (use generator because pypy could call len())
3362 >>> # (use generator because pypy could call len())
3363 >>> list(x for x in rs) # without _genlist
3363 >>> list(x for x in rs) # without _genlist
3364 [0, 3, 2, 5, 4]
3364 [0, 3, 2, 5, 4]
3365 >>> assert not rs._genlist
3365 >>> assert not rs._genlist
3366 >>> len(rs)
3366 >>> len(rs)
3367 5
3367 5
3368 >>> [x for x in rs] # with _genlist
3368 >>> [x for x in rs] # with _genlist
3369 [0, 3, 2, 5, 4]
3369 [0, 3, 2, 5, 4]
3370 >>> assert rs._genlist
3370 >>> assert rs._genlist
3371
3371
3372 iterate ascending:
3372 iterate ascending:
3373 >>> rs = addset(xs, ys, ascending=True)
3373 >>> rs = addset(xs, ys, ascending=True)
3374 >>> # (use generator because pypy could call len())
3374 >>> # (use generator because pypy could call len())
3375 >>> list(x for x in rs), list(x for x in rs.fastasc()) # without _asclist
3375 >>> list(x for x in rs), list(x for x in rs.fastasc()) # without _asclist
3376 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
3376 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
3377 >>> assert not rs._asclist
3377 >>> assert not rs._asclist
3378 >>> len(rs)
3378 >>> len(rs)
3379 5
3379 5
3380 >>> [x for x in rs], [x for x in rs.fastasc()]
3380 >>> [x for x in rs], [x for x in rs.fastasc()]
3381 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
3381 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
3382 >>> assert rs._asclist
3382 >>> assert rs._asclist
3383
3383
3384 iterate descending:
3384 iterate descending:
3385 >>> rs = addset(xs, ys, ascending=False)
3385 >>> rs = addset(xs, ys, ascending=False)
3386 >>> # (use generator because pypy could call len())
3386 >>> # (use generator because pypy could call len())
3387 >>> list(x for x in rs), list(x for x in rs.fastdesc()) # without _asclist
3387 >>> list(x for x in rs), list(x for x in rs.fastdesc()) # without _asclist
3388 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
3388 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
3389 >>> assert not rs._asclist
3389 >>> assert not rs._asclist
3390 >>> len(rs)
3390 >>> len(rs)
3391 5
3391 5
3392 >>> [x for x in rs], [x for x in rs.fastdesc()]
3392 >>> [x for x in rs], [x for x in rs.fastdesc()]
3393 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
3393 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
3394 >>> assert rs._asclist
3394 >>> assert rs._asclist
3395
3395
3396 iterate ascending without fastasc:
3396 iterate ascending without fastasc:
3397 >>> rs = addset(xs, generatorset(ys), ascending=True)
3397 >>> rs = addset(xs, generatorset(ys), ascending=True)
3398 >>> assert rs.fastasc is None
3398 >>> assert rs.fastasc is None
3399 >>> [x for x in rs]
3399 >>> [x for x in rs]
3400 [0, 2, 3, 4, 5]
3400 [0, 2, 3, 4, 5]
3401
3401
3402 iterate descending without fastdesc:
3402 iterate descending without fastdesc:
3403 >>> rs = addset(generatorset(xs), ys, ascending=False)
3403 >>> rs = addset(generatorset(xs), ys, ascending=False)
3404 >>> assert rs.fastdesc is None
3404 >>> assert rs.fastdesc is None
3405 >>> [x for x in rs]
3405 >>> [x for x in rs]
3406 [5, 4, 3, 2, 0]
3406 [5, 4, 3, 2, 0]
3407 """
3407 """
3408 def __init__(self, revs1, revs2, ascending=None):
3408 def __init__(self, revs1, revs2, ascending=None):
3409 self._r1 = revs1
3409 self._r1 = revs1
3410 self._r2 = revs2
3410 self._r2 = revs2
3411 self._iter = None
3411 self._iter = None
3412 self._ascending = ascending
3412 self._ascending = ascending
3413 self._genlist = None
3413 self._genlist = None
3414 self._asclist = None
3414 self._asclist = None
3415
3415
3416 def __len__(self):
3416 def __len__(self):
3417 return len(self._list)
3417 return len(self._list)
3418
3418
3419 def __nonzero__(self):
3419 def __nonzero__(self):
3420 return bool(self._r1) or bool(self._r2)
3420 return bool(self._r1) or bool(self._r2)
3421
3421
3422 @util.propertycache
3422 @util.propertycache
3423 def _list(self):
3423 def _list(self):
3424 if not self._genlist:
3424 if not self._genlist:
3425 self._genlist = baseset(iter(self))
3425 self._genlist = baseset(iter(self))
3426 return self._genlist
3426 return self._genlist
3427
3427
3428 def __iter__(self):
3428 def __iter__(self):
3429 """Iterate over both collections without repeating elements
3429 """Iterate over both collections without repeating elements
3430
3430
3431 If the ascending attribute is not set, iterate over the first one and
3431 If the ascending attribute is not set, iterate over the first one and
3432 then over the second one checking for membership on the first one so we
3432 then over the second one checking for membership on the first one so we
3433 don't yield any duplicates.
3433 don't yield any duplicates.
3434
3434
3435 If the ascending attribute is set, iterate over both collections at the
3435 If the ascending attribute is set, iterate over both collections at the
3436 same time, yielding only one value at a time in the given order.
3436 same time, yielding only one value at a time in the given order.
3437 """
3437 """
3438 if self._ascending is None:
3438 if self._ascending is None:
3439 if self._genlist:
3439 if self._genlist:
3440 return iter(self._genlist)
3440 return iter(self._genlist)
3441 def arbitraryordergen():
3441 def arbitraryordergen():
3442 for r in self._r1:
3442 for r in self._r1:
3443 yield r
3443 yield r
3444 inr1 = self._r1.__contains__
3444 inr1 = self._r1.__contains__
3445 for r in self._r2:
3445 for r in self._r2:
3446 if not inr1(r):
3446 if not inr1(r):
3447 yield r
3447 yield r
3448 return arbitraryordergen()
3448 return arbitraryordergen()
3449 # try to use our own fast iterator if it exists
3449 # try to use our own fast iterator if it exists
3450 self._trysetasclist()
3450 self._trysetasclist()
3451 if self._ascending:
3451 if self._ascending:
3452 attr = 'fastasc'
3452 attr = 'fastasc'
3453 else:
3453 else:
3454 attr = 'fastdesc'
3454 attr = 'fastdesc'
3455 it = getattr(self, attr)
3455 it = getattr(self, attr)
3456 if it is not None:
3456 if it is not None:
3457 return it()
3457 return it()
3458 # maybe only one of the two components supports fast iteration
3458 # maybe only one of the two components supports fast iteration
3459 # get iterator for _r1
3459 # get iterator for _r1
3460 iter1 = getattr(self._r1, attr)
3460 iter1 = getattr(self._r1, attr)
3461 if iter1 is None:
3461 if iter1 is None:
3462 # let's avoid side effects (not sure it matters)
3462 # let's avoid side effects (not sure it matters)
3463 iter1 = iter(sorted(self._r1, reverse=not self._ascending))
3463 iter1 = iter(sorted(self._r1, reverse=not self._ascending))
3464 else:
3464 else:
3465 iter1 = iter1()
3465 iter1 = iter1()
3466 # get iterator for _r2
3466 # get iterator for _r2
3467 iter2 = getattr(self._r2, attr)
3467 iter2 = getattr(self._r2, attr)
3468 if iter2 is None:
3468 if iter2 is None:
3469 # let's avoid side effects (not sure it matters)
3469 # let's avoid side effects (not sure it matters)
3470 iter2 = iter(sorted(self._r2, reverse=not self._ascending))
3470 iter2 = iter(sorted(self._r2, reverse=not self._ascending))
3471 else:
3471 else:
3472 iter2 = iter2()
3472 iter2 = iter2()
3473 return _iterordered(self._ascending, iter1, iter2)
3473 return _iterordered(self._ascending, iter1, iter2)
3474
3474
3475 def _trysetasclist(self):
3475 def _trysetasclist(self):
3476 """populate the _asclist attribute if possible and necessary"""
3476 """populate the _asclist attribute if possible and necessary"""
3477 if self._genlist is not None and self._asclist is None:
3477 if self._genlist is not None and self._asclist is None:
3478 self._asclist = sorted(self._genlist)
3478 self._asclist = sorted(self._genlist)
3479
3479
3480 @property
3480 @property
3481 def fastasc(self):
3481 def fastasc(self):
3482 self._trysetasclist()
3482 self._trysetasclist()
3483 if self._asclist is not None:
3483 if self._asclist is not None:
3484 return self._asclist.__iter__
3484 return self._asclist.__iter__
3485 iter1 = self._r1.fastasc
3485 iter1 = self._r1.fastasc
3486 iter2 = self._r2.fastasc
3486 iter2 = self._r2.fastasc
3487 if None in (iter1, iter2):
3487 if None in (iter1, iter2):
3488 return None
3488 return None
3489 return lambda: _iterordered(True, iter1(), iter2())
3489 return lambda: _iterordered(True, iter1(), iter2())
3490
3490
3491 @property
3491 @property
3492 def fastdesc(self):
3492 def fastdesc(self):
3493 self._trysetasclist()
3493 self._trysetasclist()
3494 if self._asclist is not None:
3494 if self._asclist is not None:
3495 return self._asclist.__reversed__
3495 return self._asclist.__reversed__
3496 iter1 = self._r1.fastdesc
3496 iter1 = self._r1.fastdesc
3497 iter2 = self._r2.fastdesc
3497 iter2 = self._r2.fastdesc
3498 if None in (iter1, iter2):
3498 if None in (iter1, iter2):
3499 return None
3499 return None
3500 return lambda: _iterordered(False, iter1(), iter2())
3500 return lambda: _iterordered(False, iter1(), iter2())
3501
3501
3502 def __contains__(self, x):
3502 def __contains__(self, x):
3503 return x in self._r1 or x in self._r2
3503 return x in self._r1 or x in self._r2
3504
3504
3505 def sort(self, reverse=False):
3505 def sort(self, reverse=False):
3506 """Sort the added set
3506 """Sort the added set
3507
3507
3508 For this we use the cached list with all the generated values and if we
3508 For this we use the cached list with all the generated values and if we
3509 know they are ascending or descending we can sort them in a smart way.
3509 know they are ascending or descending we can sort them in a smart way.
3510 """
3510 """
3511 self._ascending = not reverse
3511 self._ascending = not reverse
3512
3512
3513 def isascending(self):
3513 def isascending(self):
3514 return self._ascending is not None and self._ascending
3514 return self._ascending is not None and self._ascending
3515
3515
3516 def isdescending(self):
3516 def isdescending(self):
3517 return self._ascending is not None and not self._ascending
3517 return self._ascending is not None and not self._ascending
3518
3518
3519 def istopo(self):
3519 def istopo(self):
3520 # not worth the trouble asserting if the two sets combined are still
3520 # not worth the trouble asserting if the two sets combined are still
3521 # in topological order. Use the sort() predicate to explicitly sort
3521 # in topological order. Use the sort() predicate to explicitly sort
3522 # again instead.
3522 # again instead.
3523 return False
3523 return False
3524
3524
3525 def reverse(self):
3525 def reverse(self):
3526 if self._ascending is None:
3526 if self._ascending is None:
3527 self._list.reverse()
3527 self._list.reverse()
3528 else:
3528 else:
3529 self._ascending = not self._ascending
3529 self._ascending = not self._ascending
3530
3530
3531 def first(self):
3531 def first(self):
3532 for x in self:
3532 for x in self:
3533 return x
3533 return x
3534 return None
3534 return None
3535
3535
3536 def last(self):
3536 def last(self):
3537 self.reverse()
3537 self.reverse()
3538 val = self.first()
3538 val = self.first()
3539 self.reverse()
3539 self.reverse()
3540 return val
3540 return val
3541
3541
3542 def __repr__(self):
3542 def __repr__(self):
3543 d = {None: '', False: '-', True: '+'}[self._ascending]
3543 d = {None: '', False: '-', True: '+'}[self._ascending]
3544 return '<%s%s %r, %r>' % (type(self).__name__, d, self._r1, self._r2)
3544 return '<%s%s %r, %r>' % (type(self).__name__, d, self._r1, self._r2)
3545
3545
3546 class generatorset(abstractsmartset):
3546 class generatorset(abstractsmartset):
3547 """Wrap a generator for lazy iteration
3547 """Wrap a generator for lazy iteration
3548
3548
3549 Wrapper structure for generators that provides lazy membership and can
3549 Wrapper structure for generators that provides lazy membership and can
3550 be iterated more than once.
3550 be iterated more than once.
3551 When asked for membership it generates values until either it finds the
3551 When asked for membership it generates values until either it finds the
3552 requested one or has gone through all the elements in the generator
3552 requested one or has gone through all the elements in the generator
3553 """
3553 """
3554 def __init__(self, gen, iterasc=None):
3554 def __init__(self, gen, iterasc=None):
3555 """
3555 """
3556 gen: a generator producing the values for the generatorset.
3556 gen: a generator producing the values for the generatorset.
3557 """
3557 """
3558 self._gen = gen
3558 self._gen = gen
3559 self._asclist = None
3559 self._asclist = None
3560 self._cache = {}
3560 self._cache = {}
3561 self._genlist = []
3561 self._genlist = []
3562 self._finished = False
3562 self._finished = False
3563 self._ascending = True
3563 self._ascending = True
3564 if iterasc is not None:
3564 if iterasc is not None:
3565 if iterasc:
3565 if iterasc:
3566 self.fastasc = self._iterator
3566 self.fastasc = self._iterator
3567 self.__contains__ = self._asccontains
3567 self.__contains__ = self._asccontains
3568 else:
3568 else:
3569 self.fastdesc = self._iterator
3569 self.fastdesc = self._iterator
3570 self.__contains__ = self._desccontains
3570 self.__contains__ = self._desccontains
3571
3571
3572 def __nonzero__(self):
3572 def __nonzero__(self):
3573 # Do not use 'for r in self' because it will enforce the iteration
3573 # Do not use 'for r in self' because it will enforce the iteration
3574 # order (default ascending), possibly unrolling a whole descending
3574 # order (default ascending), possibly unrolling a whole descending
3575 # iterator.
3575 # iterator.
3576 if self._genlist:
3576 if self._genlist:
3577 return True
3577 return True
3578 for r in self._consumegen():
3578 for r in self._consumegen():
3579 return True
3579 return True
3580 return False
3580 return False
3581
3581
3582 def __contains__(self, x):
3582 def __contains__(self, x):
3583 if x in self._cache:
3583 if x in self._cache:
3584 return self._cache[x]
3584 return self._cache[x]
3585
3585
3586 # Use new values only, as existing values would be cached.
3586 # Use new values only, as existing values would be cached.
3587 for l in self._consumegen():
3587 for l in self._consumegen():
3588 if l == x:
3588 if l == x:
3589 return True
3589 return True
3590
3590
3591 self._cache[x] = False
3591 self._cache[x] = False
3592 return False
3592 return False
3593
3593
3594 def _asccontains(self, x):
3594 def _asccontains(self, x):
3595 """version of contains optimised for ascending generator"""
3595 """version of contains optimised for ascending generator"""
3596 if x in self._cache:
3596 if x in self._cache:
3597 return self._cache[x]
3597 return self._cache[x]
3598
3598
3599 # Use new values only, as existing values would be cached.
3599 # Use new values only, as existing values would be cached.
3600 for l in self._consumegen():
3600 for l in self._consumegen():
3601 if l == x:
3601 if l == x:
3602 return True
3602 return True
3603 if l > x:
3603 if l > x:
3604 break
3604 break
3605
3605
3606 self._cache[x] = False
3606 self._cache[x] = False
3607 return False
3607 return False
3608
3608
3609 def _desccontains(self, x):
3609 def _desccontains(self, x):
3610 """version of contains optimised for descending generator"""
3610 """version of contains optimised for descending generator"""
3611 if x in self._cache:
3611 if x in self._cache:
3612 return self._cache[x]
3612 return self._cache[x]
3613
3613
3614 # Use new values only, as existing values would be cached.
3614 # Use new values only, as existing values would be cached.
3615 for l in self._consumegen():
3615 for l in self._consumegen():
3616 if l == x:
3616 if l == x:
3617 return True
3617 return True
3618 if l < x:
3618 if l < x:
3619 break
3619 break
3620
3620
3621 self._cache[x] = False
3621 self._cache[x] = False
3622 return False
3622 return False
3623
3623
3624 def __iter__(self):
3624 def __iter__(self):
3625 if self._ascending:
3625 if self._ascending:
3626 it = self.fastasc
3626 it = self.fastasc
3627 else:
3627 else:
3628 it = self.fastdesc
3628 it = self.fastdesc
3629 if it is not None:
3629 if it is not None:
3630 return it()
3630 return it()
3631 # we need to consume the iterator
3631 # we need to consume the iterator
3632 for x in self._consumegen():
3632 for x in self._consumegen():
3633 pass
3633 pass
3634 # recall the same code
3634 # recall the same code
3635 return iter(self)
3635 return iter(self)
3636
3636
3637 def _iterator(self):
3637 def _iterator(self):
3638 if self._finished:
3638 if self._finished:
3639 return iter(self._genlist)
3639 return iter(self._genlist)
3640
3640
3641 # We have to use this complex iteration strategy to allow multiple
3641 # We have to use this complex iteration strategy to allow multiple
3642 # iterations at the same time. We need to be able to catch revision
3642 # iterations at the same time. We need to be able to catch revision
3643 # removed from _consumegen and added to genlist in another instance.
3643 # removed from _consumegen and added to genlist in another instance.
3644 #
3644 #
3645         # Getting rid of it would provide about a 15% speedup on this
3645         # Getting rid of it would provide about a 15% speedup on this
3646         # iteration.
3646         # iteration.
3647 genlist = self._genlist
3647 genlist = self._genlist
3648 nextrev = self._consumegen().next
3648 nextrev = self._consumegen().next
3649 _len = len # cache global lookup
3649 _len = len # cache global lookup
3650 def gen():
3650 def gen():
3651 i = 0
3651 i = 0
3652 while True:
3652 while True:
3653 if i < _len(genlist):
3653 if i < _len(genlist):
3654 yield genlist[i]
3654 yield genlist[i]
3655 else:
3655 else:
3656 yield nextrev()
3656 yield nextrev()
3657 i += 1
3657 i += 1
3658 return gen()
3658 return gen()
3659
3659
3660 def _consumegen(self):
3660 def _consumegen(self):
3661 cache = self._cache
3661 cache = self._cache
3662 genlist = self._genlist.append
3662 genlist = self._genlist.append
3663 for item in self._gen:
3663 for item in self._gen:
3664 cache[item] = True
3664 cache[item] = True
3665 genlist(item)
3665 genlist(item)
3666 yield item
3666 yield item
3667 if not self._finished:
3667 if not self._finished:
3668 self._finished = True
3668 self._finished = True
3669 asc = self._genlist[:]
3669 asc = self._genlist[:]
3670 asc.sort()
3670 asc.sort()
3671 self._asclist = asc
3671 self._asclist = asc
3672 self.fastasc = asc.__iter__
3672 self.fastasc = asc.__iter__
3673 self.fastdesc = asc.__reversed__
3673 self.fastdesc = asc.__reversed__
3674
3674
3675 def __len__(self):
3675 def __len__(self):
3676 for x in self._consumegen():
3676 for x in self._consumegen():
3677 pass
3677 pass
3678 return len(self._genlist)
3678 return len(self._genlist)
3679
3679
3680 def sort(self, reverse=False):
3680 def sort(self, reverse=False):
3681 self._ascending = not reverse
3681 self._ascending = not reverse
3682
3682
3683 def reverse(self):
3683 def reverse(self):
3684 self._ascending = not self._ascending
3684 self._ascending = not self._ascending
3685
3685
3686 def isascending(self):
3686 def isascending(self):
3687 return self._ascending
3687 return self._ascending
3688
3688
3689 def isdescending(self):
3689 def isdescending(self):
3690 return not self._ascending
3690 return not self._ascending
3691
3691
3692 def istopo(self):
3692 def istopo(self):
3693 # not worth the trouble asserting if the two sets combined are still
3693 # not worth the trouble asserting if the two sets combined are still
3694         # in topological order. Use the sort() predicate to explicitly sort
3694         # in topological order. Use the sort() predicate to explicitly sort
3695 # again instead.
3695 # again instead.
3696 return False
3696 return False
3697
3697
3698 def first(self):
3698 def first(self):
3699 if self._ascending:
3699 if self._ascending:
3700 it = self.fastasc
3700 it = self.fastasc
3701 else:
3701 else:
3702 it = self.fastdesc
3702 it = self.fastdesc
3703 if it is None:
3703 if it is None:
3704 # we need to consume all and try again
3704 # we need to consume all and try again
3705 for x in self._consumegen():
3705 for x in self._consumegen():
3706 pass
3706 pass
3707 return self.first()
3707 return self.first()
3708 return next(it(), None)
3708 return next(it(), None)
3709
3709
3710 def last(self):
3710 def last(self):
3711 if self._ascending:
3711 if self._ascending:
3712 it = self.fastdesc
3712 it = self.fastdesc
3713 else:
3713 else:
3714 it = self.fastasc
3714 it = self.fastasc
3715 if it is None:
3715 if it is None:
3716 # we need to consume all and try again
3716 # we need to consume all and try again
3717 for x in self._consumegen():
3717 for x in self._consumegen():
3718 pass
3718 pass
3719 return self.first()
3719 return self.first()
3720 return next(it(), None)
3720 return next(it(), None)
3721
3721
3722 def __repr__(self):
3722 def __repr__(self):
3723 d = {False: '-', True: '+'}[self._ascending]
3723 d = {False: '-', True: '+'}[self._ascending]
3724 return '<%s%s>' % (type(self).__name__, d)
3724 return '<%s%s>' % (type(self).__name__, d)
3725
3725
3726 class spanset(abstractsmartset):
3726 class spanset(abstractsmartset):
3727 """Duck type for baseset class which represents a range of revisions and
3727 """Duck type for baseset class which represents a range of revisions and
3728 can work lazily and without having all the range in memory
3728 can work lazily and without having all the range in memory
3729
3729
3730     Note that spanset(x, y) behaves almost like xrange(x, y) except for two
3730     Note that spanset(x, y) behaves almost like xrange(x, y) except for two
3731     notable points:
3731     notable points:
3732     - when x > y it will be automatically descending,
3732     - when x > y it will be automatically descending,
3733     - revisions filtered with this repoview will be skipped.
3733     - revisions filtered with this repoview will be skipped.
3734
3734
3735 """
3735 """
3736 def __init__(self, repo, start=0, end=None):
3736 def __init__(self, repo, start=0, end=None):
3737 """
3737 """
3738         start: first revision included in the set
3738         start: first revision included in the set
3739                (defaults to 0)
3739                (defaults to 0)
3740         end: first revision excluded (last+1)
3740         end: first revision excluded (last+1)
3741                (defaults to len(repo))
3741                (defaults to len(repo))
3742
3742
3743 Spanset will be descending if `end` < `start`.
3743 Spanset will be descending if `end` < `start`.
3744 """
3744 """
3745 if end is None:
3745 if end is None:
3746 end = len(repo)
3746 end = len(repo)
3747 self._ascending = start <= end
3747 self._ascending = start <= end
3748 if not self._ascending:
3748 if not self._ascending:
3749             start, end = end + 1, start + 1
3749             start, end = end + 1, start + 1
3750 self._start = start
3750 self._start = start
3751 self._end = end
3751 self._end = end
3752 self._hiddenrevs = repo.changelog.filteredrevs
3752 self._hiddenrevs = repo.changelog.filteredrevs
3753
3753
3754 def sort(self, reverse=False):
3754 def sort(self, reverse=False):
3755 self._ascending = not reverse
3755 self._ascending = not reverse
3756
3756
3757 def reverse(self):
3757 def reverse(self):
3758 self._ascending = not self._ascending
3758 self._ascending = not self._ascending
3759
3759
3760 def istopo(self):
3760 def istopo(self):
3761 # not worth the trouble asserting if the two sets combined are still
3761 # not worth the trouble asserting if the two sets combined are still
3762         # in topological order. Use the sort() predicate to explicitly sort
3762         # in topological order. Use the sort() predicate to explicitly sort
3763 # again instead.
3763 # again instead.
3764 return False
3764 return False
3765
3765
3766 def _iterfilter(self, iterrange):
3766 def _iterfilter(self, iterrange):
3767 s = self._hiddenrevs
3767 s = self._hiddenrevs
3768 for r in iterrange:
3768 for r in iterrange:
3769 if r not in s:
3769 if r not in s:
3770 yield r
3770 yield r
3771
3771
3772 def __iter__(self):
3772 def __iter__(self):
3773 if self._ascending:
3773 if self._ascending:
3774 return self.fastasc()
3774 return self.fastasc()
3775 else:
3775 else:
3776 return self.fastdesc()
3776 return self.fastdesc()
3777
3777
3778 def fastasc(self):
3778 def fastasc(self):
3779 iterrange = xrange(self._start, self._end)
3779 iterrange = xrange(self._start, self._end)
3780 if self._hiddenrevs:
3780 if self._hiddenrevs:
3781 return self._iterfilter(iterrange)
3781 return self._iterfilter(iterrange)
3782 return iter(iterrange)
3782 return iter(iterrange)
3783
3783
3784 def fastdesc(self):
3784 def fastdesc(self):
3785 iterrange = xrange(self._end - 1, self._start - 1, -1)
3785 iterrange = xrange(self._end - 1, self._start - 1, -1)
3786 if self._hiddenrevs:
3786 if self._hiddenrevs:
3787 return self._iterfilter(iterrange)
3787 return self._iterfilter(iterrange)
3788 return iter(iterrange)
3788 return iter(iterrange)
3789
3789
3790 def __contains__(self, rev):
3790 def __contains__(self, rev):
3791 hidden = self._hiddenrevs
3791 hidden = self._hiddenrevs
3792 return ((self._start <= rev < self._end)
3792 return ((self._start <= rev < self._end)
3793 and not (hidden and rev in hidden))
3793 and not (hidden and rev in hidden))
3794
3794
3795 def __nonzero__(self):
3795 def __nonzero__(self):
3796 for r in self:
3796 for r in self:
3797 return True
3797 return True
3798 return False
3798 return False
3799
3799
3800 def __len__(self):
3800 def __len__(self):
3801 if not self._hiddenrevs:
3801 if not self._hiddenrevs:
3802 return abs(self._end - self._start)
3802 return abs(self._end - self._start)
3803 else:
3803 else:
3804 count = 0
3804 count = 0
3805 start = self._start
3805 start = self._start
3806 end = self._end
3806 end = self._end
3807 for rev in self._hiddenrevs:
3807 for rev in self._hiddenrevs:
3808 if (end < rev <= start) or (start <= rev < end):
3808 if (end < rev <= start) or (start <= rev < end):
3809 count += 1
3809 count += 1
3810 return abs(self._end - self._start) - count
3810 return abs(self._end - self._start) - count
3811
3811
3812 def isascending(self):
3812 def isascending(self):
3813 return self._ascending
3813 return self._ascending
3814
3814
3815 def isdescending(self):
3815 def isdescending(self):
3816 return not self._ascending
3816 return not self._ascending
3817
3817
3818 def first(self):
3818 def first(self):
3819 if self._ascending:
3819 if self._ascending:
3820 it = self.fastasc
3820 it = self.fastasc
3821 else:
3821 else:
3822 it = self.fastdesc
3822 it = self.fastdesc
3823 for x in it():
3823 for x in it():
3824 return x
3824 return x
3825 return None
3825 return None
3826
3826
3827 def last(self):
3827 def last(self):
3828 if self._ascending:
3828 if self._ascending:
3829 it = self.fastdesc
3829 it = self.fastdesc
3830 else:
3830 else:
3831 it = self.fastasc
3831 it = self.fastasc
3832 for x in it():
3832 for x in it():
3833 return x
3833 return x
3834 return None
3834 return None
3835
3835
3836 def __repr__(self):
3836 def __repr__(self):
3837 d = {False: '-', True: '+'}[self._ascending]
3837 d = {False: '-', True: '+'}[self._ascending]
3838 return '<%s%s %d:%d>' % (type(self).__name__, d,
3838 return '<%s%s %d:%d>' % (type(self).__name__, d,
3839 self._start, self._end - 1)
3839 self._start, self._end - 1)
3840
3840
3841 class fullreposet(spanset):
3841 class fullreposet(spanset):
3842 """a set containing all revisions in the repo
3842 """a set containing all revisions in the repo
3843
3843
3844 This class exists to host special optimization and magic to handle virtual
3844 This class exists to host special optimization and magic to handle virtual
3845 revisions such as "null".
3845 revisions such as "null".
3846 """
3846 """
3847
3847
3848 def __init__(self, repo):
3848 def __init__(self, repo):
3849 super(fullreposet, self).__init__(repo)
3849 super(fullreposet, self).__init__(repo)
3850
3850
3851 def __and__(self, other):
3851 def __and__(self, other):
3852 """As self contains the whole repo, all of the other set should also be
3852 """As self contains the whole repo, all of the other set should also be
3853 in self. Therefore `self & other = other`.
3853 in self. Therefore `self & other = other`.
3854
3854
3855 This boldly assumes the other contains valid revs only.
3855 This boldly assumes the other contains valid revs only.
3856 """
3856 """
3857         # other is not a smartset, make it so
3857         # other is not a smartset, make it so
3858 if not util.safehasattr(other, 'isascending'):
3858 if not util.safehasattr(other, 'isascending'):
3859 # filter out hidden revision
3859 # filter out hidden revision
3860 # (this boldly assumes all smartset are pure)
3860 # (this boldly assumes all smartset are pure)
3861 #
3861 #
3862 # `other` was used with "&", let's assume this is a set like
3862 # `other` was used with "&", let's assume this is a set like
3863 # object.
3863 # object.
3864 other = baseset(other - self._hiddenrevs)
3864 other = baseset(other - self._hiddenrevs)
3865
3865
3866 other.sort(reverse=self.isdescending())
3866 other.sort(reverse=self.isdescending())
3867 return other
3867 return other
3868
3868
3869 def prettyformatset(revs):
3869 def prettyformatset(revs):
3870 lines = []
3870 lines = []
3871 rs = repr(revs)
3871 rs = repr(revs)
3872 p = 0
3872 p = 0
3873 while p < len(rs):
3873 while p < len(rs):
3874 q = rs.find('<', p + 1)
3874 q = rs.find('<', p + 1)
3875 if q < 0:
3875 if q < 0:
3876 q = len(rs)
3876 q = len(rs)
3877 l = rs.count('<', 0, p) - rs.count('>', 0, p)
3877 l = rs.count('<', 0, p) - rs.count('>', 0, p)
3878 assert l >= 0
3878 assert l >= 0
3879 lines.append((l, rs[p:q].rstrip()))
3879 lines.append((l, rs[p:q].rstrip()))
3880 p = q
3880 p = q
3881 return '\n'.join(' ' * l + s for l, s in lines)
3881 return '\n'.join(' ' * l + s for l, s in lines)
3882
3882
3883 def loadpredicate(ui, extname, registrarobj):
3883 def loadpredicate(ui, extname, registrarobj):
3884 """Load revset predicates from specified registrarobj
3884 """Load revset predicates from specified registrarobj
3885 """
3885 """
3886 for name, func in registrarobj._table.iteritems():
3886 for name, func in registrarobj._table.iteritems():
3887 symbols[name] = func
3887 symbols[name] = func
3888 if func._safe:
3888 if func._safe:
3889 safesymbols.add(name)
3889 safesymbols.add(name)
3890
3890
3891 # load built-in predicates explicitly to setup safesymbols
3891 # load built-in predicates explicitly to setup safesymbols
3892 loadpredicate(None, None, predicate)
3892 loadpredicate(None, None, predicate)
3893
3893
3894 # tell hggettext to extract docstrings from these functions:
3894 # tell hggettext to extract docstrings from these functions:
3895 i18nfunctions = symbols.values()
3895 i18nfunctions = symbols.values()
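The generatorset class above caches values as it pulls them from the underlying generator, so membership tests and repeated iteration never consume more of the generator than necessary. A minimal standalone sketch of that idea, assuming nothing from Mercurial itself (LazySet and its members are hypothetical names, not the smartset API):

class LazySet(object):
    def __init__(self, gen):
        self._gen = gen        # underlying generator, consumed on demand
        self._cache = {}       # value -> membership result
        self._genlist = []     # values in the order they were generated

    def _consume(self):
        # Pull new values from the generator, remembering each one.
        for item in self._gen:
            self._cache[item] = True
            self._genlist.append(item)
            yield item

    def __contains__(self, x):
        if x in self._cache:
            return self._cache[x]
        for item in self._consume():
            if item == x:
                return True
        self._cache[x] = False
        return False

    def __iter__(self):
        # Replay what was already generated, then keep consuming.
        for item in list(self._genlist):
            yield item
        for item in self._consume():
            yield item

s = LazySet(iter([2, 5, 7, 11]))
print(7 in s)    # True; generation stops as soon as 7 is seen
print(list(s))   # [2, 5, 7, 11]; the remaining values are generated here

A membership test followed by iteration reuses the already-generated prefix, which is the same property the fastasc/fastdesc attributes above rely on once the generator is exhausted.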
@@ -1,770 +1,770 b''
1 $ HGMERGE=true; export HGMERGE
1 $ HGMERGE=true; export HGMERGE
2
2
3 init
3 init
4
4
5 $ hg init repo
5 $ hg init repo
6 $ cd repo
6 $ cd repo
7
7
8 commit
8 commit
9
9
10 $ echo 'a' > a
10 $ echo 'a' > a
11 $ hg ci -A -m test -u nobody -d '1 0'
11 $ hg ci -A -m test -u nobody -d '1 0'
12 adding a
12 adding a
13
13
14 annotate -c
14 annotate -c
15
15
16 $ hg annotate -c a
16 $ hg annotate -c a
17 8435f90966e4: a
17 8435f90966e4: a
18
18
19 annotate -cl
19 annotate -cl
20
20
21 $ hg annotate -cl a
21 $ hg annotate -cl a
22 8435f90966e4:1: a
22 8435f90966e4:1: a
23
23
24 annotate -d
24 annotate -d
25
25
26 $ hg annotate -d a
26 $ hg annotate -d a
27 Thu Jan 01 00:00:01 1970 +0000: a
27 Thu Jan 01 00:00:01 1970 +0000: a
28
28
29 annotate -n
29 annotate -n
30
30
31 $ hg annotate -n a
31 $ hg annotate -n a
32 0: a
32 0: a
33
33
34 annotate -nl
34 annotate -nl
35
35
36 $ hg annotate -nl a
36 $ hg annotate -nl a
37 0:1: a
37 0:1: a
38
38
39 annotate -u
39 annotate -u
40
40
41 $ hg annotate -u a
41 $ hg annotate -u a
42 nobody: a
42 nobody: a
43
43
44 annotate -cdnu
44 annotate -cdnu
45
45
46 $ hg annotate -cdnu a
46 $ hg annotate -cdnu a
47 nobody 0 8435f90966e4 Thu Jan 01 00:00:01 1970 +0000: a
47 nobody 0 8435f90966e4 Thu Jan 01 00:00:01 1970 +0000: a
48
48
49 annotate -cdnul
49 annotate -cdnul
50
50
51 $ hg annotate -cdnul a
51 $ hg annotate -cdnul a
52 nobody 0 8435f90966e4 Thu Jan 01 00:00:01 1970 +0000:1: a
52 nobody 0 8435f90966e4 Thu Jan 01 00:00:01 1970 +0000:1: a
53
53
54 annotate (JSON)
54 annotate (JSON)
55
55
56 $ hg annotate -Tjson a
56 $ hg annotate -Tjson a
57 [
57 [
58 {
58 {
59 "line": "a\n",
59 "line": "a\n",
60 "rev": 0
60 "rev": 0
61 }
61 }
62 ]
62 ]
63
63
64 $ hg annotate -Tjson -cdfnul a
64 $ hg annotate -Tjson -cdfnul a
65 [
65 [
66 {
66 {
67 "date": [1.0, 0],
67 "date": [1.0, 0],
68 "file": "a",
68 "file": "a",
69 "line": "a\n",
69 "line": "a\n",
70 "line_number": 1,
70 "line_number": 1,
71 "node": "8435f90966e442695d2ded29fdade2bac5ad8065",
71 "node": "8435f90966e442695d2ded29fdade2bac5ad8065",
72 "rev": 0,
72 "rev": 0,
73 "user": "nobody"
73 "user": "nobody"
74 }
74 }
75 ]
75 ]
76
76
77 $ cat <<EOF >>a
77 $ cat <<EOF >>a
78 > a
78 > a
79 > a
79 > a
80 > EOF
80 > EOF
81 $ hg ci -ma1 -d '1 0'
81 $ hg ci -ma1 -d '1 0'
82 $ hg cp a b
82 $ hg cp a b
83 $ hg ci -mb -d '1 0'
83 $ hg ci -mb -d '1 0'
84 $ cat <<EOF >> b
84 $ cat <<EOF >> b
85 > b4
85 > b4
86 > b5
86 > b5
87 > b6
87 > b6
88 > EOF
88 > EOF
89 $ hg ci -mb2 -d '2 0'
89 $ hg ci -mb2 -d '2 0'
90
90
91 annotate -n b
91 annotate -n b
92
92
93 $ hg annotate -n b
93 $ hg annotate -n b
94 0: a
94 0: a
95 1: a
95 1: a
96 1: a
96 1: a
97 3: b4
97 3: b4
98 3: b5
98 3: b5
99 3: b6
99 3: b6
100
100
101 annotate --no-follow b
101 annotate --no-follow b
102
102
103 $ hg annotate --no-follow b
103 $ hg annotate --no-follow b
104 2: a
104 2: a
105 2: a
105 2: a
106 2: a
106 2: a
107 3: b4
107 3: b4
108 3: b5
108 3: b5
109 3: b6
109 3: b6
110
110
111 annotate -nl b
111 annotate -nl b
112
112
113 $ hg annotate -nl b
113 $ hg annotate -nl b
114 0:1: a
114 0:1: a
115 1:2: a
115 1:2: a
116 1:3: a
116 1:3: a
117 3:4: b4
117 3:4: b4
118 3:5: b5
118 3:5: b5
119 3:6: b6
119 3:6: b6
120
120
121 annotate -nf b
121 annotate -nf b
122
122
123 $ hg annotate -nf b
123 $ hg annotate -nf b
124 0 a: a
124 0 a: a
125 1 a: a
125 1 a: a
126 1 a: a
126 1 a: a
127 3 b: b4
127 3 b: b4
128 3 b: b5
128 3 b: b5
129 3 b: b6
129 3 b: b6
130
130
131 annotate -nlf b
131 annotate -nlf b
132
132
133 $ hg annotate -nlf b
133 $ hg annotate -nlf b
134 0 a:1: a
134 0 a:1: a
135 1 a:2: a
135 1 a:2: a
136 1 a:3: a
136 1 a:3: a
137 3 b:4: b4
137 3 b:4: b4
138 3 b:5: b5
138 3 b:5: b5
139 3 b:6: b6
139 3 b:6: b6
140
140
141 $ hg up -C 2
141 $ hg up -C 2
142 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
142 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
143 $ cat <<EOF >> b
143 $ cat <<EOF >> b
144 > b4
144 > b4
145 > c
145 > c
146 > b5
146 > b5
147 > EOF
147 > EOF
148 $ hg ci -mb2.1 -d '2 0'
148 $ hg ci -mb2.1 -d '2 0'
149 created new head
149 created new head
150 $ hg merge
150 $ hg merge
151 merging b
151 merging b
152 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
152 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
153 (branch merge, don't forget to commit)
153 (branch merge, don't forget to commit)
154 $ hg ci -mmergeb -d '3 0'
154 $ hg ci -mmergeb -d '3 0'
155
155
156 annotate after merge
156 annotate after merge
157
157
158 $ hg annotate -nf b
158 $ hg annotate -nf b
159 0 a: a
159 0 a: a
160 1 a: a
160 1 a: a
161 1 a: a
161 1 a: a
162 3 b: b4
162 3 b: b4
163 4 b: c
163 4 b: c
164 3 b: b5
164 3 b: b5
165
165
166 annotate after merge with -l
166 annotate after merge with -l
167
167
168 $ hg annotate -nlf b
168 $ hg annotate -nlf b
169 0 a:1: a
169 0 a:1: a
170 1 a:2: a
170 1 a:2: a
171 1 a:3: a
171 1 a:3: a
172 3 b:4: b4
172 3 b:4: b4
173 4 b:5: c
173 4 b:5: c
174 3 b:5: b5
174 3 b:5: b5
175
175
176 $ hg up -C 1
176 $ hg up -C 1
177 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
177 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
178 $ hg cp a b
178 $ hg cp a b
179 $ cat <<EOF > b
179 $ cat <<EOF > b
180 > a
180 > a
181 > z
181 > z
182 > a
182 > a
183 > EOF
183 > EOF
184 $ hg ci -mc -d '3 0'
184 $ hg ci -mc -d '3 0'
185 created new head
185 created new head
186 $ hg merge
186 $ hg merge
187 merging b
187 merging b
188 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
188 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
189 (branch merge, don't forget to commit)
189 (branch merge, don't forget to commit)
190 $ cat <<EOF >> b
190 $ cat <<EOF >> b
191 > b4
191 > b4
192 > c
192 > c
193 > b5
193 > b5
194 > EOF
194 > EOF
195 $ echo d >> b
195 $ echo d >> b
196 $ hg ci -mmerge2 -d '4 0'
196 $ hg ci -mmerge2 -d '4 0'
197
197
198 annotate after rename merge
198 annotate after rename merge
199
199
200 $ hg annotate -nf b
200 $ hg annotate -nf b
201 0 a: a
201 0 a: a
202 6 b: z
202 6 b: z
203 1 a: a
203 1 a: a
204 3 b: b4
204 3 b: b4
205 4 b: c
205 4 b: c
206 3 b: b5
206 3 b: b5
207 7 b: d
207 7 b: d
208
208
209 annotate after rename merge with -l
209 annotate after rename merge with -l
210
210
211 $ hg annotate -nlf b
211 $ hg annotate -nlf b
212 0 a:1: a
212 0 a:1: a
213 6 b:2: z
213 6 b:2: z
214 1 a:3: a
214 1 a:3: a
215 3 b:4: b4
215 3 b:4: b4
216 4 b:5: c
216 4 b:5: c
217 3 b:5: b5
217 3 b:5: b5
218 7 b:7: d
218 7 b:7: d
219
219
220 Issue2807: alignment of line numbers with -l
220 Issue2807: alignment of line numbers with -l
221
221
222 $ echo more >> b
222 $ echo more >> b
223 $ hg ci -mmore -d '5 0'
223 $ hg ci -mmore -d '5 0'
224 $ echo more >> b
224 $ echo more >> b
225 $ hg ci -mmore -d '6 0'
225 $ hg ci -mmore -d '6 0'
226 $ echo more >> b
226 $ echo more >> b
227 $ hg ci -mmore -d '7 0'
227 $ hg ci -mmore -d '7 0'
228 $ hg annotate -nlf b
228 $ hg annotate -nlf b
229 0 a: 1: a
229 0 a: 1: a
230 6 b: 2: z
230 6 b: 2: z
231 1 a: 3: a
231 1 a: 3: a
232 3 b: 4: b4
232 3 b: 4: b4
233 4 b: 5: c
233 4 b: 5: c
234 3 b: 5: b5
234 3 b: 5: b5
235 7 b: 7: d
235 7 b: 7: d
236 8 b: 8: more
236 8 b: 8: more
237 9 b: 9: more
237 9 b: 9: more
238 10 b:10: more
238 10 b:10: more
239
239
240 linkrev vs rev
240 linkrev vs rev
241
241
242 $ hg annotate -r tip -n a
242 $ hg annotate -r tip -n a
243 0: a
243 0: a
244 1: a
244 1: a
245 1: a
245 1: a
246
246
247 linkrev vs rev with -l
247 linkrev vs rev with -l
248
248
249 $ hg annotate -r tip -nl a
249 $ hg annotate -r tip -nl a
250 0:1: a
250 0:1: a
251 1:2: a
251 1:2: a
252 1:3: a
252 1:3: a
253
253
254 Issue589: "undelete" sequence leads to crash
254 Issue589: "undelete" sequence leads to crash
255
255
256 annotate was crashing when trying to --follow something
256 annotate was crashing when trying to --follow something
257
257
258 like A -> B -> A
258 like A -> B -> A
259
259
260 generate ABA rename configuration
260 generate ABA rename configuration
261
261
262 $ echo foo > foo
262 $ echo foo > foo
263 $ hg add foo
263 $ hg add foo
264 $ hg ci -m addfoo
264 $ hg ci -m addfoo
265 $ hg rename foo bar
265 $ hg rename foo bar
266 $ hg ci -m renamefoo
266 $ hg ci -m renamefoo
267 $ hg rename bar foo
267 $ hg rename bar foo
268 $ hg ci -m renamebar
268 $ hg ci -m renamebar
269
269
270 annotate after ABA with follow
270 annotate after ABA with follow
271
271
272 $ hg annotate --follow foo
272 $ hg annotate --follow foo
273 foo: foo
273 foo: foo
274
274
275 missing file
275 missing file
276
276
277 $ hg ann nosuchfile
277 $ hg ann nosuchfile
278 abort: nosuchfile: no such file in rev e9e6b4fa872f
278 abort: nosuchfile: no such file in rev e9e6b4fa872f
279 [255]
279 [255]
280
280
281 annotate file without '\n' on last line
281 annotate file without '\n' on last line
282
282
283 $ printf "" > c
283 $ printf "" > c
284 $ hg ci -A -m test -u nobody -d '1 0'
284 $ hg ci -A -m test -u nobody -d '1 0'
285 adding c
285 adding c
286 $ hg annotate c
286 $ hg annotate c
287 $ printf "a\nb" > c
287 $ printf "a\nb" > c
288 $ hg ci -m test
288 $ hg ci -m test
289 $ hg annotate c
289 $ hg annotate c
290 [0-9]+: a (re)
290 [0-9]+: a (re)
291 [0-9]+: b (re)
291 [0-9]+: b (re)
292
292
293 Issue3841: check annotation of the file of which filelog includes
293 Issue3841: check annotation of the file of which filelog includes
294 merging between the revision and its ancestor
294 merging between the revision and its ancestor
295
295
296 to reproduce the situation with recent Mercurial, this script uses (1)
296 to reproduce the situation with recent Mercurial, this script uses (1)
297 "hg debugsetparents" to merge without ancestor check by "hg merge",
297 "hg debugsetparents" to merge without ancestor check by "hg merge",
298 and (2) the extension to allow filelog merging between the revision
298 and (2) the extension to allow filelog merging between the revision
299 and its ancestor by overriding "repo._filecommit".
299 and its ancestor by overriding "repo._filecommit".
300
300
301 $ cat > ../legacyrepo.py <<EOF
301 $ cat > ../legacyrepo.py <<EOF
302 > from mercurial import node, error
302 > from mercurial import node, error
303 > def reposetup(ui, repo):
303 > def reposetup(ui, repo):
304 > class legacyrepo(repo.__class__):
304 > class legacyrepo(repo.__class__):
305 > def _filecommit(self, fctx, manifest1, manifest2,
305 > def _filecommit(self, fctx, manifest1, manifest2,
306 > linkrev, tr, changelist):
306 > linkrev, tr, changelist):
307 > fname = fctx.path()
307 > fname = fctx.path()
308 > text = fctx.data()
308 > text = fctx.data()
309 > flog = self.file(fname)
309 > flog = self.file(fname)
310 > fparent1 = manifest1.get(fname, node.nullid)
310 > fparent1 = manifest1.get(fname, node.nullid)
311 > fparent2 = manifest2.get(fname, node.nullid)
311 > fparent2 = manifest2.get(fname, node.nullid)
312 > meta = {}
312 > meta = {}
313 > copy = fctx.renamed()
313 > copy = fctx.renamed()
314 > if copy and copy[0] != fname:
314 > if copy and copy[0] != fname:
315 > raise error.Abort('copying is not supported')
315 > raise error.Abort('copying is not supported')
316 > if fparent2 != node.nullid:
316 > if fparent2 != node.nullid:
317 > changelist.append(fname)
317 > changelist.append(fname)
318 > return flog.add(text, meta, tr, linkrev,
318 > return flog.add(text, meta, tr, linkrev,
319 > fparent1, fparent2)
319 > fparent1, fparent2)
320 > raise error.Abort('only merging is supported')
320 > raise error.Abort('only merging is supported')
321 > repo.__class__ = legacyrepo
321 > repo.__class__ = legacyrepo
322 > EOF
322 > EOF
323
323
324 $ cat > baz <<EOF
324 $ cat > baz <<EOF
325 > 1
325 > 1
326 > 2
326 > 2
327 > 3
327 > 3
328 > 4
328 > 4
329 > 5
329 > 5
330 > EOF
330 > EOF
331 $ hg add baz
331 $ hg add baz
332 $ hg commit -m "baz:0"
332 $ hg commit -m "baz:0"
333
333
334 $ cat > baz <<EOF
334 $ cat > baz <<EOF
335 > 1 baz:1
335 > 1 baz:1
336 > 2
336 > 2
337 > 3
337 > 3
338 > 4
338 > 4
339 > 5
339 > 5
340 > EOF
340 > EOF
341 $ hg commit -m "baz:1"
341 $ hg commit -m "baz:1"
342
342
343 $ cat > baz <<EOF
343 $ cat > baz <<EOF
344 > 1 baz:1
344 > 1 baz:1
345 > 2 baz:2
345 > 2 baz:2
346 > 3
346 > 3
347 > 4
347 > 4
348 > 5
348 > 5
349 > EOF
349 > EOF
350 $ hg debugsetparents 17 17
350 $ hg debugsetparents 17 17
351 $ hg --config extensions.legacyrepo=../legacyrepo.py commit -m "baz:2"
351 $ hg --config extensions.legacyrepo=../legacyrepo.py commit -m "baz:2"
352 $ hg debugindexdot .hg/store/data/baz.i
352 $ hg debugindexdot .hg/store/data/baz.i
353 digraph G {
353 digraph G {
354 -1 -> 0
354 -1 -> 0
355 0 -> 1
355 0 -> 1
356 1 -> 2
356 1 -> 2
357 1 -> 2
357 1 -> 2
358 }
358 }
359 $ hg annotate baz
359 $ hg annotate baz
360 17: 1 baz:1
360 17: 1 baz:1
361 18: 2 baz:2
361 18: 2 baz:2
362 16: 3
362 16: 3
363 16: 4
363 16: 4
364 16: 5
364 16: 5
365
365
366 $ cat > baz <<EOF
366 $ cat > baz <<EOF
367 > 1 baz:1
367 > 1 baz:1
368 > 2 baz:2
368 > 2 baz:2
369 > 3 baz:3
369 > 3 baz:3
370 > 4
370 > 4
371 > 5
371 > 5
372 > EOF
372 > EOF
373 $ hg commit -m "baz:3"
373 $ hg commit -m "baz:3"
374
374
375 $ cat > baz <<EOF
375 $ cat > baz <<EOF
376 > 1 baz:1
376 > 1 baz:1
377 > 2 baz:2
377 > 2 baz:2
378 > 3 baz:3
378 > 3 baz:3
379 > 4 baz:4
379 > 4 baz:4
380 > 5
380 > 5
381 > EOF
381 > EOF
382 $ hg debugsetparents 19 18
382 $ hg debugsetparents 19 18
383 $ hg --config extensions.legacyrepo=../legacyrepo.py commit -m "baz:4"
383 $ hg --config extensions.legacyrepo=../legacyrepo.py commit -m "baz:4"
384 $ hg debugindexdot .hg/store/data/baz.i
384 $ hg debugindexdot .hg/store/data/baz.i
385 digraph G {
385 digraph G {
386 -1 -> 0
386 -1 -> 0
387 0 -> 1
387 0 -> 1
388 1 -> 2
388 1 -> 2
389 1 -> 2
389 1 -> 2
390 2 -> 3
390 2 -> 3
391 3 -> 4
391 3 -> 4
392 2 -> 4
392 2 -> 4
393 }
393 }
394 $ hg annotate baz
394 $ hg annotate baz
395 17: 1 baz:1
395 17: 1 baz:1
396 18: 2 baz:2
396 18: 2 baz:2
397 19: 3 baz:3
397 19: 3 baz:3
398 20: 4 baz:4
398 20: 4 baz:4
399 16: 5
399 16: 5
400
400
401 annotate clean file
401 annotate clean file
402
402
403 $ hg annotate -ncr "wdir()" foo
403 $ hg annotate -ncr "wdir()" foo
404 11 472b18db256d : foo
404 11 472b18db256d : foo
405
405
406 annotate modified file
406 annotate modified file
407
407
408 $ echo foofoo >> foo
408 $ echo foofoo >> foo
409 $ hg annotate -r "wdir()" foo
409 $ hg annotate -r "wdir()" foo
410 11 : foo
410 11 : foo
411 20+: foofoo
411 20+: foofoo
412
412
413 $ hg annotate -cr "wdir()" foo
413 $ hg annotate -cr "wdir()" foo
414 472b18db256d : foo
414 472b18db256d : foo
415 b6bedd5477e7+: foofoo
415 b6bedd5477e7+: foofoo
416
416
417 $ hg annotate -ncr "wdir()" foo
417 $ hg annotate -ncr "wdir()" foo
418 11 472b18db256d : foo
418 11 472b18db256d : foo
419 20 b6bedd5477e7+: foofoo
419 20 b6bedd5477e7+: foofoo
420
420
421 $ hg annotate --debug -ncr "wdir()" foo
421 $ hg annotate --debug -ncr "wdir()" foo
422 11 472b18db256d1e8282064eab4bfdaf48cbfe83cd : foo
422 11 472b18db256d1e8282064eab4bfdaf48cbfe83cd : foo
423 20 b6bedd5477e797f25e568a6402d4697f3f895a72+: foofoo
423 20 b6bedd5477e797f25e568a6402d4697f3f895a72+: foofoo
424
424
425 $ hg annotate -udr "wdir()" foo
425 $ hg annotate -udr "wdir()" foo
426 test Thu Jan 01 00:00:00 1970 +0000: foo
426 test Thu Jan 01 00:00:00 1970 +0000: foo
427 test [A-Za-z0-9:+ ]+: foofoo (re)
427 test [A-Za-z0-9:+ ]+: foofoo (re)
428
428
429 $ hg annotate -ncr "wdir()" -Tjson foo
429 $ hg annotate -ncr "wdir()" -Tjson foo
430 [
430 [
431 {
431 {
432 "line": "foo\n",
432 "line": "foo\n",
433 "node": "472b18db256d1e8282064eab4bfdaf48cbfe83cd",
433 "node": "472b18db256d1e8282064eab4bfdaf48cbfe83cd",
434 "rev": 11
434 "rev": 11
435 },
435 },
436 {
436 {
437 "line": "foofoo\n",
437 "line": "foofoo\n",
438 "node": null,
438 "node": null,
439 "rev": null
439 "rev": null
440 }
440 }
441 ]
441 ]
442
442
443 annotate added file
443 annotate added file
444
444
445 $ echo bar > bar
445 $ echo bar > bar
446 $ hg add bar
446 $ hg add bar
447 $ hg annotate -ncr "wdir()" bar
447 $ hg annotate -ncr "wdir()" bar
448 20 b6bedd5477e7+: bar
448 20 b6bedd5477e7+: bar
449
449
450 annotate renamed file
450 annotate renamed file
451
451
452 $ hg rename foo renamefoo2
452 $ hg rename foo renamefoo2
453 $ hg annotate -ncr "wdir()" renamefoo2
453 $ hg annotate -ncr "wdir()" renamefoo2
454 11 472b18db256d : foo
454 11 472b18db256d : foo
455 20 b6bedd5477e7+: foofoo
455 20 b6bedd5477e7+: foofoo
456
456
457 annotate missing file
457 annotate missing file
458
458
459 $ rm baz
459 $ rm baz
460 #if windows
460 #if windows
461 $ hg annotate -ncr "wdir()" baz
461 $ hg annotate -ncr "wdir()" baz
462 abort: $TESTTMP\repo\baz: The system cannot find the file specified
462 abort: $TESTTMP\repo\baz: The system cannot find the file specified
463 [255]
463 [255]
464 #else
464 #else
465 $ hg annotate -ncr "wdir()" baz
465 $ hg annotate -ncr "wdir()" baz
466 abort: No such file or directory: $TESTTMP/repo/baz
466 abort: No such file or directory: $TESTTMP/repo/baz
467 [255]
467 [255]
468 #endif
468 #endif
469
469
470 annotate removed file
470 annotate removed file
471
471
472 $ hg rm baz
472 $ hg rm baz
473 #if windows
473 #if windows
474 $ hg annotate -ncr "wdir()" baz
474 $ hg annotate -ncr "wdir()" baz
475 abort: $TESTTMP\repo\baz: The system cannot find the file specified
475 abort: $TESTTMP\repo\baz: The system cannot find the file specified
476 [255]
476 [255]
477 #else
477 #else
478 $ hg annotate -ncr "wdir()" baz
478 $ hg annotate -ncr "wdir()" baz
479 abort: No such file or directory: $TESTTMP/repo/baz
479 abort: No such file or directory: $TESTTMP/repo/baz
480 [255]
480 [255]
481 #endif
481 #endif
482
482
483 $ hg revert --all --no-backup --quiet
483 $ hg revert --all --no-backup --quiet
484 $ hg id -n
484 $ hg id -n
485 20
485 20
486
486
487 Test followlines() revset
487 Test followlines() revset
488
488
489 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 3, 5)'
489 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 3, 5)'
490 16: baz:0
490 16: baz:0
491 19: baz:3
491 19: baz:3
492 20: baz:4
492 20: baz:4
493 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 3, 5, rev=20)'
493 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 3, 5, startrev=20)'
494 16: baz:0
494 16: baz:0
495 19: baz:3
495 19: baz:3
496 20: baz:4
496 20: baz:4
497 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 3, 5, rev=.^)'
497 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 3, 5, startrev=.^)'
498 16: baz:0
498 16: baz:0
499 19: baz:3
499 19: baz:3
500 $ printf "0\n0\n" | cat - baz > baz1
500 $ printf "0\n0\n" | cat - baz > baz1
501 $ mv baz1 baz
501 $ mv baz1 baz
502 $ hg ci -m 'added two lines with 0'
502 $ hg ci -m 'added two lines with 0'
503 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 5, 7)'
503 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 5, 7)'
504 16: baz:0
504 16: baz:0
505 19: baz:3
505 19: baz:3
506 20: baz:4
506 20: baz:4
507 $ echo 6 >> baz
507 $ echo 6 >> baz
508 $ hg ci -m 'added line 8'
508 $ hg ci -m 'added line 8'
509 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 5, 7)'
509 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 5, 7)'
510 16: baz:0
510 16: baz:0
511 19: baz:3
511 19: baz:3
512 20: baz:4
512 20: baz:4
513 $ sed 's/3/3+/' baz > baz.new
513 $ sed 's/3/3+/' baz > baz.new
514 $ mv baz.new baz
514 $ mv baz.new baz
515 $ hg ci -m 'baz:3->3+'
515 $ hg ci -m 'baz:3->3+'
516 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 5, 7)'
516 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 5, 7)'
517 16: baz:0
517 16: baz:0
518 19: baz:3
518 19: baz:3
519 20: baz:4
519 20: baz:4
520 23: baz:3->3+
520 23: baz:3->3+
521 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 1, 2)'
521 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 1, 2)'
522 21: added two lines with 0
522 21: added two lines with 0
523
523
524 file patterns are okay
524 file patterns are okay
525 $ hg log -T '{rev}: {desc}\n' -r 'followlines("path:baz", 1, 2)'
525 $ hg log -T '{rev}: {desc}\n' -r 'followlines("path:baz", 1, 2)'
526 21: added two lines with 0
526 21: added two lines with 0
527
527
528 renames are followed
528 renames are followed
529 $ hg mv baz qux
529 $ hg mv baz qux
530 $ sed 's/4/4+/' qux > qux.new
530 $ sed 's/4/4+/' qux > qux.new
531 $ mv qux.new qux
531 $ mv qux.new qux
532 $ hg ci -m 'qux:4->4+'
532 $ hg ci -m 'qux:4->4+'
533 $ hg log -T '{rev}: {desc}\n' -r 'followlines(qux, 5, 7)'
533 $ hg log -T '{rev}: {desc}\n' -r 'followlines(qux, 5, 7)'
534 16: baz:0
534 16: baz:0
535 19: baz:3
535 19: baz:3
536 20: baz:4
536 20: baz:4
537 23: baz:3->3+
537 23: baz:3->3+
538 24: qux:4->4+
538 24: qux:4->4+
539 $ hg up 23 --quiet
539 $ hg up 23 --quiet
540
540
541 merge
541 merge
542 $ echo 7 >> baz
542 $ echo 7 >> baz
543 $ hg ci -m 'one more line, out of line range'
543 $ hg ci -m 'one more line, out of line range'
544 created new head
544 created new head
545 $ sed 's/3+/3-/' baz > baz.new
545 $ sed 's/3+/3-/' baz > baz.new
546 $ mv baz.new baz
546 $ mv baz.new baz
547 $ hg ci -m 'baz:3+->3-'
547 $ hg ci -m 'baz:3+->3-'
548 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 5, 7)'
548 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 5, 7)'
549 16: baz:0
549 16: baz:0
550 19: baz:3
550 19: baz:3
551 20: baz:4
551 20: baz:4
552 23: baz:3->3+
552 23: baz:3->3+
553 26: baz:3+->3-
553 26: baz:3+->3-
554 $ hg merge 24
554 $ hg merge 24
555 merging baz and qux to qux
555 merging baz and qux to qux
556 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
556 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
557 (branch merge, don't forget to commit)
557 (branch merge, don't forget to commit)
558 $ hg ci -m merge
558 $ hg ci -m merge
559 $ hg log -T '{rev}: {desc}\n' -r 'followlines(qux, 5, 7)'
559 $ hg log -T '{rev}: {desc}\n' -r 'followlines(qux, 5, 7)'
560 16: baz:0
560 16: baz:0
561 19: baz:3
561 19: baz:3
562 20: baz:4
562 20: baz:4
563 23: baz:3->3+
563 23: baz:3->3+
564 24: qux:4->4+
564 24: qux:4->4+
565 26: baz:3+->3-
565 26: baz:3+->3-
566 27: merge
566 27: merge
567 $ hg up 24 --quiet
567 $ hg up 24 --quiet
568 $ hg merge 26
568 $ hg merge 26
569 merging qux and baz to qux
569 merging qux and baz to qux
570 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
570 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
571 (branch merge, don't forget to commit)
571 (branch merge, don't forget to commit)
572 $ hg ci -m 'merge from other side'
572 $ hg ci -m 'merge from other side'
573 created new head
573 created new head
574 $ hg log -T '{rev}: {desc}\n' -r 'followlines(qux, 5, 7)'
574 $ hg log -T '{rev}: {desc}\n' -r 'followlines(qux, 5, 7)'
575 16: baz:0
575 16: baz:0
576 19: baz:3
576 19: baz:3
577 20: baz:4
577 20: baz:4
578 23: baz:3->3+
578 23: baz:3->3+
579 24: qux:4->4+
579 24: qux:4->4+
580 26: baz:3+->3-
580 26: baz:3+->3-
581 28: merge from other side
581 28: merge from other side
582 $ hg up 23 --quiet
582 $ hg up 23 --quiet
583
583
584 check error cases
584 check error cases
585 $ hg log -r 'followlines()'
585 $ hg log -r 'followlines()'
586 hg: parse error: followlines takes at least 1 positional arguments
586 hg: parse error: followlines takes at least 1 positional arguments
587 [255]
587 [255]
588 $ hg log -r 'followlines(baz)'
588 $ hg log -r 'followlines(baz)'
589 hg: parse error: followlines takes at least three arguments
589 hg: parse error: followlines takes at least three arguments
590 [255]
590 [255]
591 $ hg log -r 'followlines(baz, 1)'
591 $ hg log -r 'followlines(baz, 1)'
592 hg: parse error: followlines takes at least three arguments
592 hg: parse error: followlines takes at least three arguments
593 [255]
593 [255]
594 $ hg log -r 'followlines(baz, 1, 2, rev=desc("b"))'
594 $ hg log -r 'followlines(baz, 1, 2, startrev=desc("b"))'
595 hg: parse error: followlines expects exactly one revision
595 hg: parse error: followlines expects exactly one revision
596 [255]
596 [255]
597 $ hg log -r 'followlines("glob:*", 1, 2)'
597 $ hg log -r 'followlines("glob:*", 1, 2)'
598 hg: parse error: followlines expects exactly one file
598 hg: parse error: followlines expects exactly one file
599 [255]
599 [255]
600 $ hg log -r 'followlines(baz, x, 4)'
600 $ hg log -r 'followlines(baz, x, 4)'
601 hg: parse error: line range bounds must be integers
601 hg: parse error: line range bounds must be integers
602 [255]
602 [255]
603 $ hg log -r 'followlines(baz, 5, 4)'
603 $ hg log -r 'followlines(baz, 5, 4)'
604 hg: parse error: line range must be positive
604 hg: parse error: line range must be positive
605 [255]
605 [255]
606 $ hg log -r 'followlines(baz, 0, 4)'
606 $ hg log -r 'followlines(baz, 0, 4)'
607 hg: parse error: fromline must be strictly positive
607 hg: parse error: fromline must be strictly positive
608 [255]
608 [255]
609 $ hg log -r 'followlines(baz, 2, 40)'
609 $ hg log -r 'followlines(baz, 2, 40)'
610 abort: line range exceeds file size
610 abort: line range exceeds file size
611 [255]
611 [255]
612
612
613 Test annotate with whitespace options
613 Test annotate with whitespace options
614
614
615 $ cd ..
615 $ cd ..
616 $ hg init repo-ws
616 $ hg init repo-ws
617 $ cd repo-ws
617 $ cd repo-ws
618 $ cat > a <<EOF
618 $ cat > a <<EOF
619 > aa
619 > aa
620 >
620 >
621 > b b
621 > b b
622 > EOF
622 > EOF
623 $ hg ci -Am "adda"
623 $ hg ci -Am "adda"
624 adding a
624 adding a
625 $ sed 's/EOL$//g' > a <<EOF
625 $ sed 's/EOL$//g' > a <<EOF
626 > a a
626 > a a
627 >
627 >
628 > EOL
628 > EOL
629 > b b
629 > b b
630 > EOF
630 > EOF
631 $ hg ci -m "changea"
631 $ hg ci -m "changea"
632
632
633 Annotate with no option
633 Annotate with no option
634
634
635 $ hg annotate a
635 $ hg annotate a
636 1: a a
636 1: a a
637 0:
637 0:
638 1:
638 1:
639 1: b b
639 1: b b
640
640
641 Annotate with --ignore-space-change
641 Annotate with --ignore-space-change
642
642
643 $ hg annotate --ignore-space-change a
643 $ hg annotate --ignore-space-change a
644 1: a a
644 1: a a
645 1:
645 1:
646 0:
646 0:
647 0: b b
647 0: b b
648
648
649 Annotate with --ignore-all-space
649 Annotate with --ignore-all-space
650
650
651 $ hg annotate --ignore-all-space a
651 $ hg annotate --ignore-all-space a
652 0: a a
652 0: a a
653 0:
653 0:
654 1:
654 1:
655 0: b b
655 0: b b
656
656
657 Annotate with --ignore-blank-lines (similar to no options case)
657 Annotate with --ignore-blank-lines (similar to no options case)
658
658
659 $ hg annotate --ignore-blank-lines a
659 $ hg annotate --ignore-blank-lines a
660 1: a a
660 1: a a
661 0:
661 0:
662 1:
662 1:
663 1: b b
663 1: b b
664
664
665 $ cd ..
665 $ cd ..
666
666
667 Annotate with linkrev pointing to another branch
667 Annotate with linkrev pointing to another branch
668 ------------------------------------------------
668 ------------------------------------------------
669
669
670 create history with a filerev whose linkrev points to another branch
670 create history with a filerev whose linkrev points to another branch
671
671
672 $ hg init branchedlinkrev
672 $ hg init branchedlinkrev
673 $ cd branchedlinkrev
673 $ cd branchedlinkrev
674 $ echo A > a
674 $ echo A > a
675 $ hg commit -Am 'contentA'
675 $ hg commit -Am 'contentA'
676 adding a
676 adding a
677 $ echo B >> a
677 $ echo B >> a
678 $ hg commit -m 'contentB'
678 $ hg commit -m 'contentB'
679 $ hg up --rev 'desc(contentA)'
679 $ hg up --rev 'desc(contentA)'
680 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
680 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
681 $ echo unrelated > unrelated
681 $ echo unrelated > unrelated
682 $ hg commit -Am 'unrelated'
682 $ hg commit -Am 'unrelated'
683 adding unrelated
683 adding unrelated
684 created new head
684 created new head
685 $ hg graft -r 'desc(contentB)'
685 $ hg graft -r 'desc(contentB)'
686 grafting 1:fd27c222e3e6 "contentB"
686 grafting 1:fd27c222e3e6 "contentB"
687 $ echo C >> a
687 $ echo C >> a
688 $ hg commit -m 'contentC'
688 $ hg commit -m 'contentC'
689 $ echo W >> a
689 $ echo W >> a
690 $ hg log -G
690 $ hg log -G
691 @ changeset: 4:072f1e8df249
691 @ changeset: 4:072f1e8df249
692 | tag: tip
692 | tag: tip
693 | user: test
693 | user: test
694 | date: Thu Jan 01 00:00:00 1970 +0000
694 | date: Thu Jan 01 00:00:00 1970 +0000
695 | summary: contentC
695 | summary: contentC
696 |
696 |
697 o changeset: 3:ff38df03cc4b
697 o changeset: 3:ff38df03cc4b
698 | user: test
698 | user: test
699 | date: Thu Jan 01 00:00:00 1970 +0000
699 | date: Thu Jan 01 00:00:00 1970 +0000
700 | summary: contentB
700 | summary: contentB
701 |
701 |
702 o changeset: 2:62aaf3f6fc06
702 o changeset: 2:62aaf3f6fc06
703 | parent: 0:f0932f74827e
703 | parent: 0:f0932f74827e
704 | user: test
704 | user: test
705 | date: Thu Jan 01 00:00:00 1970 +0000
705 | date: Thu Jan 01 00:00:00 1970 +0000
706 | summary: unrelated
706 | summary: unrelated
707 |
707 |
708 | o changeset: 1:fd27c222e3e6
708 | o changeset: 1:fd27c222e3e6
709 |/ user: test
709 |/ user: test
710 | date: Thu Jan 01 00:00:00 1970 +0000
710 | date: Thu Jan 01 00:00:00 1970 +0000
711 | summary: contentB
711 | summary: contentB
712 |
712 |
713 o changeset: 0:f0932f74827e
713 o changeset: 0:f0932f74827e
714 user: test
714 user: test
715 date: Thu Jan 01 00:00:00 1970 +0000
715 date: Thu Jan 01 00:00:00 1970 +0000
716 summary: contentA
716 summary: contentA
717
717
718
718
719 Annotate should list ancestor of starting revision only
719 Annotate should list ancestor of starting revision only
720
720
721 $ hg annotate a
721 $ hg annotate a
722 0: A
722 0: A
723 3: B
723 3: B
724 4: C
724 4: C
725
725
726 $ hg annotate a -r 'wdir()'
726 $ hg annotate a -r 'wdir()'
727 0 : A
727 0 : A
728 3 : B
728 3 : B
729 4 : C
729 4 : C
730 4+: W
730 4+: W
731
731
732 Even when the starting revision is the linkrev-shadowed one:
732 Even when the starting revision is the linkrev-shadowed one:
733
733
734 $ hg annotate a -r 3
734 $ hg annotate a -r 3
735 0: A
735 0: A
736 3: B
736 3: B
737
737
738 $ cd ..
738 $ cd ..
739
739
740 Issue5360: Deleted chunk in p1 of a merge changeset
740 Issue5360: Deleted chunk in p1 of a merge changeset
741
741
742 $ hg init repo-5360
742 $ hg init repo-5360
743 $ cd repo-5360
743 $ cd repo-5360
744 $ echo 1 > a
744 $ echo 1 > a
745 $ hg commit -A a -m 1
745 $ hg commit -A a -m 1
746 $ echo 2 >> a
746 $ echo 2 >> a
747 $ hg commit -m 2
747 $ hg commit -m 2
748 $ echo a > a
748 $ echo a > a
749 $ hg commit -m a
749 $ hg commit -m a
750 $ hg update '.^' -q
750 $ hg update '.^' -q
751 $ echo 3 >> a
751 $ echo 3 >> a
752 $ hg commit -m 3 -q
752 $ hg commit -m 3 -q
753 $ hg merge 2 -q
753 $ hg merge 2 -q
754 $ cat > a << EOF
754 $ cat > a << EOF
755 > b
755 > b
756 > 1
756 > 1
757 > 2
757 > 2
758 > 3
758 > 3
759 > a
759 > a
760 > EOF
760 > EOF
761 $ hg resolve --mark -q
761 $ hg resolve --mark -q
762 $ hg commit -m m
762 $ hg commit -m m
763 $ hg annotate a
763 $ hg annotate a
764 4: b
764 4: b
765 0: 1
765 0: 1
766 1: 2
766 1: 2
767 3: 3
767 3: 3
768 2: a
768 2: a
769
769
770 $ cd ..
770 $ cd ..
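As a closing illustration, the followlines() queries exercised in this test can also be driven from a script through the hg command line. This is only a sketch: the repository path is a placeholder, the query simply mirrors one of the commands above, and it assumes an hg executable on PATH.

import subprocess

repo = "repo"  # placeholder: path to some Mercurial repository
revset = "followlines(baz, 3, 5, startrev=20)"  # startrev is the renamed argument
out = subprocess.check_output(
    ["hg", "log", "-R", repo, "-T", "{rev}: {desc}\n", "-r", revset])
print(out)

The -R, -T and -r options are the standard repository, template and revision flags of hg log, so the script prints the same "rev: desc" lines shown in the transcript above.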