##// END OF EJS Templates
destutil: move default merge destination into a function...
Pierre-Yves David -
r26714:9903261d default
parent child Browse files
Show More
@@ -1,93 +1,147
1 1 # destutil.py - Mercurial utility function for command destination
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com> and other
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from .i18n import _
9 9 from . import (
10 10 bookmarks,
11 11 error,
12 12 obsolete,
13 13 )
14 14
def destupdate(repo, clean=False, check=False):
    """destination for bare update operation

    return (rev, movemark, activemark)

    - rev: the revision to update to,
    - movemark: node to move the active bookmark from
                (cf bookmark.calculate update),
    - activemark: a bookmark to activate at the end of the update.
    """
    node = None
    wc = repo[None]
    p1 = wc.p1()
    activemark = None

    # we also move the active bookmark, if any
    node, movemark = bookmarks.calculateupdate(repo.ui, repo, None)
    if node is not None:
        activemark = node

    if node is None:
        # no bookmark destination: fall back to the tip of the current branch
        try:
            node = repo.branchtip(wc.branch())
        except error.RepoLookupError:
            if wc.branch() == 'default': # no default branch!
                node = repo.lookup('tip') # update to tip
            else:
                raise error.Abort(_("branch %s not found") % wc.branch())

    if p1.obsolete() and not p1.children():
        # allow updating to successors
        successors = obsolete.successorssets(repo, p1.node())

        # behavior of certain cases is as follows,
        #
        # divergent changesets: update to highest rev, similar to what
        #     is currently done when there are more than one head
        #     (i.e. 'tip')
        #
        # replaced changesets: same as divergent except we know there
        # is no conflict
        #
        # pruned changeset: no update is done; though, we could
        #     consider updating to the first non-obsolete parent,
        #     similar to what is current done for 'hg prune'

        if successors:
            # flatten the list here handles both divergent (len > 1)
            # and the usual case (len = 1)
            successors = [n for sub in successors for n in sub]

            # get the max revision for the given successors set,
            # i.e. the 'tip' of a set
            node = repo.revs('max(%ln)', successors).first()
    rev = repo[node].rev()

    if not clean:
        # Check that the update is linear.
        #
        # Mercurial do not allow update-merge for non linear pattern
        # (that would be technically possible but was considered too confusing
        # for user a long time ago)
        #
        # See mercurial.merge.update for details
        if p1.rev() not in repo.changelog.ancestors([rev], inclusive=True):
            dirty = wc.dirty(missing=True)
            foreground = obsolete.foreground(repo, [p1.node()])
            if not repo[rev].node() in foreground:
                if dirty:
                    msg = _("uncommitted changes")
                    hint = _("commit and merge, or update --clean to"
                             " discard changes")
                    raise error.UpdateAbort(msg, hint=hint)
                elif not check: # destination is not a descendant.
                    msg = _("not a linear update")
                    hint = _("merge or update --check to force update")
                    raise error.UpdateAbort(msg, hint=hint)

    return rev, movemark, activemark
94
def destmerge(repo):
    """Return the revision number to use as the default merge destination.

    Picks the "other" head of the active bookmark, or of the current
    branch, and aborts with a hint whenever the destination is ambiguous
    or missing.
    """
    if repo._activebookmark:
        # an active bookmark: merge with the other head carrying that bookmark
        bmheads = repo.bookmarkheads(repo._activebookmark)
        curhead = repo[repo._activebookmark].node()
        if len(bmheads) == 2:
            if curhead == bmheads[0]:
                node = bmheads[1]
            else:
                node = bmheads[0]
        elif len(bmheads) > 2:
            raise error.Abort(_("multiple matching bookmarks to merge - "
                "please merge with an explicit rev or bookmark"),
                hint=_("run 'hg heads' to see all heads"))
        elif len(bmheads) <= 1:
            raise error.Abort(_("no matching bookmark to merge - "
                "please merge with an explicit rev or bookmark"),
                hint=_("run 'hg heads' to see all heads"))
    else:
        branch = repo[None].branch()
        bheads = repo.branchheads(branch)
        # candidate heads are those not carrying a bookmark
        nbhs = [bh for bh in bheads if not repo[bh].bookmarks()]

        if len(nbhs) > 2:
            raise error.Abort(_("branch '%s' has %d heads - "
                                "please merge with an explicit rev")
                              % (branch, len(bheads)),
                              hint=_("run 'hg heads .' to see heads"))

        parent = repo.dirstate.p1()
        if len(nbhs) <= 1:
            if len(bheads) > 1:
                raise error.Abort(_("heads are bookmarked - "
                                    "please merge with an explicit rev"),
                                  hint=_("run 'hg heads' to see all heads"))
            if len(repo.heads()) > 1:
                raise error.Abort(_("branch '%s' has one head - "
                                    "please merge with an explicit rev")
                                  % branch,
                                  hint=_("run 'hg heads' to see all heads"))
            msg, hint = _('nothing to merge'), None
            if parent != repo.lookup(branch):
                hint = _("use 'hg update' instead")
            raise error.Abort(msg, hint=hint)

        if parent not in bheads:
            raise error.Abort(_('working directory not at a head revision'),
                              hint=_("use 'hg update' or merge with an "
                                     "explicit revision"))
        # merge with the other unbookmarked head
        if parent == nbhs[0]:
            node = nbhs[-1]
        else:
            node = nbhs[0]
    return repo[node].rev()
@@ -1,3784 +1,3733
1 1 # revset.py - revision set queries for mercurial
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import heapq
11 11 import re
12 12
13 13 from .i18n import _
14 14 from . import (
15 15 destutil,
16 16 encoding,
17 17 error,
18 18 hbisect,
19 19 match as matchmod,
20 20 node,
21 21 obsolete as obsmod,
22 22 parser,
23 23 pathutil,
24 24 phases,
25 25 repoview,
26 26 util,
27 27 )
28 28
def _revancestors(repo, revs, followfirst):
    """Like revlog.ancestors(), but supports followfirst."""
    # cut=1 restricts the parent walk to first parents only
    if followfirst:
        cut = 1
    else:
        cut = None
    cl = repo.changelog

    def iterate():
        # lazily walk ancestors in descending revision order; heapq is a
        # min-heap, so revisions are stored negated to pop the highest first
        revs.sort(reverse=True)
        irevs = iter(revs)
        h = []

        inputrev = next(irevs, None)
        if inputrev is not None:
            heapq.heappush(h, -inputrev)

        seen = set()
        while h:
            current = -heapq.heappop(h)
            # feed the next input rev once the walk reaches the current one,
            # so the heap never holds more input than necessary
            if current == inputrev:
                inputrev = next(irevs, None)
                if inputrev is not None:
                    heapq.heappush(h, -inputrev)
            if current not in seen:
                seen.add(current)
                yield current
                for parent in cl.parentrevs(current)[:cut]:
                    if parent != node.nullrev:
                        heapq.heappush(h, -parent)

    return generatorset(iterate(), iterasc=False)
61 61
def _revdescendants(repo, revs, followfirst):
    """Like revlog.descendants() but supports followfirst."""
    # cut=1 restricts the parent check to first parents only
    if followfirst:
        cut = 1
    else:
        cut = None

    def iterate():
        cl = repo.changelog
        # XXX this should be 'parentset.min()' assuming 'parentset' is a
        # smartset (and if it is not, it should.)
        first = min(revs)
        nullrev = node.nullrev
        if first == nullrev:
            # Are there nodes with a null first parent and a non-null
            # second one? Maybe. Do we care? Probably not.
            for i in cl:
                yield i
        else:
            # a rev is a descendant iff one of its (considered) parents is
            # an input rev or an already-seen descendant
            seen = set(revs)
            for i in cl.revs(first + 1):
                for x in cl.parentrevs(i)[:cut]:
                    if x != nullrev and x in seen:
                        seen.add(i)
                        yield i
                        break

    return generatorset(iterate(), iterasc=True)
90 90
def _reachablerootspure(repo, minroot, roots, heads, includepath):
    """return (heads(::<roots> and ::<heads>))

    If includepath is True, return (<roots>::<heads>).

    Pure-Python fallback for the C implementation used by reachableroots().
    """
    if not roots:
        return []
    parentrevs = repo.changelog.parentrevs
    roots = set(roots)
    visit = list(heads)
    reachable = set()
    seen = {}
    # prefetch all the things! (because python is slow)
    reached = reachable.add
    dovisit = visit.append
    nextvisit = visit.pop
    # open-code the post-order traversal due to the tiny size of
    # sys.getrecursionlimit()
    while visit:
        rev = nextvisit()
        if rev in roots:
            reached(rev)
            if not includepath:
                # only the roots themselves are wanted; don't descend further
                continue
        parents = parentrevs(rev)
        seen[rev] = parents
        for parent in parents:
            if parent >= minroot and parent not in seen:
                dovisit(parent)
    if not reachable:
        return baseset()
    if not includepath:
        return reachable
    # second pass: mark every rev lying on a root-to-head path
    for rev in sorted(seen):
        for parent in seen[rev]:
            if parent in reachable:
                reached(rev)
    return reachable
128 128
def reachableroots(repo, roots, heads, includepath=False):
    """return (heads(::<roots> and ::<heads>))

    If includepath is True, return (<roots>::<heads>)."""
    if not roots:
        return baseset()
    minroot = roots.min()
    rootlist = list(roots)
    headlist = list(heads)
    try:
        # prefer the native implementation when the changelog provides one
        revs = repo.changelog.reachableroots(minroot, headlist, rootlist,
                                             includepath)
    except AttributeError:
        revs = _reachablerootspure(repo, minroot, rootlist, headlist,
                                   includepath)
    result = baseset(revs)
    result.sort()
    return result
145 145
# operator table driving the Pratt-style revset parser: each token maps to
# (binding-strength, primary, prefix, infix, suffix) handlers
elements = {
    # token-type: binding-strength, primary, prefix, infix, suffix
    "(": (21, None, ("group", 1, ")"), ("func", 1, ")"), None),
    "##": (20, None, None, ("_concat", 20), None),
    "~": (18, None, None, ("ancestor", 18), None),
    "^": (18, None, None, ("parent", 18), ("parentpost", 18)),
    "-": (5, None, ("negate", 19), ("minus", 5), None),
    "::": (17, None, ("dagrangepre", 17), ("dagrange", 17),
           ("dagrangepost", 17)),
    "..": (17, None, ("dagrangepre", 17), ("dagrange", 17),
           ("dagrangepost", 17)),
    ":": (15, "rangeall", ("rangepre", 15), ("range", 15), ("rangepost", 15)),
    "not": (10, None, ("not", 10), None, None),
    "!": (10, None, ("not", 10), None, None),
    "and": (5, None, None, ("and", 5), None),
    "&": (5, None, None, ("and", 5), None),
    "%": (5, None, None, ("only", 5), ("onlypost", 5)),
    "or": (4, None, None, ("or", 4), None),
    "|": (4, None, None, ("or", 4), None),
    "+": (4, None, None, ("or", 4), None),
    "=": (3, None, None, ("keyvalue", 3), None),
    ",": (2, None, None, ("list", 2), None),
    ")": (0, None, None, None, None),
    "symbol": (0, "symbol", None, None, None),
    "string": (0, "string", None, None, None),
    "end": (0, None, None, None, None),
}
173 173
# tokens that are operators rather than symbols (see tokenize())
keywords = set(['and', 'or', 'not'])

# default set of valid characters for the initial letter of symbols
# (xrange: this module targets Python 2)
_syminitletters = set(c for c in [chr(i) for i in xrange(256)]
                      if c.isalnum() or c in '._@' or ord(c) > 127)

# default set of valid characters for non-initial letters of symbols
_symletters = set(c for c in [chr(i) for i in xrange(256)]
                  if c.isalnum() or c in '-._/@' or ord(c) > 127)
183 183
def tokenize(program, lookup=None, syminitletters=None, symletters=None):
    '''
    Parse a revset statement into a stream of tokens

    ``syminitletters`` is the set of valid characters for the initial
    letter of symbols.

    By default, character ``c`` is recognized as valid for initial
    letter of symbols, if ``c.isalnum() or c in '._@' or ord(c) > 127``.

    ``symletters`` is the set of valid characters for non-initial
    letters of symbols.

    By default, character ``c`` is recognized as valid for non-initial
    letters of symbols, if ``c.isalnum() or c in '-._/@' or ord(c) > 127``.

    Check that @ is a valid unquoted token character (issue3686):
    >>> list(tokenize("@::"))
    [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]

    '''
    if syminitletters is None:
        syminitletters = _syminitletters
    if symletters is None:
        symletters = _symletters

    if program and lookup:
        # attempt to parse old-style ranges first to deal with
        # things like old-tag which contain query metacharacters
        parts = program.split(':', 1)
        if all(lookup(sym) for sym in parts if sym):
            if parts[0]:
                yield ('symbol', parts[0], 0)
            if len(parts) > 1:
                s = len(parts[0])
                yield (':', None, s)
                if parts[1]:
                    yield ('symbol', parts[1], s + 1)
            yield ('end', None, len(program))
            return

    pos, l = 0, len(program)
    while pos < l:
        c = program[pos]
        if c.isspace(): # skip inter-token whitespace
            pass
        elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
            yield ('::', None, pos)
            pos += 1 # skip ahead
        elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
            yield ('..', None, pos)
            pos += 1 # skip ahead
        elif c == '#' and program[pos:pos + 2] == '##': # look ahead carefully
            yield ('##', None, pos)
            pos += 1 # skip ahead
        elif c in "():=,-|&+!~^%": # handle simple operators
            yield (c, None, pos)
        elif (c in '"\'' or c == 'r' and
              program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
            if c == 'r':
                # raw string: no escape processing
                pos += 1
                c = program[pos]
                decode = lambda x: x
            else:
                decode = parser.unescapestr
            pos += 1
            s = pos
            while pos < l: # find closing quote
                d = program[pos]
                if d == '\\': # skip over escaped characters
                    pos += 2
                    continue
                if d == c:
                    yield ('string', decode(program[s:pos]), s)
                    break
                pos += 1
            else:
                raise error.ParseError(_("unterminated string"), s)
        # gather up a symbol/keyword
        elif c in syminitletters:
            s = pos
            pos += 1
            while pos < l: # find end of symbol
                d = program[pos]
                if d not in symletters:
                    break
                if d == '.' and program[pos - 1] == '.': # special case for ..
                    pos -= 1
                    break
                pos += 1
            sym = program[s:pos]
            if sym in keywords: # operator keywords
                yield (sym, None, s)
            elif '-' in sym:
                # some jerk gave us foo-bar-baz, try to check if it's a symbol
                if lookup and lookup(sym):
                    # looks like a real symbol
                    yield ('symbol', sym, s)
                else:
                    # looks like an expression
                    parts = sym.split('-')
                    for p in parts[:-1]:
                        if p: # possible consecutive -
                            yield ('symbol', p, s)
                        s += len(p)
                        yield ('-', None, pos)
                        s += 1
                    if parts[-1]: # possible trailing -
                        yield ('symbol', parts[-1], s)
            else:
                yield ('symbol', sym, s)
            # step back so the trailing 'pos += 1' lands on the terminator
            pos -= 1
        else:
            raise error.ParseError(_("syntax error in revset '%s'") %
                                   program, pos)
        pos += 1
    yield ('end', None, pos)
301 301
def parseerrordetail(inst):
    """Compose error message from specified ParseError object
    """
    args = inst.args
    if len(args) > 1:
        # a position was recorded alongside the message
        return _('at %s: %s') % (args[1], args[0])
    return args[0]
309 309
310 310 # helpers
311 311
def getstring(x, err):
    """Extract the value of a 'string' or 'symbol' node, else parse-error."""
    if x and x[0] in ('string', 'symbol'):
        return x[1]
    raise error.ParseError(err)
316 316
def getlist(x):
    """Flatten a parsed 'list' node into a Python list of subtrees."""
    if not x:
        return []
    if x[0] != 'list':
        # a single expression is a one-element list
        return [x]
    return getlist(x[1]) + [x[2]]
323 323
def getargs(x, min, max, err):
    """Return the argument list if its length is within [min, max].

    A negative ``max`` means unbounded; otherwise raise ParseError(err).
    """
    l = getlist(x)
    toofew = len(l) < min
    toomany = max >= 0 and len(l) > max
    if toofew or toomany:
        raise error.ParseError(err)
    return l
329 329
def getargsdict(x, funcname, keys):
    """Map parsed arguments onto the names in ``keys`` (space-separated)."""
    arglist = getlist(x)
    return parser.buildargsdict(arglist, funcname, keys.split(),
                                keyvaluenode='keyvalue', keynode='symbol')
333 333
def isvalidsymbol(tree):
    """Examine whether specified ``tree`` is valid ``symbol`` or not
    """
    if tree[0] != 'symbol':
        return False
    return len(tree) > 1
338 338
def getsymbol(tree):
    """Get symbol name from valid ``symbol`` in ``tree``

    This assumes that ``tree`` is already examined by ``isvalidsymbol``.
    """
    name = tree[1]
    return name
345 345
def isvalidfunc(tree):
    """Examine whether specified ``tree`` is valid ``func`` or not
    """
    if tree[0] != 'func' or len(tree) <= 1:
        return False
    return isvalidsymbol(tree[1])
350 350
def getfuncname(tree):
    """Get function name from valid ``func`` in ``tree``

    This assumes that ``tree`` is already examined by ``isvalidfunc``.
    """
    funcsym = tree[1]
    return getsymbol(funcsym)
357 357
def getfuncargs(tree):
    """Get list of function arguments from valid ``func`` in ``tree``

    This assumes that ``tree`` is already examined by ``isvalidfunc``.
    """
    if len(tree) <= 2:
        # no argument subtree at all
        return []
    return getlist(tree[2])
367 367
def getset(repo, subset, x):
    # Evaluate parse tree ``x`` against ``subset``; always return a smartset.
    if not x:
        raise error.ParseError(_("missing argument"))
    s = methods[x[0]](repo, subset, *x[1:])
    if util.safehasattr(s, 'isascending'):
        # already a smartset
        return s
    if (repo.ui.configbool('devel', 'all-warnings')
        or repo.ui.configbool('devel', 'old-revset')):
        # else case should not happen, because all non-func are internal,
        # ignoring for now.
        if x[0] == 'func' and x[1][0] == 'symbol' and x[1][1] in symbols:
            repo.ui.develwarn('revset "%s" use list instead of smartset, '
                              '(upgrade your code)' % x[1][1])
    # wrap the plain list/iterable result for smartset consumers
    return baseset(s)
382 382
def _getrevsource(repo, r):
    """Return the source revision recorded in r's extras, if resolvable."""
    extra = repo[r].extra()
    for key in ('source', 'transplant_source', 'rebase_source'):
        if key not in extra:
            continue
        try:
            return repo[extra[key]].rev()
        except error.RepoLookupError:
            # recorded source no longer resolves; try the next key
            pass
    return None
392 392
393 393 # operator methods
394 394
def stringset(repo, subset, x):
    """Resolve a plain revision identifier to a one-element set."""
    rev = repo[x].rev()
    if rev in subset:
        return baseset([rev])
    # nullrev is implicitly part of a full repo set
    if rev == node.nullrev and isinstance(subset, fullreposet):
        return baseset([rev])
    return baseset()
401 401
def rangeset(repo, subset, x, y):
    # Evaluate 'x:y': all revs between the first rev of x and last rev of y,
    # in either direction, with special handling for the working directory.
    m = getset(repo, fullreposet(repo), x)
    n = getset(repo, fullreposet(repo), y)

    if not m or not n:
        return baseset()
    m, n = m.first(), n.last()

    if m == n:
        r = baseset([m])
    elif n == node.wdirrev:
        # right end is the working directory: span to the end, then wdir
        r = spanset(repo, m, len(repo)) + baseset([n])
    elif m == node.wdirrev:
        # left end is the working directory: wdir first, then descending span
        r = baseset([m]) + spanset(repo, len(repo) - 1, n - 1)
    elif m < n:
        r = spanset(repo, m, n + 1)
    else:
        # reversed range (m > n)
        r = spanset(repo, m, n - 1)
    # XXX We should combine with subset first: 'subset & baseset(...)'. This is
    # necessary to ensure we preserve the order in subset.
    #
    # This has performance implication, carrying the sorting over when possible
    # would be more efficient.
    return r & subset
426 426
def dagrange(repo, subset, x, y):
    """Changesets lying on DAG paths from roots ``x`` to heads ``y``."""
    everything = fullreposet(repo)
    roots = getset(repo, everything, x)
    heads = getset(repo, everything, y)
    xs = reachableroots(repo, roots, heads, includepath=True)
    # XXX We should combine with subset first: 'subset & baseset(...)'. This is
    # necessary to ensure we preserve the order in subset.
    return xs & subset
434 434
def andset(repo, subset, x, y):
    """Intersection: evaluate ``y`` within the result of ``x``."""
    lhs = getset(repo, subset, x)
    return getset(repo, lhs, y)
437 437
def orset(repo, subset, *xs):
    """Union of all expressions in ``xs``, split divide-and-conquer style."""
    assert xs
    if len(xs) == 1:
        return getset(repo, subset, xs[0])
    mid = len(xs) // 2
    left = orset(repo, subset, *xs[:mid])
    right = orset(repo, subset, *xs[mid:])
    return left + right
446 446
def notset(repo, subset, x):
    """Complement of ``x`` within ``subset``."""
    excluded = getset(repo, subset, x)
    return subset - excluded
449 449
def listset(repo, subset, a, b):
    """Reject a bare 'list' node: lists are only valid as function args."""
    msg = _("can't use a list in this context")
    raise error.ParseError(msg)
452 452
def keyvaluepair(repo, subset, k, v):
    """Reject a bare 'keyvalue' node: key=value is only valid as a func arg."""
    msg = _("can't use a key-value pair in this context")
    raise error.ParseError(msg)
455 455
def func(repo, subset, a, b):
    """Dispatch to a registered revset function, or fail with suggestions."""
    if a[0] == 'symbol' and a[1] in symbols:
        return symbols[a[1]](repo, subset, b)

    # unknown name: suggest only documented (public) revset functions
    documented = [name for (name, fn) in symbols.items()
                  if getattr(fn, '__doc__', None) is not None]
    raise error.UnknownIdentifier(a[1], documented)
464 464
465 465 # functions
466 466
def _destupdate(repo, subset, x):
    # experimental revset for update destination
    # funcname passed to getargsdict must match this revset's name so that
    # argument errors mention '_destupdate' (was copy-pasted as 'limit')
    args = getargsdict(x, '_destupdate', 'clean check')
    return subset & baseset([destutil.destupdate(repo, **args)[0]])
471 471
def _mergedefaultdest(repo, subset, x):
    # ``_mergedefaultdest()``

    # default destination for merge.
    # # XXX: Currently private because I expect the signature to change.
    # # XXX: - taking rev as arguments,
    # # XXX: - bailing out in case of ambiguity vs returning all data.
    getargs(x, 0, 0, _("_mergedefaultdest takes no arguments"))
    # head-selection logic lives in destutil.destmerge; the former inline
    # implementation was moved there and is intentionally not duplicated here
    return subset & baseset([destutil.destmerge(repo)])
532 481
def adds(repo, subset, x):
    """``adds(pattern)``
    Changesets that add a file matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "adds" is a keyword
    pattern = getstring(x, _("adds requires a pattern"))
    # status field 1 holds the added files
    return checkstatus(repo, subset, pattern, 1)
544 493
def ancestor(repo, subset, x):
    """``ancestor(*changeset)``
    A greatest common ancestor of the changesets.

    Accepts 0 or more changesets.
    Will return empty list when passed no args.
    Greatest common ancestor of a single changeset is that changeset.
    """
    # i18n: "ancestor" is a keyword
    args = getlist(x)
    rl = fullreposet(repo)
    anc = None

    # fold every revision of every argument into a running common ancestor
    for arg in args:
        for r in getset(repo, rl, arg):
            if anc is None:
                anc = repo[r]
            else:
                anc = anc.ancestor(repo[r])

    if anc is None or anc.rev() not in subset:
        return baseset()
    return baseset([anc.rev()])
569 518
def _ancestors(repo, subset, x, followfirst=False):
    """Shared implementation for ancestors()/_firstancestors()."""
    heads = getset(repo, fullreposet(repo), x)
    if not heads:
        return baseset()
    return subset & _revancestors(repo, heads, followfirst)
576 525
def ancestors(repo, subset, x):
    """``ancestors(set)``
    Changesets that are ancestors of a changeset in set.
    """
    # follow both parents
    return _ancestors(repo, subset, x, followfirst=False)
582 531
def _firstancestors(repo, subset, x):
    # ``_firstancestors(set)``
    # Like ``ancestors(set)`` but follows only the first parents.
    # (leading underscore: internal revset, no user-facing help entry)
    return _ancestors(repo, subset, x, followfirst=True)
587 536
def ancestorspec(repo, subset, x, n):
    """``set~n``
    Changesets that are the Nth ancestor (first parents only) of a changeset
    in set.
    """
    try:
        depth = int(n[1])
    except (TypeError, ValueError):
        raise error.ParseError(_("~ expects a number"))
    cl = repo.changelog
    found = set()
    for r in getset(repo, fullreposet(repo), x):
        # climb `depth` first-parent links
        for _step in range(depth):
            r = cl.parentrevs(r)[0]
        found.add(r)
    return subset & found
604 553
def author(repo, subset, x):
    """``author(string)``
    Alias for ``user(string)``.
    """
    # i18n: "author" is a keyword
    needle = encoding.lower(getstring(x, _("author requires a string")))
    kind, pattern, matcher = _substringmatcher(needle)

    def matches(r):
        # compare case-insensitively against the commit's user field
        return matcher(encoding.lower(repo[r].user()))

    return subset.filter(matches)
613 562
def bisect(repo, subset, x):
    """``bisect(string)``
    Changesets marked in the specified bisect status:

    - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
    - ``goods``, ``bads``      : csets topologically good/bad
    - ``range``              : csets taking part in the bisection
    - ``pruned``             : csets that are goods, bads or skipped
    - ``untested``           : csets whose fate is yet unknown
    - ``ignored``            : csets ignored due to DAG topology
    - ``current``            : the cset currently being bisected
    """
    # i18n: "bisect" is a keyword
    status = getstring(x, _("bisect requires a string")).lower()
    marked = set(hbisect.get(repo, status))
    return subset & marked
630 579
# Backward-compatibility
# - no help entry so that we do not advertise it any more
def bisected(repo, subset, x):
    # deprecated alias for bisect()
    return bisect(repo, subset, x)
635 584
def bookmark(repo, subset, x):
    """``bookmark([name])``
    The named bookmark or all bookmarks.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a bookmark that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "bookmark" is a keyword
    args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
    if args:
        bm = getstring(args[0],
                       # i18n: "bookmark" is a keyword
                       _('the argument to bookmark must be a string'))
        kind, pattern, matcher = util.stringmatcher(bm)
        bms = set()
        if kind == 'literal':
            # exact name: a missing bookmark is an error
            bmrev = repo._bookmarks.get(pattern, None)
            if not bmrev:
                raise error.RepoLookupError(_("bookmark '%s' does not exist")
                                            % pattern)
            bms.add(repo[bmrev].rev())
        else:
            # pattern (e.g. 're:'): collect every matching bookmark
            matchrevs = set()
            for name, bmrev in repo._bookmarks.iteritems():
                if matcher(name):
                    matchrevs.add(bmrev)
            if not matchrevs:
                raise error.RepoLookupError(_("no bookmarks exist"
                                              " that match '%s'") % pattern)
            for bmrev in matchrevs:
                bms.add(repo[bmrev].rev())
    else:
        # no argument: all bookmarked revisions
        bms = set([repo[r].rev()
                   for r in repo._bookmarks.values()])
    bms -= set([node.nullrev])
    return subset & bms
673 622
def branch(repo, subset, x):
    """``branch(string or set)``
    All changesets belonging to the given branch or the branches of the given
    changesets.

    If `string` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a branch that actually starts with `re:`,
    use the prefix `literal:`.
    """
    getbi = repo.revbranchcache().branchinfo

    try:
        b = getstring(x, '')
    except error.ParseError:
        # not a string, but another revspec, e.g. tip()
        pass
    else:
        kind, pattern, matcher = util.stringmatcher(b)
        if kind == 'literal':
            # note: falls through to the revspec case if no branch with
            # this name exists and pattern kind is not specified explicitly
            if pattern in repo.branchmap():
                return subset.filter(lambda r: matcher(getbi(r)[0]))
            if b.startswith('literal:'):
                raise error.RepoLookupError(_("branch '%s' does not exist")
                                            % pattern)
        else:
            return subset.filter(lambda r: matcher(getbi(r)[0]))

    # argument is a revset: select csets on the same branches as its members
    s = getset(repo, fullreposet(repo), x)
    b = set()
    for r in s:
        b.add(getbi(r)[0])
    c = s.__contains__
    return subset.filter(lambda r: c(r) or getbi(r)[0] in b)
709 658
def bumped(repo, subset, x):
    """``bumped()``
    Mutable changesets marked as successors of public changesets.

    Only non-public and non-obsolete changesets can be `bumped`.
    """
    # i18n: "bumped" is a keyword
    getargs(x, 0, 0, _("bumped takes no arguments"))
    return subset & obsmod.getrevs(repo, 'bumped')
720 669
def bundle(repo, subset, x):
    """``bundle()``
    Changesets in the bundle.

    Bundle must be specified by the -R option."""

    try:
        # 'bundlerevs' is only present on a bundle repository's changelog
        bundlerevs = repo.changelog.bundlerevs
    except AttributeError:
        raise error.Abort(_("no bundle provided - specify with -R"))
    return subset & bundlerevs
732 681
def checkstatus(repo, subset, pat, field):
    # Filter ``subset`` to csets whose status list at index ``field``
    # contains a file matching ``pat`` (field 1 = added files, cf. adds()).
    hasset = matchmod.patkind(pat) == 'set'

    # one-element list so the closure can rebuild/reuse the matcher;
    # filesets ('set:' patterns) must be rebuilt per changectx
    mcache = [None]
    def matches(x):
        c = repo[x]
        if not mcache[0] or hasset:
            mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
        m = mcache[0]
        fname = None
        if not m.anypats() and len(m.files()) == 1:
            # fast path: single literal file name, no pattern matching needed
            fname = m.files()[0]
        if fname is not None:
            if fname not in c.files():
                return False
        else:
            for f in c.files():
                if m(f):
                    break
            else:
                # no touched file matches: skip the costly status call
                return False
        files = repo.status(c.p1().node(), c.node())[field]
        if fname is not None:
            if fname in files:
                return True
        else:
            for f in files:
                if m(f):
                    return True

    return subset.filter(matches)
764 713
def _children(repo, narrow, parentset):
    """Revisions in ``narrow`` that have a parent in ``parentset``."""
    if not parentset:
        return baseset()
    found = set()
    parentrevs = repo.changelog.parentrevs
    minrev = parentset.min()
    for rev in narrow:
        # a child is always numbered higher than its parents
        if rev <= minrev:
            continue
        if any(p in parentset for p in parentrevs(rev)):
            found.add(rev)
    # XXX using a set to feed the baseset is wrong. Sets are not ordered.
    # This does not break because of other fullreposet misbehavior.
    return baseset(found)
780 729
def children(repo, subset, x):
    """``children(set)``
    Child changesets of changesets in set.
    """
    parentset = getset(repo, fullreposet(repo), x)
    # restrict the child search to the revisions we may actually return
    return subset & _children(repo, subset, parentset)
788 737
def closed(repo, subset, x):
    """``closed()``
    Changeset is closed.
    """
    # i18n: "closed" is a keyword
    getargs(x, 0, 0, _("closed takes no arguments"))

    def isclosed(r):
        return repo[r].closesbranch()

    return subset.filter(isclosed)
796 745
def contains(repo, subset, x):
    """``contains(pattern)``
    The revision's manifest contains a file matching pattern (but might not
    modify it). See :hg:`help patterns` for information about file patterns.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.
    """
    # i18n: "contains" is a keyword
    pat = getstring(x, _("contains requires a pattern"))

    def matches(x):
        if not matchmod.patkind(pat):
            # kind-less pattern: exact path membership test in the manifest
            pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
            return pats in repo[x]
        c = repo[x]
        m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
        return any(m(f) for f in c.manifest())

    return subset.filter(matches)
823 772
def converted(repo, subset, x):
    """``converted([id])``
    Changesets converted from the given identifier in the old repository if
    present, or all converted changesets if no identifier is specified.
    """

    # There is exactly no chance of resolving the revision, so do a simple
    # string compare and hope for the best
    rev = None
    # i18n: "converted" is a keyword
    l = getargs(x, 0, 1, _('converted takes one or no arguments'))
    if l:
        # i18n: "converted" is a keyword
        rev = getstring(l[0], _('converted requires a revision'))

    def wasconverted(r):
        # conversion tools record their origin in the 'convert_revision' extra
        source = repo[r].extra().get('convert_revision', None)
        if source is None:
            return False
        return rev is None or source.startswith(rev)

    return subset.filter(wasconverted)
845 794
def date(repo, subset, x):
    """``date(interval)``
    Changesets within the interval, see :hg:`help dates`.
    """
    # i18n: "date" is a keyword
    spec = getstring(x, _("date requires a string"))
    inrange = util.matchdate(spec)

    def matches(rev):
        # date()[0] is the changeset's unix timestamp
        return inrange(repo[rev].date()[0])

    return subset.filter(matches)
854 803
def desc(repo, subset, x):
    """``desc(string)``
    Search commit message for string. The match is case-insensitive.
    """
    # i18n: "desc" is a keyword
    needle = encoding.lower(getstring(x, _("desc requires a string")))

    def containsneedle(r):
        # lower-case both sides for the case-insensitive comparison
        return needle in encoding.lower(repo[r].description())

    return subset.filter(containsneedle)
867 816
def _descendants(repo, subset, x, followfirst=False):
    """Shared implementation of descendants() and _firstdescendants().

    Returns the members of subset that are roots of, or descended from,
    the set denoted by x; followfirst restricts the walk to first parents.
    """
    roots = getset(repo, fullreposet(repo), x)
    if not roots:
        return baseset()
    s = _revdescendants(repo, roots, followfirst)

    # Both sets need to be ascending in order to lazily return the union
    # in the correct order.
    base = subset & roots
    desc = subset & s
    result = base + desc
    if subset.isascending():
        result.sort()
    elif subset.isdescending():
        result.sort(reverse=True)
    else:
        # unordered subset: intersect again so the result follows subset's
        # own iteration order
        result = subset & result
    return result
886 835
def descendants(repo, subset, x):
    """``descendants(set)``
    Changesets which are descendants of changesets in set.
    """
    # delegate to the shared helper, following both parents
    return _descendants(repo, subset, x, followfirst=False)
892 841
def _firstdescendants(repo, subset, x):
    # ``_firstdescendants(set)``
    # Same as ``descendants(set)`` but restricted to first parents.
    return _descendants(repo, subset, x, True)
897 846
def destination(repo, subset, x):
    """``destination([set])``
    Changesets that were created by a graft, transplant or rebase operation,
    with the given revisions specified as the source. Omitting the optional set
    is the same as passing all().
    """
    if x is not None:
        sources = getset(repo, fullreposet(repo), x)
    else:
        sources = fullreposet(repo)

    # accumulated destinations found so far; also used as a shortcut so an
    # already-selected src ends the walk early (see below)
    dests = set()

    # subset contains all of the possible destinations that can be returned, so
    # iterate over them and see if their source(s) were provided in the arg set.
    # Even if the immediate src of r is not in the arg set, src's source (or
    # further back) may be. Scanning back further than the immediate src allows
    # transitive transplants and rebases to yield the same results as transitive
    # grafts.
    for r in subset:
        src = _getrevsource(repo, r)
        lineage = None

        while src is not None:
            if lineage is None:
                lineage = list()

            lineage.append(r)

            # The visited lineage is a match if the current source is in the arg
            # set. Since every candidate dest is visited by way of iterating
            # subset, any dests further back in the lineage will be tested by a
            # different iteration over subset. Likewise, if the src was already
            # selected, the current lineage can be selected without going back
            # further.
            if src in sources or src in dests:
                dests.update(lineage)
                break

            # step one link back along the graft/transplant/rebase chain
            r = src
            src = _getrevsource(repo, r)

    return subset.filter(dests.__contains__)
941 890
def divergent(repo, subset, x):
    """``divergent()``
    Final successors of changesets with an alternative set of final successors.
    """
    # i18n: "divergent" is a keyword
    getargs(x, 0, 0, _("divergent takes no arguments"))
    # the obsstore keeps a precomputed cache of divergent revisions
    return subset & obsmod.getrevs(repo, 'divergent')
950 899
def extinct(repo, subset, x):
    """``extinct()``
    Obsolete changesets with obsolete descendants only.
    """
    # i18n: "extinct" is a keyword
    getargs(x, 0, 0, _("extinct takes no arguments"))
    # the obsstore keeps a precomputed cache of extinct revisions
    return subset & obsmod.getrevs(repo, 'extinct')
959 908
def extra(repo, subset, x):
    """``extra(label, [value])``
    Changesets with the given label in the extra metadata, with the given
    optional value.

    If `value` starts with `re:`, the remainder of the value is treated as
    a regular expression. To match a value that actually starts with `re:`,
    use the prefix `literal:`.
    """
    args = getargsdict(x, 'extra', 'label value')
    if 'label' not in args:
        # i18n: "extra" is a keyword
        raise error.ParseError(_('extra takes at least 1 argument'))
    # i18n: "extra" is a keyword
    label = getstring(args['label'], _('first argument to extra must be '
                                       'a string'))
    value = None

    if 'value' in args:
        # i18n: "extra" is a keyword
        value = getstring(args['value'], _('second argument to extra must be '
                                           'a string'))
        kind, value, matcher = util.stringmatcher(value)

    def hasextra(r):
        extra = repo[r].extra()
        if label not in extra:
            return False
        # 'matcher' is only defined when a value was given, but the
        # short-circuit on 'value is None' keeps this safe
        return value is None or matcher(extra[label])

    return subset.filter(hasextra)
989 938
def filelog(repo, subset, x):
    """``filelog(pattern)``
    Changesets connected to the specified filelog.

    For performance reasons, visits only revisions mentioned in the file-level
    filelog, rather than filtering through all changesets (much faster, but
    doesn't include deletes or duplicate changes). For a slower, more accurate
    result, use ``file()``.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.

    If some linkrev points to revisions filtered by the current repoview, we'll
    work around it to return a non-filtered value.
    """

    # i18n: "filelog" is a keyword
    pat = getstring(x, _("filelog requires a pattern"))
    s = set()
    cl = repo.changelog

    if not matchmod.patkind(pat):
        # kind-less pattern: a single exact path, no matcher needed
        f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
        files = [f]
    else:
        m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
        files = (f for f in repo[None] if m(f))

    for f in files:
        backrevref = {} # final value for: filerev -> changerev
        lowestchild = {} # lowest known filerev child of a filerev
        delayed = [] # filerev with filtered linkrev, for post-processing
        lowesthead = None # cache for manifest content of all head revisions
        fl = repo.file(f)
        for fr in list(fl):
            rev = fl.linkrev(fr)
            if rev not in cl:
                # changerev pointed in linkrev is filtered
                # record it for post processing.
                delayed.append((fr, rev))
                continue
            for p in fl.parentrevs(fr):
                if 0 <= p and p not in lowestchild:
                    lowestchild[p] = fr
            backrevref[fr] = rev
            s.add(rev)

        # Post-processing of all filerevs we skipped because they were
        # filtered. If such filerevs have known and unfiltered children, this
        # means they have an unfiltered appearance out there. We'll use linkrev
        # adjustment to find one of these appearances. The lowest known child
        # will be used as a starting point because it is the best upper-bound we
        # have.
        #
        # This approach will fail when an unfiltered but linkrev-shadowed
        # appearance exists in a head changeset without unfiltered filerev
        # children anywhere.
        while delayed:
            # must be a descending iteration. To slowly fill lowest child
            # information that is of potential use by the next item.
            fr, rev = delayed.pop()
            lkr = rev

            child = lowestchild.get(fr)

            if child is None:
                # search for existence of this file revision in a head revision.
                # There are three possibilities:
                # - the revision exists in a head and we can find an
                #   introduction from there,
                # - the revision does not exist in a head because it has been
                #   changed since its introduction: we would have found a child
                #   and be in the other 'else' clause,
                # - all versions of the revision are hidden.
                if lowesthead is None:
                    lowesthead = {}
                    for h in repo.heads():
                        fnode = repo[h].manifest().get(f)
                        if fnode is not None:
                            lowesthead[fl.rev(fnode)] = h
                headrev = lowesthead.get(fr)
                if headrev is None:
                    # content is nowhere unfiltered
                    continue
                rev = repo[headrev][f].introrev()
            else:
                # the lowest known child is a good upper bound
                childcrev = backrevref[child]
                # XXX this does not guarantee returning the lowest
                # introduction of this revision, but this gives a
                # result which is a good start and will fit in most
                # cases. We probably need to fix the multiple
                # introductions case properly (report each
                # introduction, even for identical file revisions)
                # once and for all at some point anyway.
                for p in repo[childcrev][f].parents():
                    if p.filerev() == fr:
                        rev = p.rev()
                        break
            if rev == lkr: # no shadowed entry found
                # XXX This should never happen unless some manifest points
                # to biggish file revisions (like a revision that uses a
                # parent that never appears in the manifest ancestors)
                continue

            # Fill the data for the next iteration.
            for p in fl.parentrevs(fr):
                if 0 <= p and p not in lowestchild:
                    lowestchild[p] = fr
            backrevref[fr] = rev
            s.add(rev)

    return subset & s
1104 1053
def first(repo, subset, x):
    """``first(set, [n])``
    An alias for limit().
    """
    # plain delegation; limit() performs all argument validation
    return limit(repo, subset, x)
1110 1059
def _follow(repo, subset, x, name, followfirst=False):
    """Shared implementation of follow() and _followfirst().

    'name' is only used in error messages; followfirst restricts ancestor
    walks to first parents.
    """
    l = getargs(x, 0, 1, _("%s takes no arguments or a pattern") % name)
    c = repo['.']
    if l:
        # a pattern was given: follow the history of each matching file in
        # the working directory's first parent
        x = getstring(l[0], _("%s expected a pattern") % name)
        matcher = matchmod.match(repo.root, repo.getcwd(), [x],
                                 ctx=repo[None], default='path')

        s = set()
        for fname in c:
            if matcher(fname):
                fctx = c[fname]
                s = s.union(set(c.rev() for c in fctx.ancestors(followfirst)))
                # include the revision responsible for the most recent version
                s.add(fctx.introrev())
    else:
        # no pattern: ancestors of the working directory's first parent
        s = _revancestors(repo, baseset([c.rev()]), followfirst)

    return subset & s
1130 1079
def follow(repo, subset, x):
    """``follow([pattern])``
    An alias for ``::.`` (ancestors of the working directory's first parent).
    If pattern is specified, the histories of files matching given
    pattern is followed, including copies.
    """
    # the name is forwarded only for use in error messages
    return _follow(repo, subset, x, 'follow')
1138 1087
def _followfirst(repo, subset, x):
    # ``_followfirst([pattern])``
    # Same as ``follow([pattern])`` but restricted to the first parent of
    # every revision or file revision.
    return _follow(repo, subset, x, '_followfirst', followfirst=True)
1144 1093
def getall(repo, subset, x):
    """``all()``
    All changesets, the same as ``0:tip``.
    """
    # i18n: "all" is a keyword
    getargs(x, 0, 0, _("all takes no arguments"))
    # intersecting with spanset drops "null" if any
    return subset & spanset(repo)
1152 1101
def grep(repo, subset, x):
    """``grep(regex)``
    Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
    to ensure special escape characters are handled correctly. Unlike
    ``keyword(string)``, the match is case-sensitive.
    """
    try:
        # i18n: "grep" is a keyword
        gr = re.compile(getstring(x, _("grep requires a string")))
    except re.error as e:
        raise error.ParseError(_('invalid match pattern: %s') % e)

    def matches(x):
        c = repo[x]
        # search changed file names, the user name and the description
        return any(gr.search(e)
                   for e in c.files() + [c.user(), c.description()])

    return subset.filter(matches)
1173 1122
def _matchfiles(repo, subset, x):
    # _matchfiles takes a revset list of prefixed arguments:
    #
    #   [p:foo, i:bar, x:baz]
    #
    # builds a match object from them and filters subset. Allowed
    # prefixes are 'p:' for regular patterns, 'i:' for include
    # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
    # a revision identifier, or the empty string to reference the
    # working directory, from which the match object is
    # initialized. Use 'd:' to set the default matching mode, default
    # to 'glob'. At most one 'r:' and 'd:' argument can be passed.

    # i18n: "_matchfiles" is a keyword
    l = getargs(x, 1, -1, _("_matchfiles requires at least one argument"))
    pats, inc, exc = [], [], []
    rev, default = None, None
    for arg in l:
        # i18n: "_matchfiles" is a keyword
        s = getstring(arg, _("_matchfiles requires string arguments"))
        # each argument is a two-character prefix followed by its value
        prefix, value = s[:2], s[2:]
        if prefix == 'p:':
            pats.append(value)
        elif prefix == 'i:':
            inc.append(value)
        elif prefix == 'x:':
            exc.append(value)
        elif prefix == 'r:':
            if rev is not None:
                # i18n: "_matchfiles" is a keyword
                raise error.ParseError(_('_matchfiles expected at most one '
                                         'revision'))
            if value != '': # empty means working directory; leave rev as None
                rev = value
        elif prefix == 'd:':
            if default is not None:
                # i18n: "_matchfiles" is a keyword
                raise error.ParseError(_('_matchfiles expected at most one '
                                         'default mode'))
            default = value
        else:
            # i18n: "_matchfiles" is a keyword
            raise error.ParseError(_('invalid _matchfiles prefix: %s') % prefix)
    if not default:
        default = 'glob'

    # build one matcher for all arguments, anchored at the requested
    # revision (or the working directory when rev is None)
    m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
                       exclude=exc, ctx=repo[rev], default=default)

    def matches(x):
        for f in repo[x].files():
            if m(f):
                return True
        return False

    return subset.filter(matches)
1230 1179
def hasfile(repo, subset, x):
    """``file(pattern)``
    Changesets affecting files matched by pattern.

    For a faster but less accurate result, consider using ``filelog()``
    instead.

    This predicate uses ``glob:`` as the default kind of pattern.
    """
    # i18n: "file" is a keyword
    pat = getstring(x, _("file requires a pattern"))
    # reuse the _matchfiles machinery with a single 'p:' pattern argument
    return _matchfiles(repo, subset, ('string', 'p:' + pat))
1243 1192
def head(repo, subset, x):
    """``head()``
    Changeset is a named branch head.
    """
    # i18n: "head" is a keyword
    getargs(x, 0, 0, _("head takes no arguments"))
    cl = repo.changelog
    headrevs = set()
    for branch, nodes in repo.branchmap().iteritems():
        headrevs.update(cl.rev(n) for n in nodes)
    # XXX using a set to feed the baseset is wrong. Sets are not ordered.
    # This does not break because of other fullreposet misbehavior.
    # XXX We should combine with subset first: 'subset & baseset(...)'. This is
    # necessary to ensure we preserve the order in subset.
    return baseset(headrevs) & subset
1259 1208
def heads(repo, subset, x):
    """``heads(set)``
    Members of set with no children in set.
    """
    members = getset(repo, subset, x)
    # a head is any member that is not a parent of another member
    return members - parents(repo, subset, x)
1267 1216
def hidden(repo, subset, x):
    """``hidden()``
    Hidden changesets.
    """
    # i18n: "hidden" is a keyword
    getargs(x, 0, 0, _("hidden takes no arguments"))
    # hidden revs are exactly those filtered out of the 'visible' view
    return subset & repoview.filterrevs(repo, 'visible')
1276 1225
def keyword(repo, subset, x):
    """``keyword(string)``
    Search commit message, user name, and names of changed files for
    string. The match is case-insensitive.
    """
    # i18n: "keyword" is a keyword
    needle = encoding.lower(getstring(x, _("keyword requires a string")))

    def matches(r):
        c = repo[r]
        for t in c.files() + [c.user(), c.description()]:
            if needle in encoding.lower(t):
                return True
        return False

    return subset.filter(matches)
1291 1240
def limit(repo, subset, x):
    """``limit(set[, n[, offset]])``
    First n members of set, defaulting to 1, starting from offset.
    """
    args = getargsdict(x, 'limit', 'set n offset')
    if 'set' not in args:
        # i18n: "limit" is a keyword
        raise error.ParseError(_("limit requires one to three arguments"))
    try:
        lim, ofs = 1, 0
        if 'n' in args:
            # i18n: "limit" is a keyword
            lim = int(getstring(args['n'], _("limit requires a number")))
        if 'offset' in args:
            # i18n: "limit" is a keyword
            ofs = int(getstring(args['offset'], _("limit requires a number")))
        if ofs < 0:
            raise error.ParseError(_("negative offset"))
    except (TypeError, ValueError):
        # i18n: "limit" is a keyword
        raise error.ParseError(_("limit expects a number"))
    os = getset(repo, fullreposet(repo), args['set'])
    result = []
    it = iter(os)
    # NOTE(review): the loop variable 'x' shadows the parse-tree parameter
    # 'x'; harmless here since 'x' is not used after parsing
    # skip the first 'ofs' members of the evaluated set
    for x in xrange(ofs):
        y = next(it, None)
        if y is None:
            break
    # then collect up to 'lim' members, keeping only those within subset
    for x in xrange(lim):
        y = next(it, None)
        if y is None:
            break
        elif y in subset:
            result.append(y)
    return baseset(result)
1327 1276
def last(repo, subset, x):
    """``last(set, [n])``
    Last n members of set, defaulting to 1.
    """
    # i18n: "last" is a keyword
    l = getargs(x, 1, 2, _("last requires one or two arguments"))
    try:
        lim = 1
        if len(l) == 2:
            # i18n: "last" is a keyword
            lim = int(getstring(l[1], _("last requires a number")))
    except (TypeError, ValueError):
        # i18n: "last" is a keyword
        raise error.ParseError(_("last expects a number"))
    os = getset(repo, fullreposet(repo), l[0])
    # walk the evaluated set backwards so the "last" members come first
    os.reverse()
    result = []
    it = iter(os)
    # collect up to 'lim' members, keeping only those within subset
    for x in xrange(lim):
        y = next(it, None)
        if y is None:
            break
        elif y in subset:
            result.append(y)
    return baseset(result)
1353 1302
def maxrev(repo, subset, x):
    """``max(set)``
    Changeset with highest revision number in set.
    """
    candidates = getset(repo, fullreposet(repo), x)
    try:
        m = candidates.max()
    except ValueError:
        # max() on an empty smartset raises ValueError, same as python's max()
        return baseset()
    if m in subset:
        return baseset([m])
    return baseset()
1368 1317
def merge(repo, subset, x):
    """``merge()``
    Changeset is a merge changeset.
    """
    # i18n: "merge" is a keyword
    getargs(x, 0, 0, _("merge takes no arguments"))
    parentrevsof = repo.changelog.parentrevs
    # a merge has a real (non -1) second parent
    return subset.filter(lambda r: parentrevsof(r)[1] != -1)
1377 1326
def branchpoint(repo, subset, x):
    """``branchpoint()``
    Changesets with more than one child.
    """
    # i18n: "branchpoint" is a keyword
    getargs(x, 0, 0, _("branchpoint takes no arguments"))
    cl = repo.changelog
    if not subset:
        return baseset()
    # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
    # (and if it is not, it should.)
    baserev = min(subset)
    # parentscount[i] counts how many children rev (baserev + i) has
    parentscount = [0]*(len(repo) - baserev)
    for r in cl.revs(start=baserev + 1):
        for p in cl.parentrevs(r):
            # ignore parents below baserev; they cannot be in subset
            if p >= baserev:
                parentscount[p - baserev] += 1
    return subset.filter(lambda r: parentscount[r - baserev] > 1)
1396 1345
def minrev(repo, subset, x):
    """``min(set)``
    Changeset with lowest revision number in set.
    """
    candidates = getset(repo, fullreposet(repo), x)
    try:
        m = candidates.min()
    except ValueError:
        # min() on an empty smartset raises ValueError, same as python's min()
        return baseset()
    if m in subset:
        return baseset([m])
    return baseset()
1411 1360
def modifies(repo, subset, x):
    """``modifies(pattern)``
    Changesets modifying files matched by pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "modifies" is a keyword
    pat = getstring(x, _("modifies requires a pattern"))
    # status field 0 holds the modified files
    return checkstatus(repo, subset, pat, 0)
1423 1372
def named(repo, subset, x):
    """``named(namespace)``
    The changesets in a given namespace.

    If `namespace` starts with `re:`, the remainder of the string is treated as
    a regular expression. To match a namespace that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "named" is a keyword
    args = getargs(x, 1, 1, _('named requires a namespace argument'))

    ns = getstring(args[0],
                   # i18n: "named" is a keyword
                   _('the argument to named must be a string'))
    kind, pattern, matcher = util.stringmatcher(ns)
    # resolve the pattern to the set of matching namespace objects
    namespaces = set()
    if kind == 'literal':
        if pattern not in repo.names:
            raise error.RepoLookupError(_("namespace '%s' does not exist")
                                        % ns)
        namespaces.add(repo.names[pattern])
    else:
        for name, ns in repo.names.iteritems():
            if matcher(name):
                namespaces.add(ns)
        if not namespaces:
            raise error.RepoLookupError(_("no namespace exists"
                                          " that match '%s'") % pattern)

    # collect every rev bound to any non-deprecated name in those namespaces
    names = set()
    for ns in namespaces:
        for name in ns.listnames(repo):
            if name not in ns.deprecated:
                names.update(repo[n].rev() for n in ns.nodes(repo, name))

    names -= set([node.nullrev])
    return subset & names
1461 1410
def node_(repo, subset, x):
    """``id(string)``
    Revision non-ambiguously specified by the given hex string prefix.
    """
    # i18n: "id" is a keyword
    l = getargs(x, 1, 1, _("id requires one argument"))
    # i18n: "id" is a keyword
    n = getstring(l[0], _("id requires a string"))
    if len(n) == 40:
        # full 40-character hash: resolve it directly
        try:
            rn = repo.changelog.rev(node.bin(n))
        except (LookupError, TypeError):
            # unknown node or not valid hex; treated as "no match"
            rn = None
    else:
        # shorter prefix: ask the changelog for an unambiguous match
        rn = None
        pm = repo.changelog._partialmatch(n)
        if pm is not None:
            rn = repo.changelog.rev(pm)

    if rn is None:
        return baseset()
    result = baseset([rn])
    return result & subset
1485 1434
def obsolete(repo, subset, x):
    """``obsolete()``
    Mutable changeset with a newer version."""
    # i18n: "obsolete" is a keyword
    getargs(x, 0, 0, _("obsolete takes no arguments"))
    # the obsstore keeps a precomputed cache of obsolete revisions
    return subset & obsmod.getrevs(repo, 'obsolete')
1493 1442
def only(repo, subset, x):
    """``only(set, [set])``
    Changesets that are ancestors of the first set that are not ancestors
    of any other head in the repo. If a second set is specified, the result
    is ancestors of the first set that are not ancestors of the second set
    (i.e. ::<set1> - ::<set2>).
    """
    cl = repo.changelog
    # i18n: "only" is a keyword
    args = getargs(x, 1, 2, _('only takes one or two arguments'))
    include = getset(repo, fullreposet(repo), args[0])
    if len(args) == 1:
        if not include:
            return baseset()

        # single-argument form: exclude every repo head that is neither in
        # the include set nor descended from it
        descendants = set(_revdescendants(repo, include, False))
        # PEP 8 idiom: 'rev not in' rather than 'not rev in'
        exclude = [rev for rev in cl.headrevs()
                   if rev not in descendants and rev not in include]
    else:
        exclude = getset(repo, fullreposet(repo), args[1])

    results = set(cl.findmissingrevs(common=exclude, heads=include))
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & results
1519 1468
def origin(repo, subset, x):
    """``origin([set])``
    Changesets that were specified as a source for the grafts, transplants or
    rebases that created the given revisions. Omitting the optional set is the
    same as passing all(). If a changeset created by these operations is itself
    specified as a source for one of these operations, only the source changeset
    for the first operation is selected.
    """
    if x is not None:
        dests = getset(repo, fullreposet(repo), x)
    else:
        dests = fullreposet(repo)

    def _firstsrc(rev):
        # walk the graft/transplant/rebase source chain back to its start;
        # returns None when 'rev' has no recorded source at all
        src = _getrevsource(repo, rev)
        if src is None:
            return None

        while True:
            prev = _getrevsource(repo, src)

            if prev is None:
                return src
            src = prev

    o = set([_firstsrc(r) for r in dests])
    # drop the None produced by dests without a recorded source
    o -= set([None])
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & o
1550 1499
def outgoing(repo, subset, x):
    """``outgoing([path])``
    Changesets not found in the specified destination repository, or the
    default push location.
    """
    # Avoid cycles.
    from . import (
        discovery,
        hg,
    )
    # i18n: "outgoing" is a keyword
    l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
    # i18n: "outgoing" is a keyword
    dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
    # fall back to the configured default-push/default paths
    dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.peer(repo, {}, dest)
    # silence the remote discovery chatter
    repo.ui.pushbuffer()
    outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
    repo.ui.popbuffer()
    cl = repo.changelog
    o = set([cl.rev(r) for r in outgoing.missing])
    return subset & o
1577 1526
def p1(repo, subset, x):
    """``p1([set])``
    First parent of changesets in set, or the working directory.
    """
    if x is None:
        # repo[None] is the working directory context
        p = repo[x].p1().rev()
        if p >= 0:
            return subset & baseset([p])
        return baseset()

    parentrevsof = repo.changelog.parentrevs
    ps = set(parentrevsof(r)[0]
             for r in getset(repo, fullreposet(repo), x))
    ps.discard(node.nullrev)
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & ps
1596 1545
def p2(repo, subset, x):
    """``p2([set])``
    Second parent of changesets in set, or the working directory.
    """
    if x is None:
        # repo[None] is the working directory context
        ps = repo[x].parents()
        if len(ps) > 1:
            p = ps[1].rev()
            if p >= 0:
                return subset & baseset([p])
        return baseset()

    parentrevsof = repo.changelog.parentrevs
    prevs = set(parentrevsof(r)[1]
                for r in getset(repo, fullreposet(repo), x))
    prevs.discard(node.nullrev)
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & prevs
1619 1568
def parents(repo, subset, x):
    """``parents([set])``
    The set of all parents for all changesets in set, or the working directory.
    """
    if x is None:
        allparents = set(p.rev() for p in repo[None].parents())
    else:
        allparents = set()
        parentrevs = repo.changelog.parentrevs
        for r in getset(repo, fullreposet(repo), x):
            if r == node.wdirrev:
                # the changelog has no entry for the working directory;
                # ask the context object for its parents instead
                allparents.update(p.rev() for p in repo[r].parents())
            else:
                allparents.update(parentrevs(r))
    allparents.discard(node.nullrev)
    return subset & allparents
1638 1587
def _phase(repo, subset, target):
    """helper to select all rev in phase <target>"""
    phasecache = repo._phasecache
    phasecache.loadphaserevs(repo)  # make sure the phase sets are computed
    if not phasecache._phasesets:
        # no precomputed sets available: test every revision individually
        phase = phasecache.phase
        return subset.filter(lambda r: phase(repo, r) == target, cache=False)
    revs = phasecache._phasesets[target] - repo.changelog.filteredrevs
    ordered = baseset(revs)
    ordered.sort()  # sets are unordered, so we enforce ascending order
    return subset & ordered
1651 1600
def draft(repo, subset, x):
    """``draft()``
    Changeset in draft phase."""
    # i18n: "draft" is a keyword
    getargs(x, 0, 0, _("draft takes no arguments"))
    # delegate to the shared phase-selection helper
    return _phase(repo, subset, phases.draft)
1659 1608
def secret(repo, subset, x):
    """``secret()``
    Changeset in secret phase."""
    # i18n: "secret" is a keyword
    getargs(x, 0, 0, _("secret takes no arguments"))
    # delegate to the shared phase-selection helper
    return _phase(repo, subset, phases.secret)
1667 1616
def parentspec(repo, subset, x, n):
    """``set^0``
    The set.
    ``set^1`` (or ``set^``), ``set^2``
    First or second parent, respectively, of all changesets in set.
    """
    try:
        which = int(n[1])
        if which not in (0, 1, 2):
            raise ValueError
    except (TypeError, ValueError):
        raise error.ParseError(_("^ expects a number 0, 1, or 2"))
    parentrevs = repo.changelog.parentrevs
    ps = set()
    for r in getset(repo, fullreposet(repo), x):
        if which == 0:
            # ^0 is the revision itself
            ps.add(r)
        elif which == 1:
            ps.add(parentrevs(r)[0])
        else:  # which == 2
            prevs = parentrevs(r)
            if len(prevs) > 1:
                ps.add(prevs[1])
    return subset & ps
1692 1641
def present(repo, subset, x):
    """``present(set)``
    An empty set, if any revision in set isn't found; otherwise,
    all revisions in set.

    If any of specified revisions is not present in the local repository,
    the query is normally aborted. But this predicate allows the query
    to continue even in such cases.
    """
    try:
        revs = getset(repo, subset, x)
    except error.RepoLookupError:
        # swallow the lookup failure and yield an empty result instead
        return baseset()
    return revs
1706 1655
# for internal use
def _notpublic(repo, subset, x):
    # select every revision whose phase is anything but public
    getargs(x, 0, 0, "_notpublic takes no arguments")
    phasecache = repo._phasecache
    phasecache.loadphaserevs(repo)  # make sure the phase sets are computed
    if not phasecache._phasesets:
        # no precomputed sets available: test every revision individually
        phase = phasecache.phase
        return subset.filter(lambda r: phase(repo, r) != phases.public,
                             cache=False)
    nonpublic = set()
    for phaseset in phasecache._phasesets[1:]:  # skip the public entry
        nonpublic.update(phaseset)
    ordered = baseset(nonpublic - repo.changelog.filteredrevs)
    ordered.sort()
    return subset & ordered
1723 1672
def public(repo, subset, x):
    """``public()``
    Changeset in public phase."""
    # i18n: "public" is a keyword
    getargs(x, 0, 0, _("public takes no arguments"))
    # NOTE: unlike draft() and secret(), this does not go through the
    # _phase() helper; it always filters revision by revision
    phase = repo._phasecache.phase
    return subset.filter(lambda r: phase(repo, r) == phases.public,
                         cache=False)
1733 1682
def remote(repo, subset, x):
    """``remote([id [,path]])``
    Local revision that corresponds to the given identifier in a
    remote repository, if present. Here, the '.' identifier is a
    synonym for the current local branch.
    """

    from . import hg # avoid start-up nasties
    # i18n: "remote" is a keyword
    l = getargs(x, 0, 2, _("remote takes one, two or no arguments"))

    # default identifier is '.', which resolves to the current branch name
    q = '.'
    if len(l) > 0:
        # i18n: "remote" is a keyword
        q = getstring(l[0], _("remote requires a string id"))
    if q == '.':
        q = repo['.'].branch()

    # second argument, if any, names the remote repository path
    dest = ''
    if len(l) > 1:
        # i18n: "remote" is a keyword
        dest = getstring(l[1], _("remote requires a repository path"))
    dest = repo.ui.expandpath(dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    # contact the peer and resolve the identifier on the remote side
    other = hg.peer(repo, {}, dest)
    n = other.lookup(q)
    # only yield the revision if it is known locally and in the subset
    if n in repo:
        r = repo[n].rev()
        if r in subset:
            return baseset([r])
    return baseset()
1768 1717
def removes(repo, subset, x):
    """``removes(pattern)``
    Changesets which remove files matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "removes" is a keyword
    pat = getstring(x, _("removes requires a pattern"))
    # the final argument selects which status field checkstatus examines;
    # presumably 2 corresponds to removed files here — confirm in checkstatus
    return checkstatus(repo, subset, pat, 2)
1780 1729
def rev(repo, subset, x):
    """``rev(number)``
    Revision with the given numeric identifier.
    """
    # i18n: "rev" is a keyword
    args = getargs(x, 1, 1, _("rev requires one argument"))
    try:
        # i18n: "rev" is a keyword
        r = int(getstring(args[0], _("rev requires a number")))
    except (TypeError, ValueError):
        # i18n: "rev" is a keyword
        raise error.ParseError(_("rev expects a number"))
    # nullrev is accepted even though it never appears in the changelog
    if r == node.nullrev or r in repo.changelog:
        return subset & baseset([r])
    return baseset()
1796 1745
def matching(repo, subset, x):
    """``matching(revision [, field])``
    Changesets in which a given set of fields match the set of fields in the
    selected revision or set.

    To match more than one field pass the list of fields to match separated
    by spaces (e.g. ``author description``).

    Valid fields are most regular revision fields and some special fields.

    Regular revision fields are ``description``, ``author``, ``branch``,
    ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
    and ``diff``.
    Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
    contents of the revision. Two revisions matching their ``diff`` will
    also match their ``files``.

    Special fields are ``summary`` and ``metadata``:
    ``summary`` matches the first line of the description.
    ``metadata`` is equivalent to matching ``description user date``
    (i.e. it matches the main metadata fields).

    ``metadata`` is the default field which is used when no fields are
    specified. You can match more than one field at a time.
    """
    # i18n: "matching" is a keyword
    l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))

    revs = getset(repo, fullreposet(repo), l[0])

    fieldlist = ['metadata']
    if len(l) > 1:
        fieldlist = getstring(l[1],
                              # i18n: "matching" is a keyword
                              _("matching requires a string "
                                "as its second argument")).split()

    # Make sure that there are no repeated fields,
    # expand the 'special' 'metadata' field type
    # and check the 'files' whenever we check the 'diff'
    fields = []
    for field in fieldlist:
        if field == 'metadata':
            fields += ['user', 'description', 'date']
        elif field == 'diff':
            # a revision matching the diff must also match the files
            # since matching the diff is very costly, make sure to
            # also match the files first
            fields += ['files', 'diff']
        else:
            if field == 'author':
                field = 'user'
            fields.append(field)
    fields = set(fields)
    if 'summary' in fields and 'description' in fields:
        # If a revision matches its description it also matches its summary
        fields.discard('summary')

    # We may want to match more than one field
    # Not all fields take the same amount of time to be matched
    # Sort the selected fields in order of increasing matching cost
    fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
                  'files', 'description', 'substate', 'diff']
    def fieldkeyfunc(f):
        try:
            return fieldorder.index(f)
        except ValueError:
            # assume an unknown field is very costly
            return len(fieldorder)
    fields = list(fields)
    fields.sort(key=fieldkeyfunc)

    # Each field will be matched with its own "getfield" function
    # which will be added to the getfieldfuncs array of functions
    getfieldfuncs = []
    _funcs = {
        'user': lambda r: repo[r].user(),
        'branch': lambda r: repo[r].branch(),
        'date': lambda r: repo[r].date(),
        'description': lambda r: repo[r].description(),
        'files': lambda r: repo[r].files(),
        'parents': lambda r: repo[r].parents(),
        'phase': lambda r: repo[r].phase(),
        'substate': lambda r: repo[r].substate,
        'summary': lambda r: repo[r].description().splitlines()[0],
        'diff': lambda r: list(repo[r].diff(git=True)),
    }
    for info in fields:
        getfield = _funcs.get(info, None)
        if getfield is None:
            raise error.ParseError(
                # i18n: "matching" is a keyword
                _("unexpected field name passed to matching: %s") % info)
        getfieldfuncs.append(getfield)
    # convert the getfield array of functions into a "getinfo" function
    # which returns an array of field values (or a single value if there
    # is only one field to match)
    getinfo = lambda r: [f(r) for f in getfieldfuncs]

    def matches(x):
        for rev in revs:
            target = getinfo(rev)
            # stop at the first mismatching field instead of evaluating
            # every remaining (possibly very expensive, e.g. 'diff') field
            # function: the fields are already ordered cheapest-first, so
            # short-circuiting here is exactly what that ordering is for
            if all(target[n] == f(x) for n, f in enumerate(getfieldfuncs)):
                return True
        return False

    return subset.filter(matches)
1908 1857
def reverse(repo, subset, x):
    """``reverse(set)``
    Reverse order of set.
    """
    revs = getset(repo, subset, x)
    revs.reverse()  # smartsets reverse in place
    return revs
1916 1865
def roots(repo, subset, x):
    """``roots(set)``
    Changesets in set with no parent changeset in set.
    """
    candidates = getset(repo, fullreposet(repo), x)
    parentrevs = repo.changelog.parentrevs
    def isroot(r):
        # a root has no real (non-null) parent inside the candidate set
        return not any(0 <= p and p in candidates for p in parentrevs(r))
    return subset & candidates.filter(isroot)
1929 1878
def sort(repo, subset, x):
    """``sort(set[, [-]key...])``
    Sort set by keys. The default sort order is ascending, specify a key
    as ``-key`` to sort in descending order.

    The keys can be:

    - ``rev`` for the revision number,
    - ``branch`` for the branch name,
    - ``desc`` for the commit message (description),
    - ``user`` for user name (``author`` can be used as an alias),
    - ``date`` for the commit date
    """
    # i18n: "sort" is a keyword
    l = getargs(x, 1, 2, _("sort requires one or two arguments"))
    keys = "rev"
    if len(l) == 2:
        # i18n: "sort" is a keyword
        keys = getstring(l[1], _("sort spec must be a string"))

    s = l[0]
    keys = keys.split()
    l = []
    # map each string to its byte-wise complement so that sorting the
    # inverted strings ascending is equivalent to sorting descending
    def invert(s):
        return "".join(chr(255 - ord(c)) for c in s)
    revs = getset(repo, subset, s)
    # fast paths: a pure revision-number sort needs no per-key tuples
    if keys == ["rev"]:
        revs.sort()
        return revs
    elif keys == ["-rev"]:
        revs.sort(reverse=True)
        return revs
    # general case: build a sort-key list per revision and sort those
    for r in revs:
        c = repo[r]
        e = []
        for k in keys:
            if k == 'rev':
                e.append(r)
            elif k == '-rev':
                e.append(-r)
            elif k == 'branch':
                e.append(c.branch())
            elif k == '-branch':
                e.append(invert(c.branch()))
            elif k == 'desc':
                e.append(c.description())
            elif k == '-desc':
                e.append(invert(c.description()))
            elif k in 'user author':
                e.append(c.user())
            elif k in '-user -author':
                e.append(invert(c.user()))
            elif k == 'date':
                e.append(c.date()[0])
            elif k == '-date':
                e.append(-c.date()[0])
            else:
                raise error.ParseError(_("unknown sort key %r") % k)
        # the revision itself goes last: it acts as a tiebreaker and is
        # what e[-1] recovers after sorting
        e.append(r)
        l.append(e)
    l.sort()
    return baseset([e[-1] for e in l])
1992 1941
def subrepo(repo, subset, x):
    """``subrepo([pattern])``
    Changesets that add, modify or remove the given subrepo.  If no subrepo
    pattern is named, any subrepo changes are returned.
    """
    # i18n: "subrepo" is a keyword
    args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
    if len(args) != 0:
        pat = getstring(args[0], _("subrepo requires a pattern"))

    # subrepo state is recorded in .hgsubstate, so only that file matters
    m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])

    # yield the subrepo names in 'names' that match 'pat'
    # (only called when a pattern argument was given, so 'pat' is bound)
    def submatches(names):
        k, p, m = util.stringmatcher(pat)
        for name in names:
            if m(name):
                yield name

    def matches(x):
        c = repo[x]
        # status of .hgsubstate between the revision and its first parent
        s = repo.status(c.p1().node(), c.node(), match=m)

        if len(args) == 0:
            # no pattern: any change to .hgsubstate qualifies
            return s.added or s.modified or s.removed

        if s.added:
            return any(submatches(c.substate.keys()))

        if s.modified:
            # compare subrepo state on both sides to find which ones changed
            subs = set(c.p1().substate.keys())
            subs.update(c.substate.keys())

            for path in submatches(subs):
                if c.p1().substate.get(path) != c.substate.get(path):
                    return True

        if s.removed:
            return any(submatches(c.p1().substate.keys()))

        return False

    return subset.filter(matches)
2035 1984
def _substringmatcher(pattern):
    """Like util.stringmatcher, but a literal pattern matches substrings."""
    kind, pattern, matcher = util.stringmatcher(pattern)
    if kind != 'literal':
        return kind, pattern, matcher
    return kind, pattern, lambda s: pattern in s
2041 1990
def tag(repo, subset, x):
    """``tag([name])``
    The specified tag by name, or all tagged revisions if no name is given.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a tag that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "tag" is a keyword
    args = getargs(x, 0, 1, _("tag takes one or no arguments"))
    cl = repo.changelog
    if not args:
        # no name: every tagged revision, leaving out the implicit 'tip'
        tagged = set(cl.rev(n) for t, n in repo.tagslist() if t != 'tip')
        return subset & tagged
    pattern = getstring(args[0],
                        # i18n: "tag" is a keyword
                        _('the argument to tag must be a string'))
    kind, pattern, matcher = util.stringmatcher(pattern)
    if kind != 'literal':
        # pattern match against the full tag list
        tagged = set(cl.rev(n) for t, n in repo.tagslist() if matcher(t))
        return subset & tagged
    # literal name: look it up directly to avoid resolving all tags
    tn = repo._tagscache.tags.get(pattern, None)
    if tn is None:
        raise error.RepoLookupError(_("tag '%s' does not exist")
                                    % pattern)
    return subset & set([repo[tn].rev()])
2070 2019
def tagged(repo, subset, x):
    # ``tagged`` is an alias: behaves exactly like tag()
    return tag(repo, subset, x)
2073 2022
def unstable(repo, subset, x):
    """``unstable()``
    Non-obsolete changesets with obsolete ancestors.
    """
    # i18n: "unstable" is a keyword
    getargs(x, 0, 0, _("unstable takes no arguments"))
    return subset & obsmod.getrevs(repo, 'unstable')
2082 2031
2083 2032
def user(repo, subset, x):
    """``user(string)``
    User name contains string. The match is case-insensitive.

    If `string` starts with `re:`, the remainder of the string is treated as
    a regular expression. To match a user that actually contains `re:`, use
    the prefix `literal:`.
    """
    # "user" and "author" are synonyms; delegate to author()
    return author(repo, subset, x)
2093 2042
# experimental
def wdir(repo, subset, x):
    # i18n: "wdir" is a keyword
    getargs(x, 0, 0, _("wdir takes no arguments"))
    # a fullreposet is treated as implicitly containing the working
    # directory revision
    if isinstance(subset, fullreposet) or node.wdirrev in subset:
        return baseset([node.wdirrev])
    return baseset()
2101 2050
# for internal use
def _list(repo, subset, x):
    # x is a '\0'-separated string of revision identifiers
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    # remove duplicates here. it's difficult for caller to deduplicate sets
    # because different symbols can point to the same rev.
    cl = repo.changelog
    ls = []
    seen = set()
    for t in s.split('\0'):
        try:
            # fast path for integer revision
            r = int(t)
            # str(r) != t rejects forms like '01' or '+1' that int() would
            # accept but are not plain revision numbers
            if str(r) != t or r not in cl:
                raise ValueError
            revs = [r]
        except ValueError:
            # not a plain integer: resolve the symbol the slow way
            revs = stringset(repo, subset, t)

        for r in revs:
            if r in seen:
                continue
            # nullrev is accepted when the subset is the full repo, since
            # it is never an actual member of any subset
            if (r in subset
                or r == node.nullrev and isinstance(subset, fullreposet)):
                ls.append(r)
            seen.add(r)
    return baseset(ls)
2130 2079
# for internal use
def _intlist(repo, subset, x):
    # x is a '\0'-separated string of integer revision numbers
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    wanted = [int(r) for r in s.split('\0')]
    return baseset([r for r in wanted if r in subset])
2139 2088
# for internal use
def _hexlist(repo, subset, x):
    # x is a '\0'-separated string of binary-hex node ids
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    torev = repo.changelog.rev
    wanted = [torev(node.bin(r)) for r in s.split('\0')]
    return baseset([r for r in wanted if r in subset])
2149 2098
# table mapping revset predicate names (as they appear in user queries)
# to their implementation functions; names starting with '_' are internal
symbols = {
    "_destupdate": _destupdate,
    "_mergedefaultdest": _mergedefaultdest,
    "adds": adds,
    "all": getall,
    "ancestor": ancestor,
    "ancestors": ancestors,
    "_firstancestors": _firstancestors,
    "author": author,
    "bisect": bisect,
    "bisected": bisected,
    "bookmark": bookmark,
    "branch": branch,
    "branchpoint": branchpoint,
    "bumped": bumped,
    "bundle": bundle,
    "children": children,
    "closed": closed,
    "contains": contains,
    "converted": converted,
    "date": date,
    "desc": desc,
    "descendants": descendants,
    "_firstdescendants": _firstdescendants,
    "destination": destination,
    "divergent": divergent,
    "draft": draft,
    "extinct": extinct,
    "extra": extra,
    "file": hasfile,
    "filelog": filelog,
    "first": first,
    "follow": follow,
    "_followfirst": _followfirst,
    "grep": grep,
    "head": head,
    "heads": heads,
    "hidden": hidden,
    "id": node_,
    "keyword": keyword,
    "last": last,
    "limit": limit,
    "_matchfiles": _matchfiles,
    "max": maxrev,
    "merge": merge,
    "min": minrev,
    "modifies": modifies,
    "named": named,
    "obsolete": obsolete,
    "only": only,
    "origin": origin,
    "outgoing": outgoing,
    "p1": p1,
    "p2": p2,
    "parents": parents,
    "present": present,
    "public": public,
    "_notpublic": _notpublic,
    "remote": remote,
    "removes": removes,
    "rev": rev,
    "reverse": reverse,
    "roots": roots,
    "sort": sort,
    "secret": secret,
    "subrepo": subrepo,
    "matching": matching,
    "tag": tag,
    "tagged": tagged,
    "user": user,
    "unstable": unstable,
    "wdir": wdir,
    "_list": _list,
    "_intlist": _intlist,
    "_hexlist": _hexlist,
}
2226 2175
# symbols which can't be used for a DoS attack for any given input
# (e.g. those which accept regexes as plain strings shouldn't be included)
# functions that just return a lot of changesets (like all) don't count here
# (note: this must be kept in sync with the 'symbols' table above)
safesymbols = set([
    "adds",
    "all",
    "ancestor",
    "ancestors",
    "_firstancestors",
    "author",
    "bisect",
    "bisected",
    "bookmark",
    "branch",
    "branchpoint",
    "bumped",
    "bundle",
    "children",
    "closed",
    "converted",
    "date",
    "desc",
    "descendants",
    "_firstdescendants",
    "destination",
    "divergent",
    "draft",
    "extinct",
    "extra",
    "file",
    "filelog",
    "first",
    "follow",
    "_followfirst",
    "head",
    "heads",
    "hidden",
    "id",
    "keyword",
    "last",
    "limit",
    "_matchfiles",
    "max",
    "merge",
    "min",
    "modifies",
    "obsolete",
    "only",
    "origin",
    "outgoing",
    "p1",
    "p2",
    "parents",
    "present",
    "public",
    "_notpublic",
    "remote",
    "removes",
    "rev",
    "reverse",
    "roots",
    "sort",
    "secret",
    "matching",
    "tag",
    "tagged",
    "user",
    "unstable",
    "wdir",
    "_list",
    "_intlist",
    "_hexlist",
    ])
2300 2249
# table mapping parse-tree node types to the function that evaluates them
methods = {
    "range": rangeset,
    "dagrange": dagrange,
    "string": stringset,
    "symbol": stringset,
    "and": andset,
    "or": orset,
    "not": notset,
    "list": listset,
    "keyvalue": keyvaluepair,
    "func": func,
    "ancestor": ancestorspec,
    "parent": parentspec,
    "parentpost": p1,
}
2316 2265
def optimize(x, small):
    """Rewrite parse tree 'x' into an equivalent, cheaper-to-evaluate tree.

    Returns a (weight, tree) pair: 'weight' is a heuristic evaluation-cost
    estimate (used e.g. to reorder 'and' operands), 'tree' the rewritten
    parse tree.  'small' hints that the result is expected to be small.
    """
    if x is None:
        return 0, x

    smallbonus = 1
    if small:
        smallbonus = .5

    op = x[0]
    if op == 'minus':
        # 'a - b' is rewritten as 'a and not b'
        return optimize(('and', x[1], ('not', x[2])), small)
    elif op == 'only':
        return optimize(('func', ('symbol', 'only'),
                         ('list', x[1], x[2])), small)
    elif op == 'onlypost':
        return optimize(('func', ('symbol', 'only'), x[1]), small)
    elif op == 'dagrangepre':
        return optimize(('func', ('symbol', 'ancestors'), x[1]), small)
    elif op == 'dagrangepost':
        return optimize(('func', ('symbol', 'descendants'), x[1]), small)
    elif op == 'rangeall':
        return optimize(('range', ('string', '0'), ('string', 'tip')), small)
    elif op == 'rangepre':
        return optimize(('range', ('string', '0'), x[1]), small)
    elif op == 'rangepost':
        return optimize(('range', x[1], ('string', 'tip')), small)
    elif op == 'negate':
        return optimize(('string',
                         '-' + getstring(x[1], _("can't negate that"))), small)
    # NOTE: substring membership — matches op == 'string' or 'symbol'
    # ('negate' was already handled above)
    elif op in 'string symbol negate':
        return smallbonus, x # single revisions are small
    elif op == 'and':
        wa, ta = optimize(x[1], True)
        wb, tb = optimize(x[2], True)

        # (::x and not ::y)/(not ::y and ::x) have a fast path
        def isonly(revs, bases):
            return (
                revs is not None
                and revs[0] == 'func'
                and getstring(revs[1], _('not a symbol')) == 'ancestors'
                and bases is not None
                and bases[0] == 'not'
                and bases[1][0] == 'func'
                and getstring(bases[1][1], _('not a symbol')) == 'ancestors')

        w = min(wa, wb)
        if isonly(ta, tb):
            return w, ('func', ('symbol', 'only'), ('list', ta[2], tb[1][2]))
        if isonly(tb, ta):
            return w, ('func', ('symbol', 'only'), ('list', tb[2], ta[1][2]))

        # evaluate the cheaper operand first
        if wa > wb:
            return w, (op, tb, ta)
        return w, (op, ta, tb)
    elif op == 'or':
        # fast path for machine-generated expression, that is likely to have
        # lots of trivial revisions: 'a + b + c()' to '_list(a b) + c()'
        ws, ts, ss = [], [], []
        def flushss():
            if not ss:
                return
            if len(ss) == 1:
                w, t = ss[0]
            else:
                s = '\0'.join(t[1] for w, t in ss)
                y = ('func', ('symbol', '_list'), ('string', s))
                w, t = optimize(y, False)
            ws.append(w)
            ts.append(t)
            del ss[:]
        for y in x[1:]:
            w, t = optimize(y, False)
            if t is not None and (t[0] == 'string' or t[0] == 'symbol'):
                ss.append((w, t))
                continue
            flushss()
            ws.append(w)
            ts.append(t)
        flushss()
        if len(ts) == 1:
            return ws[0], ts[0] # 'or' operation is fully optimized out
        # we can't reorder trees by weight because it would change the order.
        # ("sort(a + b)" == "sort(b + a)", but "a + b" != "b + a")
        # ts = tuple(t for w, t in sorted(zip(ws, ts), key=lambda wt: wt[0]))
        return max(ws), (op,) + tuple(ts)
    elif op == 'not':
        # Optimize not public() to _notpublic() because we have a fast version
        if x[1] == ('func', ('symbol', 'public'), None):
            newsym = ('func', ('symbol', '_notpublic'), None)
            o = optimize(newsym, not small)
            return o[0], o[1]
        else:
            o = optimize(x[1], not small)
            return o[0], (op, o[1])
    elif op == 'parentpost':
        o = optimize(x[1], small)
        return o[0], (op, o[1])
    elif op == 'group':
        return optimize(x[1], small)
    elif op in 'dagrange range list parent ancestorspec':
        if op == 'parent':
            # x^:y means (x^) : y, not x ^ (:y)
            post = ('parentpost', x[1])
            if x[2][0] == 'dagrangepre':
                return optimize(('dagrange', post, x[2][1]), small)
            elif x[2][0] == 'rangepre':
                return optimize(('range', post, x[2][1]), small)

        wa, ta = optimize(x[1], small)
        wb, tb = optimize(x[2], small)
        return wa + wb, (op, ta, tb)
    elif op == 'func':
        f = getstring(x[1], _("not a symbol"))
        wa, ta = optimize(x[2], small)
        # NOTE: the 'f in "..."' tests below are substring-membership
        # checks against a space-separated list of function names
        if f in ("author branch closed date desc file grep keyword "
                 "outgoing user"):
            w = 10 # slow
        elif f in "modifies adds removes":
            w = 30 # slower
        elif f == "contains":
            w = 100 # very slow
        elif f == "ancestor":
            w = 1 * smallbonus
        elif f in "reverse limit first _intlist":
            w = 0
        elif f in "sort":
            w = 10 # assume most sorts look at changelog
        else:
            w = 1
        return w + wa, (op, x[1], ta)
    return 1, x
2449 2398
2450 2399 _aliasarg = ('func', ('symbol', '_aliasarg'))
2451 2400 def _getaliasarg(tree):
2452 2401 """If tree matches ('func', ('symbol', '_aliasarg'), ('string', X))
2453 2402 return X, None otherwise.
2454 2403 """
2455 2404 if (len(tree) == 3 and tree[:2] == _aliasarg
2456 2405 and tree[2][0] == 'string'):
2457 2406 return tree[2][1]
2458 2407 return None
2459 2408
def _checkaliasarg(tree, known=None):
    """Check tree contains no _aliasarg construct or only ones which
    value is in known. Used to avoid alias placeholders injection.
    """
    if not isinstance(tree, tuple):
        return
    arg = _getaliasarg(tree)
    if arg is not None and (not known or arg not in known):
        raise error.UnknownIdentifier('_aliasarg', [])
    # recurse into every child of the tuple
    for subtree in tree:
        _checkaliasarg(subtree, known)
2470 2419
# the set of valid characters for the initial letter of symbols in
# alias declarations and definitions
# ('$' is allowed for backward-compatible alias arguments, and any
# non-ASCII byte is allowed via the ord(c) > 127 test)
_aliassyminitletters = set(c for c in [chr(i) for i in xrange(256)]
                           if c.isalnum() or c in '._@$' or ord(c) > 127)
2475 2424
def _tokenizealias(program, lookup=None):
    """Parse alias declaration/definition into a stream of tokens

    This allows symbol names to use also ``$`` as an initial letter
    (for backward compatibility), and callers of this function should
    examine whether ``$`` is used also for unexpected symbols or not.
    """
    # reuse the regular revset tokenizer, but widen the set of characters
    # allowed to start a symbol to _aliassyminitletters (which adds '$')
    return tokenize(program, lookup=lookup,
                    syminitletters=_aliassyminitletters)
2485 2434
def _parsealiasdecl(decl):
    """Parse alias declaration ``decl``

    This returns ``(name, tree, args, errorstr)`` tuple:

    - ``name``: of declared alias (may be ``decl`` itself at error)
    - ``tree``: parse result (or ``None`` at error)
    - ``args``: list of alias argument names (or None for symbol declaration)
    - ``errorstr``: detail about detected error (or None)

    >>> _parsealiasdecl('foo')
    ('foo', ('symbol', 'foo'), None, None)
    >>> _parsealiasdecl('$foo')
    ('$foo', None, None, "'$' not for alias arguments")
    >>> _parsealiasdecl('foo::bar')
    ('foo::bar', None, None, 'invalid format')
    >>> _parsealiasdecl('foo bar')
    ('foo bar', None, None, 'at 4: invalid token')
    >>> _parsealiasdecl('foo()')
    ('foo', ('func', ('symbol', 'foo')), [], None)
    >>> _parsealiasdecl('$foo()')
    ('$foo()', None, None, "'$' not for alias arguments")
    >>> _parsealiasdecl('foo($1, $2)')
    ('foo', ('func', ('symbol', 'foo')), ['$1', '$2'], None)
    >>> _parsealiasdecl('foo(bar_bar, baz.baz)')
    ('foo', ('func', ('symbol', 'foo')), ['bar_bar', 'baz.baz'], None)
    >>> _parsealiasdecl('foo($1, $2, nested($1, $2))')
    ('foo($1, $2, nested($1, $2))', None, None, 'invalid argument list')
    >>> _parsealiasdecl('foo(bar($1, $2))')
    ('foo(bar($1, $2))', None, None, 'invalid argument list')
    >>> _parsealiasdecl('foo("string")')
    ('foo("string")', None, None, 'invalid argument list')
    >>> _parsealiasdecl('foo($1, $2')
    ('foo($1, $2', None, None, 'at 10: unexpected token: end')
    >>> _parsealiasdecl('foo("string')
    ('foo("string', None, None, 'at 5: unterminated string')
    >>> _parsealiasdecl('foo($1, $2, $1)')
    ('foo', None, None, 'argument names collide with each other')
    """
    p = parser.parser(elements)
    try:
        tree, pos = p.parse(_tokenizealias(decl))
        # the whole declaration must be consumed; trailing garbage is an error
        if (pos != len(decl)):
            raise error.ParseError(_('invalid token'), pos)

        if isvalidsymbol(tree):
            # "name = ...." style
            name = getsymbol(tree)
            if name.startswith('$'):
                return (decl, None, None, _("'$' not for alias arguments"))
            return (name, ('symbol', name), None, None)

        if isvalidfunc(tree):
            # "name(arg, ....) = ...." style
            name = getfuncname(tree)
            if name.startswith('$'):
                return (decl, None, None, _("'$' not for alias arguments"))
            args = []
            for arg in getfuncargs(tree):
                # every argument must be a plain symbol (no nesting/strings)
                if not isvalidsymbol(arg):
                    return (decl, None, None, _("invalid argument list"))
                args.append(getsymbol(arg))
            if len(args) != len(set(args)):
                return (name, None, None,
                        _("argument names collide with each other"))
            return (name, ('func', ('symbol', name)), args, None)

        return (decl, None, None, _("invalid format"))
    except error.ParseError as inst:
        # report the parse failure as an error string, not an exception
        return (decl, None, None, parseerrordetail(inst))
2556 2505
def _parsealiasdefn(defn, args):
    """Parse alias definition ``defn`` into a tree.

    Alias argument references in the definition are replaced by
    ``_aliasarg(ARGNAME)`` placeholders.

    ``args`` is a list of alias argument names, or None if the alias
    is declared as a symbol.

    This returns "tree" as parsing result.

    >>> args = ['$1', '$2', 'foo']
    >>> print prettyformat(_parsealiasdefn('$1 or foo', args))
    (or
      (func
        ('symbol', '_aliasarg')
        ('string', '$1'))
      (func
        ('symbol', '_aliasarg')
        ('string', 'foo')))
    >>> try:
    ...     _parsealiasdefn('$1 or $bar', args)
    ... except error.ParseError, inst:
    ...     print parseerrordetail(inst)
    at 6: '$' not for alias arguments
    >>> args = ['$1', '$10', 'foo']
    >>> print prettyformat(_parsealiasdefn('$10 or foobar', args))
    (or
      (func
        ('symbol', '_aliasarg')
        ('string', '$10'))
      ('symbol', 'foobar'))
    >>> print prettyformat(_parsealiasdefn('"$1" or "foo"', args))
    (or
      ('string', '$1')
      ('string', 'foo'))
    """
    def defntokens(program, lookup=None):
        # names of the declared alias arguments (empty for symbol aliases)
        argset = set(args or ())
        for t, value, pos in _tokenizealias(program, lookup=lookup):
            if t == 'symbol' and value in argset:
                # emulate tokenization of "_aliasarg('ARGNAME')":
                # "_aliasarg()" is an unknown symbol only used separate
                # alias argument placeholders from regular strings.
                yield ('symbol', '_aliasarg', pos)
                yield ('(', None, pos)
                yield ('string', value, pos)
                yield (')', None, pos)
            elif t == 'symbol' and value.startswith('$'):
                # '$'-prefixed symbols are reserved for alias arguments
                raise error.ParseError(_("'$' not for alias arguments"),
                                       pos)
            else:
                yield (t, value, pos)

    tree, pos = parser.parser(elements).parse(defntokens(defn))
    if pos != len(defn):
        raise error.ParseError(_('invalid token'), pos)
    return parser.simplifyinfixops(tree, ('or',))
2621 2570
class revsetalias(object):
    # whether own `error` information is already shown or not.
    # this avoids showing same warning multiple times at each `findaliases`.
    warned = False

    def __init__(self, name, value):
        '''Build an alias from a [revsetalias] config entry, like:

        h = heads(default)
        b($1) = ancestors($1) - ancestors(default)

        On failure, self.error carries a human-readable message and
        self.replacement is left unset.
        '''
        # declaration side ("h" / "b($1)")
        self.name, self.tree, self.args, self.error = _parsealiasdecl(name)
        if self.error:
            self.error = _('failed to parse the declaration of revset alias'
                           ' "%s": %s') % (self.name, self.error)
            return

        # definition side ("heads(default)" / "ancestors($1) - ...")
        try:
            self.replacement = _parsealiasdefn(value, self.args)
            # Check for placeholder injection
            _checkaliasarg(self.replacement, self.args)
        except error.ParseError as inst:
            self.error = _('failed to parse the definition of revset alias'
                           ' "%s": %s') % (self.name, parseerrordetail(inst))
2646 2595
2647 2596 def _getalias(aliases, tree):
2648 2597 """If tree looks like an unexpanded alias, return it. Return None
2649 2598 otherwise.
2650 2599 """
2651 2600 if isinstance(tree, tuple) and tree:
2652 2601 if tree[0] == 'symbol' and len(tree) == 2:
2653 2602 name = tree[1]
2654 2603 alias = aliases.get(name)
2655 2604 if alias and alias.args is None and alias.tree == tree:
2656 2605 return alias
2657 2606 if tree[0] == 'func' and len(tree) > 1:
2658 2607 if tree[1][0] == 'symbol' and len(tree[1]) == 2:
2659 2608 name = tree[1][1]
2660 2609 alias = aliases.get(name)
2661 2610 if alias and alias.args is not None and alias.tree == tree[:2]:
2662 2611 return alias
2663 2612 return None
2664 2613
def _expandargs(tree, args):
    """Substitute each ``_aliasarg`` placeholder in ``tree`` with the
    value registered under the same name in ``args``, recursively.
    """
    if not (tree and isinstance(tree, tuple)):
        return tree
    arg = _getaliasarg(tree)
    if arg is None:
        return tuple(_expandargs(subtree, args) for subtree in tree)
    return args[arg]
2675 2624
def _expandaliases(aliases, tree, expanding, cache):
    """Expand aliases in tree, recursively.

    'aliases' is a dictionary mapping user defined aliases to
    revsetalias objects.

    'expanding' is the stack of aliases currently being expanded; it is
    used to detect and refuse infinitely recursive definitions.  'cache'
    maps alias names to their already-expanded replacement trees so each
    alias body is only expanded once.
    """
    if not isinstance(tree, tuple):
        # Do not expand raw strings
        return tree
    alias = _getalias(aliases, tree)
    if alias is not None:
        if alias.error:
            raise error.Abort(alias.error)
        if alias in expanding:
            raise error.ParseError(_('infinite expansion of revset alias "%s" '
                                     'detected') % alias.name)
        expanding.append(alias)
        if alias.name not in cache:
            cache[alias.name] = _expandaliases(aliases, alias.replacement,
                                               expanding, cache)
        result = cache[alias.name]
        expanding.pop()
        if alias.args is not None:
            l = getlist(tree[2])
            if len(l) != len(alias.args):
                raise error.ParseError(
                    _('invalid number of arguments: %s') % len(l))
            # actual arguments are expanded with a fresh 'expanding' stack:
            # they are independent of the alias body being expanded
            l = [_expandaliases(aliases, a, [], cache) for a in l]
            result = _expandargs(result, dict(zip(alias.args, l)))
    else:
        # not an alias reference: recurse into the subtrees
        result = tuple(_expandaliases(aliases, t, expanding, cache)
                       for t in tree)
    return result
2709 2658
def findaliases(ui, tree, showwarning=None):
    """Expand user-configured revset aliases in ``tree``.

    Aliases are read from the ``[revsetalias]`` config section.  When
    ``showwarning`` is provided, a warning is emitted once for each
    broken alias definition.
    """
    _checkaliasarg(tree)
    aliases = {}
    for name, value in ui.configitems('revsetalias'):
        alias = revsetalias(name, value)
        aliases[alias.name] = alias
    tree = _expandaliases(aliases, tree, [], {})
    if showwarning:
        # warn about problematic (but not referred) aliases
        for name in sorted(aliases):
            alias = aliases[name]
            if alias.error and not alias.warned:
                showwarning(_('warning: %s\n') % (alias.error))
                alias.warned = True
    return tree
2724 2673
def foldconcat(tree):
    """Collapse every ``##`` concatenation in ``tree`` into a single
    string node, recursing into subexpressions.
    """
    if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
        return tree
    if tree[0] != '_concat':
        return tuple(foldconcat(t) for t in tree)
    # flatten nested '_concat' nodes depth-first, gathering the leaves
    pieces = []
    stack = [tree]
    while stack:
        node = stack.pop()
        if node[0] == '_concat':
            stack.extend(reversed(node[1:]))
        elif node[0] in ('string', 'symbol'):
            pieces.append(node[1])
        else:
            msg = _("\"##\" can't concatenate \"%s\" element") % (node[0])
            raise error.ParseError(msg)
    return ('string', ''.join(pieces))
2745 2694
def parse(spec, lookup=None):
    """Parse ``spec`` into a revset syntax tree.

    Raises ParseError when the whole string cannot be consumed.
    """
    tree, pos = parser.parser(elements).parse(tokenize(spec, lookup=lookup))
    if pos != len(spec):
        raise error.ParseError(_("invalid token"), pos)
    return parser.simplifyinfixops(tree, ('or',))
2752 2701
def posttreebuilthook(tree, repo):
    """No-op hook point invoked after the revset tree is optimized."""
    # hook for extensions to execute code on the optimized tree
    pass
2756 2705
def match(ui, spec, repo=None):
    """Create a matcher for the single revset specification ``spec``.

    Raises ParseError when ``spec`` is empty.
    """
    if not spec:
        raise error.ParseError(_("empty query"))
    # with a repo, symbols may be looked up as revision identifiers
    lookup = repo.__contains__ if repo else None
    return _makematcher(ui, parse(spec, lookup), repo)
2765 2714
def matchany(ui, specs, repo=None):
    """Create a matcher that will include any revisions matching one of
    the given specs"""
    if not specs:
        # no specs at all: a matcher yielding the empty set
        def mfunc(repo, subset=None):
            return baseset()
        return mfunc
    if not all(specs):
        raise error.ParseError(_("empty query"))
    lookup = repo.__contains__ if repo else None
    if len(specs) == 1:
        tree = parse(specs[0], lookup)
    else:
        # multiple specs are combined under a single top-level 'or'
        tree = ('or',) + tuple(parse(s, lookup) for s in specs)
    return _makematcher(ui, tree, repo)
2783 2732
def _makematcher(ui, tree, repo):
    """Build the revset evaluation function for a parsed ``tree``.

    Alias expansion (when ``ui`` is available), concatenation folding
    and optimization happen once up front; the returned ``mfunc``
    evaluates the optimized tree against a subset.
    """
    if ui:
        tree = findaliases(ui, tree, showwarning=ui.warn)
    tree = foldconcat(tree)
    weight, tree = optimize(tree, True)
    posttreebuilthook(tree, repo)
    def mfunc(repo, subset=None):
        if subset is None:
            subset = fullreposet(repo)
        if not util.safehasattr(subset, 'isascending'):
            # plain collections are wrapped into a smartset first
            subset = baseset(subset)
        return getset(repo, subset, tree)
    return mfunc
2799 2748
def formatspec(expr, *args):
    '''
    This is a convenience function for using revsets internally, and
    escapes arguments appropriately. Aliases are intentionally ignored
    so that intended expression behavior isn't accidentally subverted.

    Supported arguments:

    %r = revset expression, parenthesized
    %d = int(arg), no quoting
    %s = string(arg), escaped and single-quoted
    %b = arg.branch(), escaped and single-quoted
    %n = hex(arg), single-quoted
    %% = a literal '%'

    Prefixing the type with 'l' specifies a parenthesized list of that type.

    >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
    '(10 or 11):: and ((this()) or (that()))'
    >>> formatspec('%d:: and not %d::', 10, 20)
    '10:: and not 20::'
    >>> formatspec('%ld or %ld', [], [1])
    "_list('') or 1"
    >>> formatspec('keyword(%s)', 'foo\\xe9')
    "keyword('foo\\\\xe9')"
    >>> b = lambda: 'default'
    >>> b.branch = b
    >>> formatspec('branch(%b)', b)
    "branch('default')"
    >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
    "root(_list('a\\x00b\\x00c\\x00d'))"
    '''

    def quote(s):
        # escaped, single-quoted string form
        return repr(str(s))

    def argtype(c, arg):
        """Render one argument of type character ``c``."""
        if c == 'd':
            return str(int(arg))
        if c == 's':
            return quote(arg)
        if c == 'r':
            parse(arg) # make sure syntax errors are confined
            return '(%s)' % arg
        if c == 'n':
            return quote(node.hex(arg))
        if c == 'b':
            return quote(arg.branch())

    def listexp(s, t):
        """Render a parenthesized list of type ``t``."""
        l = len(s)
        if l == 0:
            return "_list('')"
        if l == 1:
            return argtype(t, s[0])
        if t == 'd':
            return "_intlist('%s')" % "\0".join(str(int(a)) for a in s)
        if t == 's':
            return "_list('%s')" % "\0".join(s)
        if t == 'n':
            return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
        if t == 'b':
            return "_list('%s')" % "\0".join(a.branch() for a in s)
        # other types (e.g. 'r') are split recursively into halves
        m = l // 2
        return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))

    out = []
    pos = 0
    argindex = 0
    end = len(expr)
    while pos < end:
        c = expr[pos]
        if c != '%':
            out.append(c)
        else:
            pos += 1
            d = expr[pos]
            if d == '%':
                out.append(d)
            elif d in 'dsnbr':
                out.append(argtype(d, args[argindex]))
                argindex += 1
            elif d == 'l':
                # a list of some type
                pos += 1
                d = expr[pos]
                out.append(listexp(list(args[argindex]), d))
                argindex += 1
            else:
                raise error.Abort('unexpected revspec format character %s'
                                  % d)
        pos += 1

    return ''.join(out)
2893 2842
def prettyformat(tree):
    """Return an indented multi-line rendering of a parse tree."""
    return parser.prettyformat(tree, ('string', 'symbol'))
2896 2845
def depth(tree):
    """Return the nesting depth of ``tree`` (0 for a leaf value)."""
    if not isinstance(tree, tuple):
        return 0
    return 1 + max(depth(subtree) for subtree in tree)
2902 2851
def funcsused(tree):
    """Return the set of function names referenced anywhere in ``tree``."""
    if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
        return set()
    funcs = set()
    for subtree in tree[1:]:
        funcs.update(funcsused(subtree))
    if tree[0] == 'func':
        funcs.add(tree[1][1])
    return funcs
2913 2862
class abstractsmartset(object):
    """Abstract base of the smartset classes (ordered sets of revisions).

    Subclasses implement membership, iteration, ordering and length;
    this class derives min/max and the set operators (``&``, ``+``,
    ``-``) from those primitives.
    """

    def __nonzero__(self):
        """True if the smartset is not empty"""
        raise NotImplementedError()

    def __contains__(self, rev):
        """provide fast membership testing"""
        raise NotImplementedError()

    def __iter__(self):
        """iterate the set in the order it is supposed to be iterated"""
        raise NotImplementedError()

    # Attributes containing a function to perform a fast iteration in a given
    # direction. A smartset can have none, one, or both defined.
    #
    # Default value is None instead of a function returning None to avoid
    # initializing an iterator just for testing if a fast method exists.
    fastasc = None
    fastdesc = None

    def isascending(self):
        """True if the set will iterate in ascending order"""
        raise NotImplementedError()

    def isdescending(self):
        """True if the set will iterate in descending order"""
        raise NotImplementedError()

    @util.cachefunc
    def min(self):
        """return the minimum element in the set"""
        if self.fastasc is not None:
            # first value of an ascending iteration is the minimum
            for r in self.fastasc():
                return r
            # fast iterator produced nothing: the set is empty
            raise ValueError('arg is an empty sequence')
        return min(self)

    @util.cachefunc
    def max(self):
        """return the maximum element in the set"""
        if self.fastdesc is not None:
            # first value of a descending iteration is the maximum
            for r in self.fastdesc():
                return r
            # fast iterator produced nothing: the set is empty
            raise ValueError('arg is an empty sequence')
        return max(self)

    def first(self):
        """return the first element in the set (user iteration perspective)

        Return None if the set is empty"""
        raise NotImplementedError()

    def last(self):
        """return the last element in the set (user iteration perspective)

        Return None if the set is empty"""
        raise NotImplementedError()

    def __len__(self):
        """return the length of the smartsets

        This can be expensive on smartset that could be lazy otherwise."""
        raise NotImplementedError()

    def reverse(self):
        """reverse the expected iteration order"""
        raise NotImplementedError()

    def sort(self, reverse=True):
        """get the set to iterate in an ascending or descending order"""
        raise NotImplementedError()

    def __and__(self, other):
        """Returns a new object with the intersection of the two collections.

        This is part of the mandatory API for smartset."""
        if isinstance(other, fullreposet):
            # intersecting with the whole repository changes nothing
            return self
        return self.filter(other.__contains__, cache=False)

    def __add__(self, other):
        """Returns a new object with the union of the two collections.

        This is part of the mandatory API for smartset."""
        return addset(self, other)

    def __sub__(self, other):
        """Returns a new object with the substraction of the two collections.

        This is part of the mandatory API for smartset."""
        c = other.__contains__
        return self.filter(lambda r: not c(r), cache=False)

    def filter(self, condition, cache=True):
        """Returns this smartset filtered by condition as a new smartset.

        `condition` is a callable which takes a revision number and returns a
        boolean.

        This is part of the mandatory API for smartset."""
        # builtin functions (no func_code) cannot be cached, but they
        # do not need to be
        if cache and util.safehasattr(condition, 'func_code'):
            condition = util.cachefunc(condition)
        return filteredset(self, condition)
3020 2969
class baseset(abstractsmartset):
    """Eagerly materialized smartset backed by a plain list.

    Revisions keep their supplied order until ``sort``/``reverse``
    request an explicit direction; a set and a sorted list are derived
    lazily for membership tests and ordered iteration.
    """
    def __init__(self, data=()):
        if isinstance(data, set):
            # reuse the given set as the membership cache
            self._set = data
            data = list(data)
        elif not isinstance(data, list):
            data = list(data)
        self._list = data
        # None: unspecified order, True: ascending, False: descending
        self._ascending = None

    @util.propertycache
    def _set(self):
        return set(self._list)

    @util.propertycache
    def _asclist(self):
        return sorted(self._list)

    def __iter__(self):
        direction = self._ascending
        if direction is None:
            return iter(self._list)
        if direction:
            return iter(self._asclist)
        return reversed(self._asclist)

    def fastasc(self):
        return iter(self._asclist)

    def fastdesc(self):
        return reversed(self._asclist)

    @util.propertycache
    def __contains__(self):
        return self._set.__contains__

    def __nonzero__(self):
        return bool(self._list)

    def sort(self, reverse=False):
        self._ascending = not reverse

    def reverse(self):
        if self._ascending is None:
            # no declared direction: reverse the stored order in place
            self._list.reverse()
        else:
            self._ascending = not self._ascending

    def __len__(self):
        return len(self._list)

    def isascending(self):
        """Returns True if the collection is ascending order, False if not.

        This is part of the mandatory API for smartset."""
        return len(self) <= 1 or self._ascending is True

    def isdescending(self):
        """Returns True if the collection is descending order, False if not.

        This is part of the mandatory API for smartset."""
        return len(self) <= 1 or self._ascending is False

    def first(self):
        if not self:
            return None
        if self._ascending is None:
            return self._list[0]
        if self._ascending:
            return self._asclist[0]
        return self._asclist[-1]

    def last(self):
        if not self:
            return None
        if self._ascending is None:
            return self._list[-1]
        if self._ascending:
            return self._asclist[-1]
        return self._asclist[0]

    def __repr__(self):
        d = {None: '', False: '-', True: '+'}[self._ascending]
        return '<%s%s %r>' % (type(self).__name__, d, self._list)
3117 3066
class filteredset(abstractsmartset):
    """Lazy smartset wrapping another smartset with a membership predicate.

    Iteration and ordering are delegated to the wrapped subset; only
    the revisions for which the predicate holds are exposed.
    """
    def __init__(self, subset, condition=lambda x: True):
        """
        condition: a function that decide whether a revision in the subset
        belongs to the revset or not.
        """
        self._subset = subset
        self._condition = condition

    def __contains__(self, x):
        return x in self._subset and self._condition(x)

    def __iter__(self):
        return self._iterfilter(self._subset)

    def _iterfilter(self, it):
        # bind the predicate once; the generator expression stays lazy
        cond = self._condition
        return (x for x in it if cond(x))

    @property
    def fastasc(self):
        it = self._subset.fastasc
        return None if it is None else (lambda: self._iterfilter(it()))

    @property
    def fastdesc(self):
        it = self._subset.fastdesc
        return None if it is None else (lambda: self._iterfilter(it()))

    def __nonzero__(self):
        # prefer a fast iterator, in either direction, to probe emptiness
        fast = self.fastasc or self.fastdesc
        if fast is not None:
            probe = fast()
        else:
            probe = self
        for r in probe:
            return True
        return False

    def __len__(self):
        # Basic implementation to be changed in future patches.
        return len(baseset(list(self)))

    def sort(self, reverse=False):
        self._subset.sort(reverse=reverse)

    def reverse(self):
        self._subset.reverse()

    def isascending(self):
        return self._subset.isascending()

    def isdescending(self):
        return self._subset.isdescending()

    def first(self):
        return next(iter(self), None)

    def last(self):
        it = None
        if self.isascending():
            it = self.fastdesc
        elif self.isdescending():
            it = self.fastasc
        if it is None:
            # no fast reversed iterator: exhaust the iteration and keep
            # the final value
            x = None
            for x in self:
                pass
            return x
        for x in it():
            return x
        return None # empty case

    def __repr__(self):
        return '<%s %r>' % (type(self).__name__, self._subset)
3210 3159
3211 3160 def _iterordered(ascending, iter1, iter2):
3212 3161 """produce an ordered iteration from two iterators with the same order
3213 3162
3214 3163 The ascending is used to indicated the iteration direction.
3215 3164 """
3216 3165 choice = max
3217 3166 if ascending:
3218 3167 choice = min
3219 3168
3220 3169 val1 = None
3221 3170 val2 = None
3222 3171 try:
3223 3172 # Consume both iterators in an ordered way until one is empty
3224 3173 while True:
3225 3174 if val1 is None:
3226 3175 val1 = iter1.next()
3227 3176 if val2 is None:
3228 3177 val2 = iter2.next()
3229 3178 next = choice(val1, val2)
3230 3179 yield next
3231 3180 if val1 == next:
3232 3181 val1 = None
3233 3182 if val2 == next:
3234 3183 val2 = None
3235 3184 except StopIteration:
3236 3185 # Flush any remaining values and consume the other one
3237 3186 it = iter2
3238 3187 if val1 is not None:
3239 3188 yield val1
3240 3189 it = iter1
3241 3190 elif val2 is not None:
3242 3191 # might have been equality and both are empty
3243 3192 yield val2
3244 3193 for val in it:
3245 3194 yield val
3246 3195
class addset(abstractsmartset):
    """Represent the addition of two sets

    Wrapper structure for lazily adding two structures without losing much
    performance on the __contains__ method

    If the ascending attribute is set, that means the two structures are
    ordered in either an ascending or descending way. Therefore, we can add
    them maintaining the order by iterating over both at the same time

    >>> xs = baseset([0, 3, 2])
    >>> ys = baseset([5, 2, 4])

    >>> rs = addset(xs, ys)
    >>> bool(rs), 0 in rs, 1 in rs, 5 in rs, rs.first(), rs.last()
    (True, True, False, True, 0, 4)
    >>> rs = addset(xs, baseset([]))
    >>> bool(rs), 0 in rs, 1 in rs, rs.first(), rs.last()
    (True, True, False, 0, 2)
    >>> rs = addset(baseset([]), baseset([]))
    >>> bool(rs), 0 in rs, rs.first(), rs.last()
    (False, False, None, None)

    iterate unsorted:
    >>> rs = addset(xs, ys)
    >>> [x for x in rs] # without _genlist
    [0, 3, 2, 5, 4]
    >>> assert not rs._genlist
    >>> len(rs)
    5
    >>> [x for x in rs] # with _genlist
    [0, 3, 2, 5, 4]
    >>> assert rs._genlist

    iterate ascending:
    >>> rs = addset(xs, ys, ascending=True)
    >>> [x for x in rs], [x for x in rs.fastasc()] # without _asclist
    ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
    >>> assert not rs._asclist
    >>> len(rs)
    5
    >>> [x for x in rs], [x for x in rs.fastasc()]
    ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
    >>> assert rs._asclist

    iterate descending:
    >>> rs = addset(xs, ys, ascending=False)
    >>> [x for x in rs], [x for x in rs.fastdesc()] # without _asclist
    ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
    >>> assert not rs._asclist
    >>> len(rs)
    5
    >>> [x for x in rs], [x for x in rs.fastdesc()]
    ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
    >>> assert rs._asclist

    iterate ascending without fastasc:
    >>> rs = addset(xs, generatorset(ys), ascending=True)
    >>> assert rs.fastasc is None
    >>> [x for x in rs]
    [0, 2, 3, 4, 5]

    iterate descending without fastdesc:
    >>> rs = addset(generatorset(xs), ys, ascending=False)
    >>> assert rs.fastdesc is None
    >>> [x for x in rs]
    [5, 4, 3, 2, 0]
    """
    def __init__(self, revs1, revs2, ascending=None):
        # the two wrapped smartsets
        self._r1 = revs1
        self._r2 = revs2
        self._iter = None
        # None: arbitrary order, True/False: ascending/descending
        self._ascending = ascending
        # lazily-built caches (see _list and _trysetasclist)
        self._genlist = None
        self._asclist = None

    def __len__(self):
        # forces materialization of the whole union
        return len(self._list)

    def __nonzero__(self):
        # non-empty if either component is non-empty
        return bool(self._r1) or bool(self._r2)

    @util.propertycache
    def _list(self):
        # baseset of all generated values, built on first use
        if not self._genlist:
            self._genlist = baseset(iter(self))
        return self._genlist

    def __iter__(self):
        """Iterate over both collections without repeating elements

        If the ascending attribute is not set, iterate over the first one and
        then over the second one checking for membership on the first one so we
        dont yield any duplicates.

        If the ascending attribute is set, iterate over both collections at the
        same time, yielding only one value at a time in the given order.
        """
        if self._ascending is None:
            if self._genlist:
                return iter(self._genlist)
            def arbitraryordergen():
                # all of _r1, then the part of _r2 not already seen in _r1
                for r in self._r1:
                    yield r
                inr1 = self._r1.__contains__
                for r in self._r2:
                    if not inr1(r):
                        yield r
            return arbitraryordergen()
        # try to use our own fast iterator if it exists
        self._trysetasclist()
        if self._ascending:
            attr = 'fastasc'
        else:
            attr = 'fastdesc'
        it = getattr(self, attr)
        if it is not None:
            return it()
        # maybe half of the component supports fast
        # get iterator for _r1
        iter1 = getattr(self._r1, attr)
        if iter1 is None:
            # let's avoid side effect (not sure it matters)
            iter1 = iter(sorted(self._r1, reverse=not self._ascending))
        else:
            iter1 = iter1()
        # get iterator for _r2
        iter2 = getattr(self._r2, attr)
        if iter2 is None:
            # let's avoid side effect (not sure it matters)
            iter2 = iter(sorted(self._r2, reverse=not self._ascending))
        else:
            iter2 = iter2()
        return _iterordered(self._ascending, iter1, iter2)

    def _trysetasclist(self):
        """populate the _asclist attribute if possible and necessary"""
        if self._genlist is not None and self._asclist is None:
            self._asclist = sorted(self._genlist)

    @property
    def fastasc(self):
        # prefer the materialized sorted list when it already exists
        self._trysetasclist()
        if self._asclist is not None:
            return self._asclist.__iter__
        iter1 = self._r1.fastasc
        iter2 = self._r2.fastasc
        if None in (iter1, iter2):
            # both components must support fast ascending iteration
            return None
        return lambda: _iterordered(True, iter1(), iter2())

    @property
    def fastdesc(self):
        # prefer the materialized sorted list when it already exists
        self._trysetasclist()
        if self._asclist is not None:
            return self._asclist.__reversed__
        iter1 = self._r1.fastdesc
        iter2 = self._r2.fastdesc
        if None in (iter1, iter2):
            # both components must support fast descending iteration
            return None
        return lambda: _iterordered(False, iter1(), iter2())

    def __contains__(self, x):
        return x in self._r1 or x in self._r2

    def sort(self, reverse=False):
        """Sort the added set

        For this we use the cached list with all the generated values and if we
        know they are ascending or descending we can sort them in a smart way.
        """
        self._ascending = not reverse

    def isascending(self):
        return self._ascending is not None and self._ascending

    def isdescending(self):
        return self._ascending is not None and not self._ascending

    def reverse(self):
        if self._ascending is None:
            # arbitrary order: reverse the materialized list in place
            self._list.reverse()
        else:
            self._ascending = not self._ascending

    def first(self):
        for x in self:
            return x
        return None

    def last(self):
        # iterate in the opposite direction and take the first element
        self.reverse()
        val = self.first()
        self.reverse()
        return val

    def __repr__(self):
        d = {None: '', False: '-', True: '+'}[self._ascending]
        return '<%s%s %r, %r>' % (type(self).__name__, d, self._r1, self._r2)
3446 3395
class generatorset(abstractsmartset):
    """Wrap a generator for lazy iteration

    Wrapper structure for generators that provides lazy membership and can
    be iterated more than once.
    When asked for membership it generates values until either it finds the
    requested one or has gone through all the elements in the generator
    """
    def __init__(self, gen, iterasc=None):
        """
        gen: a generator producing the values for the generatorset.
        iterasc: None when the generator's order is unknown, True when it
            yields ascending values, False when descending. A known order
            lets membership tests stop early and enables fast iteration
            without exhausting the generator first.
        """
        self._gen = gen
        # sorted list of all values, built once the generator is exhausted
        self._asclist = None
        # membership cache: value -> bool
        self._cache = {}
        # values produced so far, in generation order
        self._genlist = []
        self._finished = False
        self._ascending = True
        if iterasc is not None:
            # shadow the class-level methods with order-aware variants
            # NOTE(review): the `in` operator looks __contains__ up on the
            # type, not the instance; this instance assignment only affects
            # direct self.__contains__(x) calls — confirm intent
            if iterasc:
                self.fastasc = self._iterator
                self.__contains__ = self._asccontains
            else:
                self.fastdesc = self._iterator
                self.__contains__ = self._desccontains

    def __nonzero__(self):
        # Do not use 'for r in self' because it will enforce the iteration
        # order (default ascending), possibly unrolling a whole descending
        # iterator.
        if self._genlist:
            return True
        for r in self._consumegen():
            return True
        return False

    def __contains__(self, x):
        # generic membership test, used when the generator order is unknown:
        # consume until x is found or the generator is exhausted
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True

        self._cache[x] = False
        return False

    def _asccontains(self, x):
        """version of contains optimised for ascending generator"""
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True
            # ascending stream: once past x it can no longer appear
            if l > x:
                break

        self._cache[x] = False
        return False

    def _desccontains(self, x):
        """version of contains optimised for descending generator"""
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True
            # descending stream: once below x it can no longer appear
            if l < x:
                break

        self._cache[x] = False
        return False

    def __iter__(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        if it is not None:
            return it()
        # we need to consume the iterator
        for x in self._consumegen():
            pass
        # recall the same code
        return iter(self)

    def _iterator(self):
        """iterate in generation order, sharing progress with any other
        concurrent iterator over the same instance"""
        if self._finished:
            return iter(self._genlist)

        # We have to use this complex iteration strategy to allow multiple
        # iterations at the same time. We need to be able to catch revision
        # removed from _consumegen and added to genlist in another instance.
        #
        # Getting rid of it would provide an about 15% speed up on this
        # iteration.
        genlist = self._genlist
        nextrev = self._consumegen().next
        _len = len # cache global lookup
        def gen():
            i = 0
            while True:
                if i < _len(genlist):
                    yield genlist[i]
                else:
                    yield nextrev()
                i += 1
        return gen()

    def _consumegen(self):
        """pull values from the wrapped generator, caching and recording
        each one before yielding it"""
        cache = self._cache
        genlist = self._genlist.append
        for item in self._gen:
            cache[item] = True
            genlist(item)
            yield item
        if not self._finished:
            self._finished = True
            # generator fully consumed: build the sorted list and expose
            # cheap fast iterators from now on
            asc = self._genlist[:]
            asc.sort()
            self._asclist = asc
            self.fastasc = asc.__iter__
            self.fastdesc = asc.__reversed__

    def __len__(self):
        # length requires full consumption of the generator
        for x in self._consumegen():
            pass
        return len(self._genlist)

    def sort(self, reverse=False):
        """set iteration direction (ascending unless reverse is true)"""
        self._ascending = not reverse

    def reverse(self):
        """flip the iteration direction"""
        self._ascending = not self._ascending

    def isascending(self):
        return self._ascending

    def isdescending(self):
        return not self._ascending

    def first(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        if it is None:
            # we need to consume all and try again
            for x in self._consumegen():
                pass
            return self.first()
        return next(it(), None)

    def last(self):
        if self._ascending:
            it = self.fastdesc
        else:
            it = self.fastasc
        if it is None:
            # we need to consume all and try again
            # bug fix: retry last(), not first() — the original returned
            # the first element here; consuming the generator installed
            # fastasc/fastdesc, so this recursion terminates
            for x in self._consumegen():
                pass
            return self.last()
        return next(it(), None)

    def __repr__(self):
        d = {False: '-', True: '+'}[self._ascending]
        return '<%s%s>' % (type(self).__name__, d)
3620 3569
class spanset(abstractsmartset):
    """Duck type for baseset class which represents a range of revisions and
    can work lazily and without having all the range in memory

    Note that spanset(x, y) behave almost like xrange(x, y) except for two
    notable points:
    - when x < y it will be automatically descending,
    - revision filtered with this repoview will be skipped.

    """
    def __init__(self, repo, start=0, end=None):
        """
        start: first revision included the set
            (default to 0)
        end: first revision excluded (last+1)
            (default to len(repo)

        Spanset will be descending if `end` < `start`.
        """
        if end is None:
            end = len(repo)
        self._ascending = start <= end
        if not self._ascending:
            # normalize bounds so that _start <= _end always holds;
            # the direction is carried by _ascending alone
            start, end = end + 1, start + 1
        self._start = start
        self._end = end
        self._hiddenrevs = repo.changelog.filteredrevs

    def sort(self, reverse=False):
        """set iteration direction (ascending unless reverse is true)"""
        self._ascending = not reverse

    def reverse(self):
        """flip the iteration direction"""
        self._ascending = not self._ascending

    def _iterfilter(self, iterrange):
        """yield revisions from iterrange, skipping hidden ones"""
        s = self._hiddenrevs
        for r in iterrange:
            if r not in s:
                yield r

    def __iter__(self):
        if self._ascending:
            return self.fastasc()
        else:
            return self.fastdesc()

    def fastasc(self):
        iterrange = xrange(self._start, self._end)
        if self._hiddenrevs:
            return self._iterfilter(iterrange)
        return iter(iterrange)

    def fastdesc(self):
        iterrange = xrange(self._end - 1, self._start - 1, -1)
        if self._hiddenrevs:
            return self._iterfilter(iterrange)
        return iter(iterrange)

    def __contains__(self, rev):
        hidden = self._hiddenrevs
        return ((self._start <= rev < self._end)
                and not (hidden and rev in hidden))

    def __nonzero__(self):
        # non-empty when at least one revision survives filtering
        for r in self:
            return True
        return False

    def __len__(self):
        # raw span length minus the hidden revisions that fall inside it
        if not self._hiddenrevs:
            return abs(self._end - self._start)
        else:
            count = 0
            start = self._start
            end = self._end
            for rev in self._hiddenrevs:
                # __init__ guarantees _start <= _end, so the historical
                # extra test for a reversed range (end < rev <= start)
                # could never match and has been dropped
                if start <= rev < end:
                    count += 1
            return abs(self._end - self._start) - count

    def isascending(self):
        return self._ascending

    def isdescending(self):
        return not self._ascending

    def first(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        for x in it():
            return x
        return None

    def last(self):
        if self._ascending:
            it = self.fastdesc
        else:
            it = self.fastasc
        for x in it():
            return x
        return None

    def __repr__(self):
        d = {False: '-', True: '+'}[self._ascending]
        return '<%s%s %d:%d>' % (type(self).__name__, d,
                                 self._start, self._end - 1)
3729 3678
class fullreposet(spanset):
    """a set containing all revisions in the repo

    This class exists to host special optimization and magic to handle virtual
    revisions such as "null".
    """

    def __init__(self, repo):
        super(fullreposet, self).__init__(repo)

    def __and__(self, other):
        """As self contains the whole repo, all of the other set should also be
        in self. Therefore `self & other = other`.

        This boldly assumes the other contains valid revs only.
        """
        if not util.safehasattr(other, 'isascending'):
            # `other` is a plain container rather than a smartset (it was
            # combined with "&", so assume set-like): wrap it, dropping
            # hidden revisions — this boldly assumes smartsets are already
            # free of hidden revs
            other = baseset(other - self._hiddenrevs)

        # XXX Sorting here is wrong whenever fullreposet is used as a
        # bootstrap set: with a giveme312() revset returning [3,1,2],
        # 'hg log -r "giveme312()"' yields 1, 2, 3 (wrong). It cannot
        # simply be dropped, because other usage still needs the sort:
        # 'hg log -r "all() and giveme312()"' -> 1, 2, 3 (right). Some
        # faulty revset implementations also rely on it (e.g. children as
        # of its state in e8075329c5fb). Once both points are fixed, this
        # call can move inside the if-clause above.
        other.sort(reverse=self.isdescending())
        return other
3768 3717
def prettyformatset(revs):
    """Return an indented multi-line rendering of a smartset repr.

    The repr is split at every '<' and each fragment is indented by its
    nesting depth, i.e. the number of unmatched '<' before it.
    """
    text = repr(revs)
    pieces = []
    pos = 0
    total = len(text)
    while pos < total:
        nxt = text.find('<', pos + 1)
        if nxt < 0:
            nxt = total
        depth = text.count('<', 0, pos) - text.count('>', 0, pos)
        assert depth >= 0
        pieces.append((depth, text[pos:nxt].rstrip()))
        pos = nxt
    return '\n'.join(' ' * depth + frag for depth, frag in pieces)
3782 3731
# tell hggettext to extract docstrings from these functions:
# ('symbols' is presumably the predicate-name -> function table defined
# earlier in this module — verify against the full file)
i18nfunctions = symbols.values()
General Comments 0
You need to be logged in to leave comments. Login now