revset: make _parsealiasdecl() simply return the original parsed tree...
Yuya Nishihara
r28708:ab06b5ef default
@@ -1,3586 +1,3586 b''
1 1 # revset.py - revision set queries for mercurial
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import heapq
11 11 import re
12 12
13 13 from .i18n import _
14 14 from . import (
15 15 destutil,
16 16 encoding,
17 17 error,
18 18 hbisect,
19 19 match as matchmod,
20 20 node,
21 21 obsolete as obsmod,
22 22 parser,
23 23 pathutil,
24 24 phases,
25 25 registrar,
26 26 repoview,
27 27 util,
28 28 )
29 29
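# Note (added for illustration): when followfirst is true, the parent slice in
# iterate() becomes parentrevs(current)[:1], so only first-parent ancestry is
# walked and the second parent of merges is never visited.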
30 30 def _revancestors(repo, revs, followfirst):
31 31 """Like revlog.ancestors(), but supports followfirst."""
32 32 if followfirst:
33 33 cut = 1
34 34 else:
35 35 cut = None
36 36 cl = repo.changelog
37 37
38 38 def iterate():
39 39 revs.sort(reverse=True)
40 40 irevs = iter(revs)
41 41 h = []
42 42
43 43 inputrev = next(irevs, None)
44 44 if inputrev is not None:
45 45 heapq.heappush(h, -inputrev)
46 46
47 47 seen = set()
48 48 while h:
49 49 current = -heapq.heappop(h)
50 50 if current == inputrev:
51 51 inputrev = next(irevs, None)
52 52 if inputrev is not None:
53 53 heapq.heappush(h, -inputrev)
54 54 if current not in seen:
55 55 seen.add(current)
56 56 yield current
57 57 for parent in cl.parentrevs(current)[:cut]:
58 58 if parent != node.nullrev:
59 59 heapq.heappush(h, -parent)
60 60
61 61 return generatorset(iterate(), iterasc=False)
62 62
63 63 def _revdescendants(repo, revs, followfirst):
64 64 """Like revlog.descendants() but supports followfirst."""
65 65 if followfirst:
66 66 cut = 1
67 67 else:
68 68 cut = None
69 69
70 70 def iterate():
71 71 cl = repo.changelog
72 72 # XXX this should be 'parentset.min()' assuming 'parentset' is a
73 73 # smartset (and if it is not, it should.)
74 74 first = min(revs)
75 75 nullrev = node.nullrev
76 76 if first == nullrev:
77 77 # Are there nodes with a null first parent and a non-null
78 78 # second one? Maybe. Do we care? Probably not.
79 79 for i in cl:
80 80 yield i
81 81 else:
82 82 seen = set(revs)
83 83 for i in cl.revs(first + 1):
84 84 for x in cl.parentrevs(i)[:cut]:
85 85 if x != nullrev and x in seen:
86 86 seen.add(i)
87 87 yield i
88 88 break
89 89
90 90 return generatorset(iterate(), iterasc=True)
91 91
92 92 def _reachablerootspure(repo, minroot, roots, heads, includepath):
93 93 """return (heads(::<roots> and ::<heads>))
94 94
95 95 If includepath is True, return (<roots>::<heads>)."""
96 96 if not roots:
97 97 return []
98 98 parentrevs = repo.changelog.parentrevs
99 99 roots = set(roots)
100 100 visit = list(heads)
101 101 reachable = set()
102 102 seen = {}
103 103 # prefetch all the things! (because python is slow)
104 104 reached = reachable.add
105 105 dovisit = visit.append
106 106 nextvisit = visit.pop
107 107 # open-code the post-order traversal due to the tiny size of
108 108 # sys.getrecursionlimit()
109 109 while visit:
110 110 rev = nextvisit()
111 111 if rev in roots:
112 112 reached(rev)
113 113 if not includepath:
114 114 continue
115 115 parents = parentrevs(rev)
116 116 seen[rev] = parents
117 117 for parent in parents:
118 118 if parent >= minroot and parent not in seen:
119 119 dovisit(parent)
120 120 if not reachable:
121 121 return baseset()
122 122 if not includepath:
123 123 return reachable
124 124 for rev in sorted(seen):
125 125 for parent in seen[rev]:
126 126 if parent in reachable:
127 127 reached(rev)
128 128 return reachable
129 129
130 130 def reachableroots(repo, roots, heads, includepath=False):
131 131 """return (heads(::<roots> and ::<heads>))
132 132
133 133 If includepath is True, return (<roots>::<heads>)."""
134 134 if not roots:
135 135 return baseset()
136 136 minroot = roots.min()
137 137 roots = list(roots)
138 138 heads = list(heads)
139 139 try:
140 140 revs = repo.changelog.reachableroots(minroot, heads, roots, includepath)
141 141 except AttributeError:
142 142 revs = _reachablerootspure(repo, minroot, roots, heads, includepath)
143 143 revs = baseset(revs)
144 144 revs.sort()
145 145 return revs
146 146
147 147 elements = {
148 148 # token-type: binding-strength, primary, prefix, infix, suffix
149 149 "(": (21, None, ("group", 1, ")"), ("func", 1, ")"), None),
150 150 "##": (20, None, None, ("_concat", 20), None),
151 151 "~": (18, None, None, ("ancestor", 18), None),
152 152 "^": (18, None, None, ("parent", 18), ("parentpost", 18)),
153 153 "-": (5, None, ("negate", 19), ("minus", 5), None),
154 154 "::": (17, None, ("dagrangepre", 17), ("dagrange", 17),
155 155 ("dagrangepost", 17)),
156 156 "..": (17, None, ("dagrangepre", 17), ("dagrange", 17),
157 157 ("dagrangepost", 17)),
158 158 ":": (15, "rangeall", ("rangepre", 15), ("range", 15), ("rangepost", 15)),
159 159 "not": (10, None, ("not", 10), None, None),
160 160 "!": (10, None, ("not", 10), None, None),
161 161 "and": (5, None, None, ("and", 5), None),
162 162 "&": (5, None, None, ("and", 5), None),
163 163 "%": (5, None, None, ("only", 5), ("onlypost", 5)),
164 164 "or": (4, None, None, ("or", 4), None),
165 165 "|": (4, None, None, ("or", 4), None),
166 166 "+": (4, None, None, ("or", 4), None),
167 167 "=": (3, None, None, ("keyvalue", 3), None),
168 168 ",": (2, None, None, ("list", 2), None),
169 169 ")": (0, None, None, None, None),
170 170 "symbol": (0, "symbol", None, None, None),
171 171 "string": (0, "string", None, None, None),
172 172 "end": (0, None, None, None, None),
173 173 }
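# Reading guide (added for illustration): each entry maps a token type to
# (binding strength, primary, prefix, infix, suffix) parser actions.  For
# example the infix entry for "~" makes "foo~2" parse as
#     ('ancestor', ('symbol', 'foo'), ('symbol', '2'))
# and the prefix entry for "-" makes "-foo" parse as
#     ('negate', ('symbol', 'foo'))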
174 174
175 175 keywords = set(['and', 'or', 'not'])
176 176
177 177 # default set of valid characters for the initial letter of symbols
178 178 _syminitletters = set(c for c in [chr(i) for i in xrange(256)]
179 179 if c.isalnum() or c in '._@' or ord(c) > 127)
180 180
181 181 # default set of valid characters for non-initial letters of symbols
182 182 _symletters = set(c for c in [chr(i) for i in xrange(256)]
183 183 if c.isalnum() or c in '-._/@' or ord(c) > 127)
184 184
185 185 def tokenize(program, lookup=None, syminitletters=None, symletters=None):
186 186 '''
187 187 Parse a revset statement into a stream of tokens
188 188
189 189 ``syminitletters`` is the set of valid characters for the initial
190 190 letter of symbols.
191 191
192 192 By default, character ``c`` is recognized as valid for initial
193 193 letter of symbols, if ``c.isalnum() or c in '._@' or ord(c) > 127``.
194 194
195 195 ``symletters`` is the set of valid characters for non-initial
196 196 letters of symbols.
197 197
198 198 By default, character ``c`` is recognized as valid for non-initial
199 199 letters of symbols, if ``c.isalnum() or c in '-._/@' or ord(c) > 127``.
200 200
201 201 Check that @ is a valid unquoted token character (issue3686):
202 202 >>> list(tokenize("@::"))
203 203 [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]
204 204
205 205 '''
206 206 if syminitletters is None:
207 207 syminitletters = _syminitletters
208 208 if symletters is None:
209 209 symletters = _symletters
210 210
211 211 if program and lookup:
212 212 # attempt to parse old-style ranges first to deal with
213 213 # things like old-tag which contain query metacharacters
214 214 parts = program.split(':', 1)
215 215 if all(lookup(sym) for sym in parts if sym):
216 216 if parts[0]:
217 217 yield ('symbol', parts[0], 0)
218 218 if len(parts) > 1:
219 219 s = len(parts[0])
220 220 yield (':', None, s)
221 221 if parts[1]:
222 222 yield ('symbol', parts[1], s + 1)
223 223 yield ('end', None, len(program))
224 224 return
225 225
226 226 pos, l = 0, len(program)
227 227 while pos < l:
228 228 c = program[pos]
229 229 if c.isspace(): # skip inter-token whitespace
230 230 pass
231 231 elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
232 232 yield ('::', None, pos)
233 233 pos += 1 # skip ahead
234 234 elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
235 235 yield ('..', None, pos)
236 236 pos += 1 # skip ahead
237 237 elif c == '#' and program[pos:pos + 2] == '##': # look ahead carefully
238 238 yield ('##', None, pos)
239 239 pos += 1 # skip ahead
240 240 elif c in "():=,-|&+!~^%": # handle simple operators
241 241 yield (c, None, pos)
242 242 elif (c in '"\'' or c == 'r' and
243 243 program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
244 244 if c == 'r':
245 245 pos += 1
246 246 c = program[pos]
247 247 decode = lambda x: x
248 248 else:
249 249 decode = parser.unescapestr
250 250 pos += 1
251 251 s = pos
252 252 while pos < l: # find closing quote
253 253 d = program[pos]
254 254 if d == '\\': # skip over escaped characters
255 255 pos += 2
256 256 continue
257 257 if d == c:
258 258 yield ('string', decode(program[s:pos]), s)
259 259 break
260 260 pos += 1
261 261 else:
262 262 raise error.ParseError(_("unterminated string"), s)
263 263 # gather up a symbol/keyword
264 264 elif c in syminitletters:
265 265 s = pos
266 266 pos += 1
267 267 while pos < l: # find end of symbol
268 268 d = program[pos]
269 269 if d not in symletters:
270 270 break
271 271 if d == '.' and program[pos - 1] == '.': # special case for ..
272 272 pos -= 1
273 273 break
274 274 pos += 1
275 275 sym = program[s:pos]
276 276 if sym in keywords: # operator keywords
277 277 yield (sym, None, s)
278 278 elif '-' in sym:
279 279 # some jerk gave us foo-bar-baz, try to check if it's a symbol
280 280 if lookup and lookup(sym):
281 281 # looks like a real symbol
282 282 yield ('symbol', sym, s)
283 283 else:
284 284 # looks like an expression
285 285 parts = sym.split('-')
286 286 for p in parts[:-1]:
287 287 if p: # possible consecutive -
288 288 yield ('symbol', p, s)
289 289 s += len(p)
290 290 yield ('-', None, pos)
291 291 s += 1
292 292 if parts[-1]: # possible trailing -
293 293 yield ('symbol', parts[-1], s)
294 294 else:
295 295 yield ('symbol', sym, s)
296 296 pos -= 1
297 297 else:
298 298 raise error.ParseError(_("syntax error in revset '%s'") %
299 299 program, pos)
300 300 pos += 1
301 301 yield ('end', None, pos)
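# Worked example for tokenize(), added for illustration:
#
#     >>> list(tokenize("keyword(bug)"))
#     [('symbol', 'keyword', 0), ('(', None, 7), ('symbol', 'bug', 8),
#      (')', None, 11), ('end', None, 12)]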
302 302
303 303 def parseerrordetail(inst):
304 304 """Compose error message from specified ParseError object
305 305 """
306 306 if len(inst.args) > 1:
307 307 return _('at %s: %s') % (inst.args[1], inst.args[0])
308 308 else:
309 309 return inst.args[0]
310 310
311 311 # helpers
312 312
313 313 def getstring(x, err):
314 314 if x and (x[0] == 'string' or x[0] == 'symbol'):
315 315 return x[1]
316 316 raise error.ParseError(err)
317 317
318 318 def getlist(x):
319 319 if not x:
320 320 return []
321 321 if x[0] == 'list':
322 322 return list(x[1:])
323 323 return [x]
324 324
325 325 def getargs(x, min, max, err):
326 326 l = getlist(x)
327 327 if len(l) < min or (max >= 0 and len(l) > max):
328 328 raise error.ParseError(err)
329 329 return l
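# Examples (illustrative): getlist(('list', ('symbol', 'a'), ('symbol', 'b')))
# returns [('symbol', 'a'), ('symbol', 'b')]; getlist(None) returns []; and
# getargs() raises ParseError when the argument count falls outside [min, max].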
330 330
331 331 def getargsdict(x, funcname, keys):
332 332 return parser.buildargsdict(getlist(x), funcname, keys.split(),
333 333 keyvaluenode='keyvalue', keynode='symbol')
334 334
335 335 def getset(repo, subset, x):
336 336 if not x:
337 337 raise error.ParseError(_("missing argument"))
338 338 s = methods[x[0]](repo, subset, *x[1:])
339 339 if util.safehasattr(s, 'isascending'):
340 340 return s
341 341 if (repo.ui.configbool('devel', 'all-warnings')
342 342 or repo.ui.configbool('devel', 'old-revset')):
343 343         # The else case should not happen, because all non-func nodes are
344 344         # internal; ignoring it for now.
345 345 if x[0] == 'func' and x[1][0] == 'symbol' and x[1][1] in symbols:
346 346 repo.ui.develwarn('revset "%s" use list instead of smartset, '
347 347 '(upgrade your code)' % x[1][1])
348 348 return baseset(s)
349 349
350 350 def _getrevsource(repo, r):
351 351 extra = repo[r].extra()
352 352 for label in ('source', 'transplant_source', 'rebase_source'):
353 353 if label in extra:
354 354 try:
355 355 return repo[extra[label]].rev()
356 356 except error.RepoLookupError:
357 357 pass
358 358 return None
359 359
360 360 # operator methods
361 361
362 362 def stringset(repo, subset, x):
363 363 x = repo[x].rev()
364 364 if (x in subset
365 365 or x == node.nullrev and isinstance(subset, fullreposet)):
366 366 return baseset([x])
367 367 return baseset()
368 368
369 369 def rangeset(repo, subset, x, y):
370 370 m = getset(repo, fullreposet(repo), x)
371 371 n = getset(repo, fullreposet(repo), y)
372 372
373 373 if not m or not n:
374 374 return baseset()
375 375 m, n = m.first(), n.last()
376 376
377 377 if m == n:
378 378 r = baseset([m])
379 379 elif n == node.wdirrev:
380 380 r = spanset(repo, m, len(repo)) + baseset([n])
381 381 elif m == node.wdirrev:
382 382 r = baseset([m]) + spanset(repo, len(repo) - 1, n - 1)
383 383 elif m < n:
384 384 r = spanset(repo, m, n + 1)
385 385 else:
386 386 r = spanset(repo, m, n - 1)
387 387 # XXX We should combine with subset first: 'subset & baseset(...)'. This is
388 388 # necessary to ensure we preserve the order in subset.
389 389 #
390 390 # This has performance implication, carrying the sorting over when possible
391 391 # would be more efficient.
392 392 return r & subset
393 393
394 394 def dagrange(repo, subset, x, y):
395 395 r = fullreposet(repo)
396 396 xs = reachableroots(repo, getset(repo, r, x), getset(repo, r, y),
397 397 includepath=True)
398 398 # XXX We should combine with subset first: 'subset & baseset(...)'. This is
399 399 # necessary to ensure we preserve the order in subset.
400 400 return xs & subset
401 401
402 402 def andset(repo, subset, x, y):
403 403 return getset(repo, getset(repo, subset, x), y)
404 404
405 405 def differenceset(repo, subset, x, y):
406 406 return getset(repo, subset, x) - getset(repo, subset, y)
407 407
408 408 def orset(repo, subset, *xs):
409 409 assert xs
410 410 if len(xs) == 1:
411 411 return getset(repo, subset, xs[0])
412 412 p = len(xs) // 2
413 413 a = orset(repo, subset, *xs[:p])
414 414 b = orset(repo, subset, *xs[p:])
415 415 return a + b
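# Note (illustrative): orset() splits its operands in half and recurses, so
# 'a or b or c or d' is combined as (a + b) + (c + d), keeping the recursion
# depth logarithmic in the number of operands.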
416 416
417 417 def notset(repo, subset, x):
418 418 return subset - getset(repo, subset, x)
419 419
420 420 def listset(repo, subset, *xs):
421 421 raise error.ParseError(_("can't use a list in this context"),
422 422 hint=_('see hg help "revsets.x or y"'))
423 423
424 424 def keyvaluepair(repo, subset, k, v):
425 425 raise error.ParseError(_("can't use a key-value pair in this context"))
426 426
427 427 def func(repo, subset, a, b):
428 428 if a[0] == 'symbol' and a[1] in symbols:
429 429 return symbols[a[1]](repo, subset, b)
430 430
431 431 keep = lambda fn: getattr(fn, '__doc__', None) is not None
432 432
433 433 syms = [s for (s, fn) in symbols.items() if keep(fn)]
434 434 raise error.UnknownIdentifier(a[1], syms)
435 435
436 436 # functions
437 437
438 438 # symbols are callables like:
439 439 # fn(repo, subset, x)
440 440 # with:
441 441 # repo - current repository instance
442 442 # subset - of revisions to be examined
443 443 # x - argument in tree form
444 444 symbols = {}
445 445
446 446 # symbols which can't be used for a DoS attack for any given input
447 447 # (e.g. those which accept regexes as plain strings shouldn't be included)
448 448 # functions that just return a lot of changesets (like all) don't count here
449 449 safesymbols = set()
450 450
451 451 predicate = registrar.revsetpredicate()
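# A minimal sketch of the fn(repo, subset, x) contract described above
# (hypothetical, not part of this module):
#
#     @predicate('firstrev()', safe=True)
#     def firstrev(repo, subset, x):
#         """Changeset with revision number 0, if present in the subset."""
#         getargs(x, 0, 0, _("firstrev takes no arguments"))
#         return subset & baseset([0])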
452 452
453 453 @predicate('_destupdate')
454 454 def _destupdate(repo, subset, x):
455 455 # experimental revset for update destination
456 456 args = getargsdict(x, 'limit', 'clean check')
457 457 return subset & baseset([destutil.destupdate(repo, **args)[0]])
458 458
459 459 @predicate('_destmerge')
460 460 def _destmerge(repo, subset, x):
461 461 # experimental revset for merge destination
462 462 sourceset = None
463 463 if x is not None:
464 464 sourceset = getset(repo, fullreposet(repo), x)
465 465 return subset & baseset([destutil.destmerge(repo, sourceset=sourceset)])
466 466
467 467 @predicate('adds(pattern)', safe=True)
468 468 def adds(repo, subset, x):
469 469 """Changesets that add a file matching pattern.
470 470
471 471 The pattern without explicit kind like ``glob:`` is expected to be
472 472 relative to the current directory and match against a file or a
473 473 directory.
474 474 """
475 475 # i18n: "adds" is a keyword
476 476 pat = getstring(x, _("adds requires a pattern"))
477 477 return checkstatus(repo, subset, pat, 1)
478 478
479 479 @predicate('ancestor(*changeset)', safe=True)
480 480 def ancestor(repo, subset, x):
481 481 """A greatest common ancestor of the changesets.
482 482
483 483 Accepts 0 or more changesets.
484 484 Will return empty list when passed no args.
485 485 Greatest common ancestor of a single changeset is that changeset.
486 486 """
487 487 # i18n: "ancestor" is a keyword
488 488 l = getlist(x)
489 489 rl = fullreposet(repo)
490 490 anc = None
491 491
492 492 # (getset(repo, rl, i) for i in l) generates a list of lists
493 493 for revs in (getset(repo, rl, i) for i in l):
494 494 for r in revs:
495 495 if anc is None:
496 496 anc = repo[r]
497 497 else:
498 498 anc = anc.ancestor(repo[r])
499 499
500 500 if anc is not None and anc.rev() in subset:
501 501 return baseset([anc.rev()])
502 502 return baseset()
503 503
504 504 def _ancestors(repo, subset, x, followfirst=False):
505 505 heads = getset(repo, fullreposet(repo), x)
506 506 if not heads:
507 507 return baseset()
508 508 s = _revancestors(repo, heads, followfirst)
509 509 return subset & s
510 510
511 511 @predicate('ancestors(set)', safe=True)
512 512 def ancestors(repo, subset, x):
513 513 """Changesets that are ancestors of a changeset in set.
514 514 """
515 515 return _ancestors(repo, subset, x)
516 516
517 517 @predicate('_firstancestors', safe=True)
518 518 def _firstancestors(repo, subset, x):
519 519 # ``_firstancestors(set)``
520 520 # Like ``ancestors(set)`` but follows only the first parents.
521 521 return _ancestors(repo, subset, x, followfirst=True)
522 522
523 523 def ancestorspec(repo, subset, x, n):
524 524 """``set~n``
525 525 Changesets that are the Nth ancestor (first parents only) of a changeset
526 526 in set.
527 527 """
528 528 try:
529 529 n = int(n[1])
530 530 except (TypeError, ValueError):
531 531 raise error.ParseError(_("~ expects a number"))
532 532 ps = set()
533 533 cl = repo.changelog
534 534 for r in getset(repo, fullreposet(repo), x):
535 535 for i in range(n):
536 536 r = cl.parentrevs(r)[0]
537 537 ps.add(r)
538 538 return subset & ps
539 539
540 540 @predicate('author(string)', safe=True)
541 541 def author(repo, subset, x):
542 542 """Alias for ``user(string)``.
543 543 """
544 544 # i18n: "author" is a keyword
545 545 n = encoding.lower(getstring(x, _("author requires a string")))
546 546 kind, pattern, matcher = _substringmatcher(n)
547 547 return subset.filter(lambda x: matcher(encoding.lower(repo[x].user())),
548 548 condrepr=('<user %r>', n))
549 549
550 550 @predicate('bisect(string)', safe=True)
551 551 def bisect(repo, subset, x):
552 552 """Changesets marked in the specified bisect status:
553 553
554 554 - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
555 555 - ``goods``, ``bads`` : csets topologically good/bad
556 556 - ``range`` : csets taking part in the bisection
557 557 - ``pruned`` : csets that are goods, bads or skipped
558 558 - ``untested`` : csets whose fate is yet unknown
559 559 - ``ignored`` : csets ignored due to DAG topology
560 560 - ``current`` : the cset currently being bisected
561 561 """
562 562 # i18n: "bisect" is a keyword
563 563 status = getstring(x, _("bisect requires a string")).lower()
564 564 state = set(hbisect.get(repo, status))
565 565 return subset & state
566 566
567 567 # Backward-compatibility
568 568 # - no help entry so that we do not advertise it any more
569 569 @predicate('bisected', safe=True)
570 570 def bisected(repo, subset, x):
571 571 return bisect(repo, subset, x)
572 572
573 573 @predicate('bookmark([name])', safe=True)
574 574 def bookmark(repo, subset, x):
575 575 """The named bookmark or all bookmarks.
576 576
577 577 If `name` starts with `re:`, the remainder of the name is treated as
578 578 a regular expression. To match a bookmark that actually starts with `re:`,
579 579 use the prefix `literal:`.
580 580 """
581 581 # i18n: "bookmark" is a keyword
582 582 args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
583 583 if args:
584 584 bm = getstring(args[0],
585 585 # i18n: "bookmark" is a keyword
586 586 _('the argument to bookmark must be a string'))
587 587 kind, pattern, matcher = util.stringmatcher(bm)
588 588 bms = set()
589 589 if kind == 'literal':
590 590 bmrev = repo._bookmarks.get(pattern, None)
591 591 if not bmrev:
592 592 raise error.RepoLookupError(_("bookmark '%s' does not exist")
593 593 % pattern)
594 594 bms.add(repo[bmrev].rev())
595 595 else:
596 596 matchrevs = set()
597 597 for name, bmrev in repo._bookmarks.iteritems():
598 598 if matcher(name):
599 599 matchrevs.add(bmrev)
600 600 if not matchrevs:
601 601 raise error.RepoLookupError(_("no bookmarks exist"
602 602 " that match '%s'") % pattern)
603 603 for bmrev in matchrevs:
604 604 bms.add(repo[bmrev].rev())
605 605 else:
606 606 bms = set([repo[r].rev()
607 607 for r in repo._bookmarks.values()])
608 608 bms -= set([node.nullrev])
609 609 return subset & bms
610 610
611 611 @predicate('branch(string or set)', safe=True)
612 612 def branch(repo, subset, x):
613 613 """
614 614 All changesets belonging to the given branch or the branches of the given
615 615 changesets.
616 616
617 617 If `string` starts with `re:`, the remainder of the name is treated as
618 618 a regular expression. To match a branch that actually starts with `re:`,
619 619 use the prefix `literal:`.
620 620 """
621 621 getbi = repo.revbranchcache().branchinfo
622 622
623 623 try:
624 624 b = getstring(x, '')
625 625 except error.ParseError:
626 626 # not a string, but another revspec, e.g. tip()
627 627 pass
628 628 else:
629 629 kind, pattern, matcher = util.stringmatcher(b)
630 630 if kind == 'literal':
631 631 # note: falls through to the revspec case if no branch with
632 632 # this name exists and pattern kind is not specified explicitly
633 633 if pattern in repo.branchmap():
634 634 return subset.filter(lambda r: matcher(getbi(r)[0]),
635 635 condrepr=('<branch %r>', b))
636 636 if b.startswith('literal:'):
637 637 raise error.RepoLookupError(_("branch '%s' does not exist")
638 638 % pattern)
639 639 else:
640 640 return subset.filter(lambda r: matcher(getbi(r)[0]),
641 641 condrepr=('<branch %r>', b))
642 642
643 643 s = getset(repo, fullreposet(repo), x)
644 644 b = set()
645 645 for r in s:
646 646 b.add(getbi(r)[0])
647 647 c = s.__contains__
648 648 return subset.filter(lambda r: c(r) or getbi(r)[0] in b,
649 649 condrepr=lambda: '<branch %r>' % sorted(b))
650 650
651 651 @predicate('bumped()', safe=True)
652 652 def bumped(repo, subset, x):
653 653 """Mutable changesets marked as successors of public changesets.
654 654
655 655 Only non-public and non-obsolete changesets can be `bumped`.
656 656 """
657 657 # i18n: "bumped" is a keyword
658 658 getargs(x, 0, 0, _("bumped takes no arguments"))
659 659 bumped = obsmod.getrevs(repo, 'bumped')
660 660 return subset & bumped
661 661
662 662 @predicate('bundle()', safe=True)
663 663 def bundle(repo, subset, x):
664 664 """Changesets in the bundle.
665 665
666 666 Bundle must be specified by the -R option."""
667 667
668 668 try:
669 669 bundlerevs = repo.changelog.bundlerevs
670 670 except AttributeError:
671 671 raise error.Abort(_("no bundle provided - specify with -R"))
672 672 return subset & bundlerevs
673 673
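# Note (added for illustration): 'field' below indexes the status tuple
# returned by repo.status() (modified, added, removed, ...), which is why
# modifies() calls checkstatus(..., 0) and adds() calls checkstatus(..., 1).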
674 674 def checkstatus(repo, subset, pat, field):
675 675 hasset = matchmod.patkind(pat) == 'set'
676 676
677 677 mcache = [None]
678 678 def matches(x):
679 679 c = repo[x]
680 680 if not mcache[0] or hasset:
681 681 mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
682 682 m = mcache[0]
683 683 fname = None
684 684 if not m.anypats() and len(m.files()) == 1:
685 685 fname = m.files()[0]
686 686 if fname is not None:
687 687 if fname not in c.files():
688 688 return False
689 689 else:
690 690 for f in c.files():
691 691 if m(f):
692 692 break
693 693 else:
694 694 return False
695 695 files = repo.status(c.p1().node(), c.node())[field]
696 696 if fname is not None:
697 697 if fname in files:
698 698 return True
699 699 else:
700 700 for f in files:
701 701 if m(f):
702 702 return True
703 703
704 704 return subset.filter(matches, condrepr=('<status[%r] %r>', field, pat))
705 705
706 706 def _children(repo, narrow, parentset):
707 707 if not parentset:
708 708 return baseset()
709 709 cs = set()
710 710 pr = repo.changelog.parentrevs
711 711 minrev = parentset.min()
712 712 for r in narrow:
713 713 if r <= minrev:
714 714 continue
715 715 for p in pr(r):
716 716 if p in parentset:
717 717 cs.add(r)
718 718 # XXX using a set to feed the baseset is wrong. Sets are not ordered.
719 719 # This does not break because of other fullreposet misbehavior.
720 720 return baseset(cs)
721 721
722 722 @predicate('children(set)', safe=True)
723 723 def children(repo, subset, x):
724 724 """Child changesets of changesets in set.
725 725 """
726 726 s = getset(repo, fullreposet(repo), x)
727 727 cs = _children(repo, subset, s)
728 728 return subset & cs
729 729
730 730 @predicate('closed()', safe=True)
731 731 def closed(repo, subset, x):
732 732 """Changeset is closed.
733 733 """
734 734 # i18n: "closed" is a keyword
735 735 getargs(x, 0, 0, _("closed takes no arguments"))
736 736 return subset.filter(lambda r: repo[r].closesbranch(),
737 737 condrepr='<branch closed>')
738 738
739 739 @predicate('contains(pattern)')
740 740 def contains(repo, subset, x):
741 741 """The revision's manifest contains a file matching pattern (but might not
742 742 modify it). See :hg:`help patterns` for information about file patterns.
743 743
744 744 The pattern without explicit kind like ``glob:`` is expected to be
745 745 relative to the current directory and match against a file exactly
746 746 for efficiency.
747 747 """
748 748 # i18n: "contains" is a keyword
749 749 pat = getstring(x, _("contains requires a pattern"))
750 750
751 751 def matches(x):
752 752 if not matchmod.patkind(pat):
753 753 pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
754 754 if pats in repo[x]:
755 755 return True
756 756 else:
757 757 c = repo[x]
758 758 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
759 759 for f in c.manifest():
760 760 if m(f):
761 761 return True
762 762 return False
763 763
764 764 return subset.filter(matches, condrepr=('<contains %r>', pat))
765 765
766 766 @predicate('converted([id])', safe=True)
767 767 def converted(repo, subset, x):
768 768 """Changesets converted from the given identifier in the old repository if
769 769 present, or all converted changesets if no identifier is specified.
770 770 """
771 771
772 772 # There is exactly no chance of resolving the revision, so do a simple
773 773 # string compare and hope for the best
774 774
775 775 rev = None
776 776 # i18n: "converted" is a keyword
777 777 l = getargs(x, 0, 1, _('converted takes one or no arguments'))
778 778 if l:
779 779 # i18n: "converted" is a keyword
780 780 rev = getstring(l[0], _('converted requires a revision'))
781 781
782 782 def _matchvalue(r):
783 783 source = repo[r].extra().get('convert_revision', None)
784 784 return source is not None and (rev is None or source.startswith(rev))
785 785
786 786 return subset.filter(lambda r: _matchvalue(r),
787 787 condrepr=('<converted %r>', rev))
788 788
789 789 @predicate('date(interval)', safe=True)
790 790 def date(repo, subset, x):
791 791 """Changesets within the interval, see :hg:`help dates`.
792 792 """
793 793 # i18n: "date" is a keyword
794 794 ds = getstring(x, _("date requires a string"))
795 795 dm = util.matchdate(ds)
796 796 return subset.filter(lambda x: dm(repo[x].date()[0]),
797 797 condrepr=('<date %r>', ds))
798 798
799 799 @predicate('desc(string)', safe=True)
800 800 def desc(repo, subset, x):
801 801 """Search commit message for string. The match is case-insensitive.
802 802 """
803 803 # i18n: "desc" is a keyword
804 804 ds = encoding.lower(getstring(x, _("desc requires a string")))
805 805
806 806 def matches(x):
807 807 c = repo[x]
808 808 return ds in encoding.lower(c.description())
809 809
810 810 return subset.filter(matches, condrepr=('<desc %r>', ds))
811 811
812 812 def _descendants(repo, subset, x, followfirst=False):
813 813 roots = getset(repo, fullreposet(repo), x)
814 814 if not roots:
815 815 return baseset()
816 816 s = _revdescendants(repo, roots, followfirst)
817 817
818 818 # Both sets need to be ascending in order to lazily return the union
819 819 # in the correct order.
820 820 base = subset & roots
821 821 desc = subset & s
822 822 result = base + desc
823 823 if subset.isascending():
824 824 result.sort()
825 825 elif subset.isdescending():
826 826 result.sort(reverse=True)
827 827 else:
828 828 result = subset & result
829 829 return result
830 830
831 831 @predicate('descendants(set)', safe=True)
832 832 def descendants(repo, subset, x):
833 833 """Changesets which are descendants of changesets in set.
834 834 """
835 835 return _descendants(repo, subset, x)
836 836
837 837 @predicate('_firstdescendants', safe=True)
838 838 def _firstdescendants(repo, subset, x):
839 839 # ``_firstdescendants(set)``
840 840 # Like ``descendants(set)`` but follows only the first parents.
841 841 return _descendants(repo, subset, x, followfirst=True)
842 842
843 843 @predicate('destination([set])', safe=True)
844 844 def destination(repo, subset, x):
845 845 """Changesets that were created by a graft, transplant or rebase operation,
846 846 with the given revisions specified as the source. Omitting the optional set
847 847 is the same as passing all().
848 848 """
849 849 if x is not None:
850 850 sources = getset(repo, fullreposet(repo), x)
851 851 else:
852 852 sources = fullreposet(repo)
853 853
854 854 dests = set()
855 855
856 856 # subset contains all of the possible destinations that can be returned, so
857 857 # iterate over them and see if their source(s) were provided in the arg set.
858 858 # Even if the immediate src of r is not in the arg set, src's source (or
859 859 # further back) may be. Scanning back further than the immediate src allows
860 860 # transitive transplants and rebases to yield the same results as transitive
861 861 # grafts.
862 862 for r in subset:
863 863 src = _getrevsource(repo, r)
864 864 lineage = None
865 865
866 866 while src is not None:
867 867 if lineage is None:
868 868 lineage = list()
869 869
870 870 lineage.append(r)
871 871
872 872 # The visited lineage is a match if the current source is in the arg
873 873 # set. Since every candidate dest is visited by way of iterating
874 874 # subset, any dests further back in the lineage will be tested by a
875 875 # different iteration over subset. Likewise, if the src was already
876 876 # selected, the current lineage can be selected without going back
877 877 # further.
878 878 if src in sources or src in dests:
879 879 dests.update(lineage)
880 880 break
881 881
882 882 r = src
883 883 src = _getrevsource(repo, r)
884 884
885 885 return subset.filter(dests.__contains__,
886 886 condrepr=lambda: '<destination %r>' % sorted(dests))
887 887
888 888 @predicate('divergent()', safe=True)
889 889 def divergent(repo, subset, x):
890 890 """
891 891 Final successors of changesets with an alternative set of final successors.
892 892 """
893 893 # i18n: "divergent" is a keyword
894 894 getargs(x, 0, 0, _("divergent takes no arguments"))
895 895 divergent = obsmod.getrevs(repo, 'divergent')
896 896 return subset & divergent
897 897
898 898 @predicate('extinct()', safe=True)
899 899 def extinct(repo, subset, x):
900 900 """Obsolete changesets with obsolete descendants only.
901 901 """
902 902 # i18n: "extinct" is a keyword
903 903 getargs(x, 0, 0, _("extinct takes no arguments"))
904 904 extincts = obsmod.getrevs(repo, 'extinct')
905 905 return subset & extincts
906 906
907 907 @predicate('extra(label, [value])', safe=True)
908 908 def extra(repo, subset, x):
909 909 """Changesets with the given label in the extra metadata, with the given
910 910 optional value.
911 911
912 912 If `value` starts with `re:`, the remainder of the value is treated as
913 913 a regular expression. To match a value that actually starts with `re:`,
914 914 use the prefix `literal:`.
915 915 """
916 916 args = getargsdict(x, 'extra', 'label value')
917 917 if 'label' not in args:
918 918 # i18n: "extra" is a keyword
919 919 raise error.ParseError(_('extra takes at least 1 argument'))
920 920 # i18n: "extra" is a keyword
921 921 label = getstring(args['label'], _('first argument to extra must be '
922 922 'a string'))
923 923 value = None
924 924
925 925 if 'value' in args:
926 926 # i18n: "extra" is a keyword
927 927 value = getstring(args['value'], _('second argument to extra must be '
928 928 'a string'))
929 929 kind, value, matcher = util.stringmatcher(value)
930 930
931 931 def _matchvalue(r):
932 932 extra = repo[r].extra()
933 933 return label in extra and (value is None or matcher(extra[label]))
934 934
935 935 return subset.filter(lambda r: _matchvalue(r),
936 936 condrepr=('<extra[%r] %r>', label, value))
937 937
938 938 @predicate('filelog(pattern)', safe=True)
939 939 def filelog(repo, subset, x):
940 940 """Changesets connected to the specified filelog.
941 941
942 942 For performance reasons, visits only revisions mentioned in the file-level
943 943 filelog, rather than filtering through all changesets (much faster, but
944 944 doesn't include deletes or duplicate changes). For a slower, more accurate
945 945 result, use ``file()``.
946 946
947 947 The pattern without explicit kind like ``glob:`` is expected to be
948 948 relative to the current directory and match against a file exactly
949 949 for efficiency.
950 950
951 951 If some linkrev points to revisions filtered by the current repoview, we'll
952 952 work around it to return a non-filtered value.
953 953 """
954 954
955 955 # i18n: "filelog" is a keyword
956 956 pat = getstring(x, _("filelog requires a pattern"))
957 957 s = set()
958 958 cl = repo.changelog
959 959
960 960 if not matchmod.patkind(pat):
961 961 f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
962 962 files = [f]
963 963 else:
964 964 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
965 965 files = (f for f in repo[None] if m(f))
966 966
967 967 for f in files:
968 968 fl = repo.file(f)
969 969 known = {}
970 970 scanpos = 0
971 971 for fr in list(fl):
972 972 fn = fl.node(fr)
973 973 if fn in known:
974 974 s.add(known[fn])
975 975 continue
976 976
977 977 lr = fl.linkrev(fr)
978 978 if lr in cl:
979 979 s.add(lr)
980 980 elif scanpos is not None:
981 981 # lowest matching changeset is filtered, scan further
982 982 # ahead in changelog
983 983 start = max(lr, scanpos) + 1
984 984 scanpos = None
985 985 for r in cl.revs(start):
986 986 # minimize parsing of non-matching entries
987 987 if f in cl.revision(r) and f in cl.readfiles(r):
988 988 try:
989 989 # try to use manifest delta fastpath
990 990 n = repo[r].filenode(f)
991 991 if n not in known:
992 992 if n == fn:
993 993 s.add(r)
994 994 scanpos = r
995 995 break
996 996 else:
997 997 known[n] = r
998 998 except error.ManifestLookupError:
999 999 # deletion in changelog
1000 1000 continue
1001 1001
1002 1002 return subset & s
1003 1003
1004 1004 @predicate('first(set, [n])', safe=True)
1005 1005 def first(repo, subset, x):
1006 1006 """An alias for limit().
1007 1007 """
1008 1008 return limit(repo, subset, x)
1009 1009
1010 1010 def _follow(repo, subset, x, name, followfirst=False):
1011 1011 l = getargs(x, 0, 1, _("%s takes no arguments or a pattern") % name)
1012 1012 c = repo['.']
1013 1013 if l:
1014 1014 x = getstring(l[0], _("%s expected a pattern") % name)
1015 1015 matcher = matchmod.match(repo.root, repo.getcwd(), [x],
1016 1016 ctx=repo[None], default='path')
1017 1017
1018 1018 files = c.manifest().walk(matcher)
1019 1019
1020 1020 s = set()
1021 1021 for fname in files:
1022 1022 fctx = c[fname]
1023 1023 s = s.union(set(c.rev() for c in fctx.ancestors(followfirst)))
1024 1024 # include the revision responsible for the most recent version
1025 1025 s.add(fctx.introrev())
1026 1026 else:
1027 1027 s = _revancestors(repo, baseset([c.rev()]), followfirst)
1028 1028
1029 1029 return subset & s
1030 1030
1031 1031 @predicate('follow([pattern])', safe=True)
1032 1032 def follow(repo, subset, x):
1033 1033 """
1034 1034 An alias for ``::.`` (ancestors of the working directory's first parent).
1035 1035     If pattern is specified, the histories of files matching the given
1036 1036     pattern are followed, including copies.
1037 1037 """
1038 1038 return _follow(repo, subset, x, 'follow')
1039 1039
1040 1040 @predicate('_followfirst', safe=True)
1041 1041 def _followfirst(repo, subset, x):
1042 1042 # ``followfirst([pattern])``
1043 1043 # Like ``follow([pattern])`` but follows only the first parent of
1044 1044     # every revision or file revision.
1045 1045 return _follow(repo, subset, x, '_followfirst', followfirst=True)
1046 1046
1047 1047 @predicate('all()', safe=True)
1048 1048 def getall(repo, subset, x):
1049 1049 """All changesets, the same as ``0:tip``.
1050 1050 """
1051 1051 # i18n: "all" is a keyword
1052 1052 getargs(x, 0, 0, _("all takes no arguments"))
1053 1053 return subset & spanset(repo) # drop "null" if any
1054 1054
1055 1055 @predicate('grep(regex)')
1056 1056 def grep(repo, subset, x):
1057 1057 """Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
1058 1058 to ensure special escape characters are handled correctly. Unlike
1059 1059 ``keyword(string)``, the match is case-sensitive.
1060 1060 """
1061 1061 try:
1062 1062 # i18n: "grep" is a keyword
1063 1063 gr = re.compile(getstring(x, _("grep requires a string")))
1064 1064 except re.error as e:
1065 1065 raise error.ParseError(_('invalid match pattern: %s') % e)
1066 1066
1067 1067 def matches(x):
1068 1068 c = repo[x]
1069 1069 for e in c.files() + [c.user(), c.description()]:
1070 1070 if gr.search(e):
1071 1071 return True
1072 1072 return False
1073 1073
1074 1074 return subset.filter(matches, condrepr=('<grep %r>', gr.pattern))
1075 1075
1076 1076 @predicate('_matchfiles', safe=True)
1077 1077 def _matchfiles(repo, subset, x):
1078 1078 # _matchfiles takes a revset list of prefixed arguments:
1079 1079 #
1080 1080 # [p:foo, i:bar, x:baz]
1081 1081 #
1082 1082 # builds a match object from them and filters subset. Allowed
1083 1083 # prefixes are 'p:' for regular patterns, 'i:' for include
1084 1084 # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
1085 1085 # a revision identifier, or the empty string to reference the
1086 1086 # working directory, from which the match object is
1087 1087 # initialized. Use 'd:' to set the default matching mode, default
1088 1088 # to 'glob'. At most one 'r:' and 'd:' argument can be passed.
1089 1089
1090 1090 l = getargs(x, 1, -1, "_matchfiles requires at least one argument")
1091 1091 pats, inc, exc = [], [], []
1092 1092 rev, default = None, None
1093 1093 for arg in l:
1094 1094 s = getstring(arg, "_matchfiles requires string arguments")
1095 1095 prefix, value = s[:2], s[2:]
1096 1096 if prefix == 'p:':
1097 1097 pats.append(value)
1098 1098 elif prefix == 'i:':
1099 1099 inc.append(value)
1100 1100 elif prefix == 'x:':
1101 1101 exc.append(value)
1102 1102 elif prefix == 'r:':
1103 1103 if rev is not None:
1104 1104 raise error.ParseError('_matchfiles expected at most one '
1105 1105 'revision')
1106 1106 if value != '': # empty means working directory; leave rev as None
1107 1107 rev = value
1108 1108 elif prefix == 'd:':
1109 1109 if default is not None:
1110 1110 raise error.ParseError('_matchfiles expected at most one '
1111 1111 'default mode')
1112 1112 default = value
1113 1113 else:
1114 1114 raise error.ParseError('invalid _matchfiles prefix: %s' % prefix)
1115 1115 if not default:
1116 1116 default = 'glob'
1117 1117
1118 1118 m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
1119 1119 exclude=exc, ctx=repo[rev], default=default)
1120 1120
1121 1121     # This directly reads the changelog data, as creating changectx for all
1122 1122 # revisions is quite expensive.
1123 1123 getfiles = repo.changelog.readfiles
1124 1124 wdirrev = node.wdirrev
1125 1125 def matches(x):
1126 1126 if x == wdirrev:
1127 1127 files = repo[x].files()
1128 1128 else:
1129 1129 files = getfiles(x)
1130 1130 for f in files:
1131 1131 if m(f):
1132 1132 return True
1133 1133 return False
1134 1134
1135 1135 return subset.filter(matches,
1136 1136 condrepr=('<matchfiles patterns=%r, include=%r '
1137 1137 'exclude=%r, default=%r, rev=%r>',
1138 1138 pats, inc, exc, default, rev))
1139 1139
1140 1140 @predicate('file(pattern)', safe=True)
1141 1141 def hasfile(repo, subset, x):
1142 1142 """Changesets affecting files matched by pattern.
1143 1143
1144 1144 For a faster but less accurate result, consider using ``filelog()``
1145 1145 instead.
1146 1146
1147 1147 This predicate uses ``glob:`` as the default kind of pattern.
1148 1148 """
1149 1149 # i18n: "file" is a keyword
1150 1150 pat = getstring(x, _("file requires a pattern"))
1151 1151 return _matchfiles(repo, subset, ('string', 'p:' + pat))
1152 1152
1153 1153 @predicate('head()', safe=True)
1154 1154 def head(repo, subset, x):
1155 1155 """Changeset is a named branch head.
1156 1156 """
1157 1157 # i18n: "head" is a keyword
1158 1158 getargs(x, 0, 0, _("head takes no arguments"))
1159 1159 hs = set()
1160 1160 cl = repo.changelog
1161 1161 for b, ls in repo.branchmap().iteritems():
1162 1162 hs.update(cl.rev(h) for h in ls)
1163 1163 # XXX using a set to feed the baseset is wrong. Sets are not ordered.
1164 1164 # This does not break because of other fullreposet misbehavior.
1165 1165 # XXX We should combine with subset first: 'subset & baseset(...)'. This is
1166 1166 # necessary to ensure we preserve the order in subset.
1167 1167 return baseset(hs) & subset
1168 1168
1169 1169 @predicate('heads(set)', safe=True)
1170 1170 def heads(repo, subset, x):
1171 1171 """Members of set with no children in set.
1172 1172 """
1173 1173 s = getset(repo, subset, x)
1174 1174 ps = parents(repo, subset, x)
1175 1175 return s - ps
1176 1176
1177 1177 @predicate('hidden()', safe=True)
1178 1178 def hidden(repo, subset, x):
1179 1179 """Hidden changesets.
1180 1180 """
1181 1181 # i18n: "hidden" is a keyword
1182 1182 getargs(x, 0, 0, _("hidden takes no arguments"))
1183 1183 hiddenrevs = repoview.filterrevs(repo, 'visible')
1184 1184 return subset & hiddenrevs
1185 1185
1186 1186 @predicate('keyword(string)', safe=True)
1187 1187 def keyword(repo, subset, x):
1188 1188 """Search commit message, user name, and names of changed files for
1189 1189 string. The match is case-insensitive.
1190 1190 """
1191 1191 # i18n: "keyword" is a keyword
1192 1192 kw = encoding.lower(getstring(x, _("keyword requires a string")))
1193 1193
1194 1194 def matches(r):
1195 1195 c = repo[r]
1196 1196 return any(kw in encoding.lower(t)
1197 1197 for t in c.files() + [c.user(), c.description()])
1198 1198
1199 1199 return subset.filter(matches, condrepr=('<keyword %r>', kw))
1200 1200
1201 1201 @predicate('limit(set[, n[, offset]])', safe=True)
1202 1202 def limit(repo, subset, x):
1203 1203 """First n members of set, defaulting to 1, starting from offset.
1204 1204 """
1205 1205 args = getargsdict(x, 'limit', 'set n offset')
1206 1206 if 'set' not in args:
1207 1207 # i18n: "limit" is a keyword
1208 1208 raise error.ParseError(_("limit requires one to three arguments"))
1209 1209 try:
1210 1210 lim, ofs = 1, 0
1211 1211 if 'n' in args:
1212 1212 # i18n: "limit" is a keyword
1213 1213 lim = int(getstring(args['n'], _("limit requires a number")))
1214 1214 if 'offset' in args:
1215 1215 # i18n: "limit" is a keyword
1216 1216 ofs = int(getstring(args['offset'], _("limit requires a number")))
1217 1217 if ofs < 0:
1218 1218 raise error.ParseError(_("negative offset"))
1219 1219 except (TypeError, ValueError):
1220 1220 # i18n: "limit" is a keyword
1221 1221 raise error.ParseError(_("limit expects a number"))
1222 1222 os = getset(repo, fullreposet(repo), args['set'])
1223 1223 result = []
1224 1224 it = iter(os)
1225 1225 for x in xrange(ofs):
1226 1226 y = next(it, None)
1227 1227 if y is None:
1228 1228 break
1229 1229 for x in xrange(lim):
1230 1230 y = next(it, None)
1231 1231 if y is None:
1232 1232 break
1233 1233 elif y in subset:
1234 1234 result.append(y)
1235 1235 return baseset(result, datarepr=('<limit n=%d, offset=%d, %r, %r>',
1236 1236 lim, ofs, subset, os))
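# Illustrative example: in a repository with at least six revisions,
# 'limit(all(), 2, 4)' skips the first four revisions of all() and returns
# the next two that are also in the subset, i.e. revisions 4 and 5.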
1237 1237
1238 1238 @predicate('last(set, [n])', safe=True)
1239 1239 def last(repo, subset, x):
1240 1240 """Last n members of set, defaulting to 1.
1241 1241 """
1242 1242 # i18n: "last" is a keyword
1243 1243 l = getargs(x, 1, 2, _("last requires one or two arguments"))
1244 1244 try:
1245 1245 lim = 1
1246 1246 if len(l) == 2:
1247 1247 # i18n: "last" is a keyword
1248 1248 lim = int(getstring(l[1], _("last requires a number")))
1249 1249 except (TypeError, ValueError):
1250 1250 # i18n: "last" is a keyword
1251 1251 raise error.ParseError(_("last expects a number"))
1252 1252 os = getset(repo, fullreposet(repo), l[0])
1253 1253 os.reverse()
1254 1254 result = []
1255 1255 it = iter(os)
1256 1256 for x in xrange(lim):
1257 1257 y = next(it, None)
1258 1258 if y is None:
1259 1259 break
1260 1260 elif y in subset:
1261 1261 result.append(y)
1262 1262 return baseset(result, datarepr=('<last n=%d, %r, %r>', lim, subset, os))
1263 1263
1264 1264 @predicate('max(set)', safe=True)
1265 1265 def maxrev(repo, subset, x):
1266 1266 """Changeset with highest revision number in set.
1267 1267 """
1268 1268 os = getset(repo, fullreposet(repo), x)
1269 1269 try:
1270 1270 m = os.max()
1271 1271 if m in subset:
1272 1272 return baseset([m], datarepr=('<max %r, %r>', subset, os))
1273 1273 except ValueError:
1274 1274 # os.max() throws a ValueError when the collection is empty.
1275 1275 # Same as python's max().
1276 1276 pass
1277 1277 return baseset(datarepr=('<max %r, %r>', subset, os))
1278 1278
1279 1279 @predicate('merge()', safe=True)
1280 1280 def merge(repo, subset, x):
1281 1281 """Changeset is a merge changeset.
1282 1282 """
1283 1283 # i18n: "merge" is a keyword
1284 1284 getargs(x, 0, 0, _("merge takes no arguments"))
1285 1285 cl = repo.changelog
1286 1286 return subset.filter(lambda r: cl.parentrevs(r)[1] != -1,
1287 1287 condrepr='<merge>')
1288 1288
1289 1289 @predicate('branchpoint()', safe=True)
1290 1290 def branchpoint(repo, subset, x):
1291 1291 """Changesets with more than one child.
1292 1292 """
1293 1293 # i18n: "branchpoint" is a keyword
1294 1294 getargs(x, 0, 0, _("branchpoint takes no arguments"))
1295 1295 cl = repo.changelog
1296 1296 if not subset:
1297 1297 return baseset()
1298 1298 # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
1299 1299 # (and if it is not, it should.)
1300 1300 baserev = min(subset)
1301 1301 parentscount = [0]*(len(repo) - baserev)
1302 1302 for r in cl.revs(start=baserev + 1):
1303 1303 for p in cl.parentrevs(r):
1304 1304 if p >= baserev:
1305 1305 parentscount[p - baserev] += 1
1306 1306 return subset.filter(lambda r: parentscount[r - baserev] > 1,
1307 1307 condrepr='<branchpoint>')
1308 1308
1309 1309 @predicate('min(set)', safe=True)
1310 1310 def minrev(repo, subset, x):
1311 1311 """Changeset with lowest revision number in set.
1312 1312 """
1313 1313 os = getset(repo, fullreposet(repo), x)
1314 1314 try:
1315 1315 m = os.min()
1316 1316 if m in subset:
1317 1317 return baseset([m], datarepr=('<min %r, %r>', subset, os))
1318 1318 except ValueError:
1319 1319 # os.min() throws a ValueError when the collection is empty.
1320 1320 # Same as python's min().
1321 1321 pass
1322 1322 return baseset(datarepr=('<min %r, %r>', subset, os))
1323 1323
1324 1324 @predicate('modifies(pattern)', safe=True)
1325 1325 def modifies(repo, subset, x):
1326 1326 """Changesets modifying files matched by pattern.
1327 1327
1328 1328 The pattern without explicit kind like ``glob:`` is expected to be
1329 1329 relative to the current directory and match against a file or a
1330 1330 directory.
1331 1331 """
1332 1332 # i18n: "modifies" is a keyword
1333 1333 pat = getstring(x, _("modifies requires a pattern"))
1334 1334 return checkstatus(repo, subset, pat, 0)
1335 1335
1336 1336 @predicate('named(namespace)')
1337 1337 def named(repo, subset, x):
1338 1338 """The changesets in a given namespace.
1339 1339
1340 1340 If `namespace` starts with `re:`, the remainder of the string is treated as
1341 1341 a regular expression. To match a namespace that actually starts with `re:`,
1342 1342 use the prefix `literal:`.
1343 1343 """
1344 1344 # i18n: "named" is a keyword
1345 1345 args = getargs(x, 1, 1, _('named requires a namespace argument'))
1346 1346
1347 1347 ns = getstring(args[0],
1348 1348 # i18n: "named" is a keyword
1349 1349 _('the argument to named must be a string'))
1350 1350 kind, pattern, matcher = util.stringmatcher(ns)
1351 1351 namespaces = set()
1352 1352 if kind == 'literal':
1353 1353 if pattern not in repo.names:
1354 1354 raise error.RepoLookupError(_("namespace '%s' does not exist")
1355 1355 % ns)
1356 1356 namespaces.add(repo.names[pattern])
1357 1357 else:
1358 1358 for name, ns in repo.names.iteritems():
1359 1359 if matcher(name):
1360 1360 namespaces.add(ns)
1361 1361 if not namespaces:
1362 1362 raise error.RepoLookupError(_("no namespace exists"
1363 1363 " that match '%s'") % pattern)
1364 1364
1365 1365 names = set()
1366 1366 for ns in namespaces:
1367 1367 for name in ns.listnames(repo):
1368 1368 if name not in ns.deprecated:
1369 1369 names.update(repo[n].rev() for n in ns.nodes(repo, name))
1370 1370
1371 1371 names -= set([node.nullrev])
1372 1372 return subset & names
1373 1373
1374 1374 @predicate('id(string)', safe=True)
1375 1375 def node_(repo, subset, x):
1376 1376 """Revision non-ambiguously specified by the given hex string prefix.
1377 1377 """
1378 1378 # i18n: "id" is a keyword
1379 1379 l = getargs(x, 1, 1, _("id requires one argument"))
1380 1380 # i18n: "id" is a keyword
1381 1381 n = getstring(l[0], _("id requires a string"))
1382 1382 if len(n) == 40:
1383 1383 try:
1384 1384 rn = repo.changelog.rev(node.bin(n))
1385 1385 except (LookupError, TypeError):
1386 1386 rn = None
1387 1387 else:
1388 1388 rn = None
1389 1389 pm = repo.changelog._partialmatch(n)
1390 1390 if pm is not None:
1391 1391 rn = repo.changelog.rev(pm)
1392 1392
1393 1393 if rn is None:
1394 1394 return baseset()
1395 1395 result = baseset([rn])
1396 1396 return result & subset
1397 1397
1398 1398 @predicate('obsolete()', safe=True)
1399 1399 def obsolete(repo, subset, x):
1400 1400 """Mutable changeset with a newer version."""
1401 1401 # i18n: "obsolete" is a keyword
1402 1402 getargs(x, 0, 0, _("obsolete takes no arguments"))
1403 1403 obsoletes = obsmod.getrevs(repo, 'obsolete')
1404 1404 return subset & obsoletes
1405 1405
1406 1406 @predicate('only(set, [set])', safe=True)
1407 1407 def only(repo, subset, x):
1408 1408 """Changesets that are ancestors of the first set that are not ancestors
1409 1409 of any other head in the repo. If a second set is specified, the result
1410 1410 is ancestors of the first set that are not ancestors of the second set
1411 1411 (i.e. ::<set1> - ::<set2>).
1412 1412 """
1413 1413 cl = repo.changelog
1414 1414 # i18n: "only" is a keyword
1415 1415 args = getargs(x, 1, 2, _('only takes one or two arguments'))
1416 1416 include = getset(repo, fullreposet(repo), args[0])
1417 1417 if len(args) == 1:
1418 1418 if not include:
1419 1419 return baseset()
1420 1420
1421 1421 descendants = set(_revdescendants(repo, include, False))
1422 1422 exclude = [rev for rev in cl.headrevs()
1423 1423 if not rev in descendants and not rev in include]
1424 1424 else:
1425 1425 exclude = getset(repo, fullreposet(repo), args[1])
1426 1426
1427 1427 results = set(cl.findmissingrevs(common=exclude, heads=include))
1428 1428 # XXX we should turn this into a baseset instead of a set, smartset may do
1429 1429 # some optimisations from the fact this is a baseset.
1430 1430 return subset & results
1431 1431
1432 1432 @predicate('origin([set])', safe=True)
1433 1433 def origin(repo, subset, x):
1434 1434 """
1435 1435 Changesets that were specified as a source for the grafts, transplants or
1436 1436 rebases that created the given revisions. Omitting the optional set is the
1437 1437 same as passing all(). If a changeset created by these operations is itself
1438 1438 specified as a source for one of these operations, only the source changeset
1439 1439 for the first operation is selected.
1440 1440 """
1441 1441 if x is not None:
1442 1442 dests = getset(repo, fullreposet(repo), x)
1443 1443 else:
1444 1444 dests = fullreposet(repo)
1445 1445
1446 1446 def _firstsrc(rev):
1447 1447 src = _getrevsource(repo, rev)
1448 1448 if src is None:
1449 1449 return None
1450 1450
1451 1451 while True:
1452 1452 prev = _getrevsource(repo, src)
1453 1453
1454 1454 if prev is None:
1455 1455 return src
1456 1456 src = prev
1457 1457
1458 1458 o = set([_firstsrc(r) for r in dests])
1459 1459 o -= set([None])
1460 1460 # XXX we should turn this into a baseset instead of a set, smartset may do
1461 1461 # some optimisations from the fact this is a baseset.
1462 1462 return subset & o
1463 1463
1464 1464 @predicate('outgoing([path])', safe=True)
1465 1465 def outgoing(repo, subset, x):
1466 1466 """Changesets not found in the specified destination repository, or the
1467 1467 default push location.
1468 1468 """
1469 1469 # Avoid cycles.
1470 1470 from . import (
1471 1471 discovery,
1472 1472 hg,
1473 1473 )
1474 1474 # i18n: "outgoing" is a keyword
1475 1475 l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
1476 1476 # i18n: "outgoing" is a keyword
1477 1477 dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
1478 1478 dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
1479 1479 dest, branches = hg.parseurl(dest)
1480 1480 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1481 1481 if revs:
1482 1482 revs = [repo.lookup(rev) for rev in revs]
1483 1483 other = hg.peer(repo, {}, dest)
1484 1484 repo.ui.pushbuffer()
1485 1485 outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
1486 1486 repo.ui.popbuffer()
1487 1487 cl = repo.changelog
1488 1488 o = set([cl.rev(r) for r in outgoing.missing])
1489 1489 return subset & o
1490 1490
1491 1491 @predicate('p1([set])', safe=True)
1492 1492 def p1(repo, subset, x):
1493 1493 """First parent of changesets in set, or the working directory.
1494 1494 """
1495 1495 if x is None:
1496 1496 p = repo[x].p1().rev()
1497 1497 if p >= 0:
1498 1498 return subset & baseset([p])
1499 1499 return baseset()
1500 1500
1501 1501 ps = set()
1502 1502 cl = repo.changelog
1503 1503 for r in getset(repo, fullreposet(repo), x):
1504 1504 ps.add(cl.parentrevs(r)[0])
1505 1505 ps -= set([node.nullrev])
1506 1506 # XXX we should turn this into a baseset instead of a set, smartset may do
1507 1507 # some optimisations from the fact this is a baseset.
1508 1508 return subset & ps
1509 1509
1510 1510 @predicate('p2([set])', safe=True)
1511 1511 def p2(repo, subset, x):
1512 1512 """Second parent of changesets in set, or the working directory.
1513 1513 """
1514 1514 if x is None:
1515 1515 ps = repo[x].parents()
1516 1516 try:
1517 1517 p = ps[1].rev()
1518 1518 if p >= 0:
1519 1519 return subset & baseset([p])
1520 1520 return baseset()
1521 1521 except IndexError:
1522 1522 return baseset()
1523 1523
1524 1524 ps = set()
1525 1525 cl = repo.changelog
1526 1526 for r in getset(repo, fullreposet(repo), x):
1527 1527 ps.add(cl.parentrevs(r)[1])
1528 1528 ps -= set([node.nullrev])
1529 1529 # XXX we should turn this into a baseset instead of a set, smartset may do
1530 1530 # some optimisations from the fact this is a baseset.
1531 1531 return subset & ps
1532 1532
1533 1533 @predicate('parents([set])', safe=True)
1534 1534 def parents(repo, subset, x):
1535 1535 """
1536 1536 The set of all parents for all changesets in set, or the working directory.
1537 1537 """
1538 1538 if x is None:
1539 1539 ps = set(p.rev() for p in repo[x].parents())
1540 1540 else:
1541 1541 ps = set()
1542 1542 cl = repo.changelog
1543 1543 up = ps.update
1544 1544 parentrevs = cl.parentrevs
1545 1545 for r in getset(repo, fullreposet(repo), x):
1546 1546 if r == node.wdirrev:
1547 1547 up(p.rev() for p in repo[r].parents())
1548 1548 else:
1549 1549 up(parentrevs(r))
1550 1550 ps -= set([node.nullrev])
1551 1551 return subset & ps
1552 1552
1553 1553 def _phase(repo, subset, target):
1554 1554 """helper to select all revs in phase <target>"""
1555 1555 repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
1556 1556 if repo._phasecache._phasesets:
1557 1557 s = repo._phasecache._phasesets[target] - repo.changelog.filteredrevs
1558 1558 s = baseset(s)
1559 1559 s.sort() # sets are unordered, so enforce ascending order
1560 1560 return subset & s
1561 1561 else:
1562 1562 phase = repo._phasecache.phase
1563 1563 condition = lambda r: phase(repo, r) == target
1564 1564 return subset.filter(condition, condrepr=('<phase %r>', target),
1565 1565 cache=False)
1566 1566
1567 1567 @predicate('draft()', safe=True)
1568 1568 def draft(repo, subset, x):
1569 1569 """Changeset in draft phase."""
1570 1570 # i18n: "draft" is a keyword
1571 1571 getargs(x, 0, 0, _("draft takes no arguments"))
1572 1572 target = phases.draft
1573 1573 return _phase(repo, subset, target)
1574 1574
1575 1575 @predicate('secret()', safe=True)
1576 1576 def secret(repo, subset, x):
1577 1577 """Changeset in secret phase."""
1578 1578 # i18n: "secret" is a keyword
1579 1579 getargs(x, 0, 0, _("secret takes no arguments"))
1580 1580 target = phases.secret
1581 1581 return _phase(repo, subset, target)
1582 1582
1583 1583 def parentspec(repo, subset, x, n):
1584 1584 """``set^0``
1585 1585 The set.
1586 1586 ``set^1`` (or ``set^``), ``set^2``
1587 1587 First or second parent, respectively, of all changesets in set.
1588 1588 """
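# For example, 'tip^2' selects the second parent of tip, or nothing if
# tip is not a merge changeset.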
1589 1589 try:
1590 1590 n = int(n[1])
1591 1591 if n not in (0, 1, 2):
1592 1592 raise ValueError
1593 1593 except (TypeError, ValueError):
1594 1594 raise error.ParseError(_("^ expects a number 0, 1, or 2"))
1595 1595 ps = set()
1596 1596 cl = repo.changelog
1597 1597 for r in getset(repo, fullreposet(repo), x):
1598 1598 if n == 0:
1599 1599 ps.add(r)
1600 1600 elif n == 1:
1601 1601 ps.add(cl.parentrevs(r)[0])
1602 1602 elif n == 2:
1603 1603 parents = cl.parentrevs(r)
1604 1604 if len(parents) > 1:
1605 1605 ps.add(parents[1])
1606 1606 return subset & ps
1607 1607
1608 1608 @predicate('present(set)', safe=True)
1609 1609 def present(repo, subset, x):
1610 1610 """An empty set, if any revision in set isn't found; otherwise,
1611 1611 all revisions in set.
1612 1612
1613 1613 If any of the specified revisions is not present in the local repository,
1614 1614 the query is normally aborted. But this predicate allows the query
1615 1615 to continue even in such cases.
1616 1616 """
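# For example, 'present(foo)' evaluates to an empty set rather than
# aborting when no revision named 'foo' exists.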
1617 1617 try:
1618 1618 return getset(repo, subset, x)
1619 1619 except error.RepoLookupError:
1620 1620 return baseset()
1621 1621
1622 1622 # for internal use
1623 1623 @predicate('_notpublic', safe=True)
1624 1624 def _notpublic(repo, subset, x):
1625 1625 getargs(x, 0, 0, "_notpublic takes no arguments")
1626 1626 repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
1627 1627 if repo._phasecache._phasesets:
1628 1628 s = set()
1629 1629 for u in repo._phasecache._phasesets[1:]:
1630 1630 s.update(u)
1631 1631 s = baseset(s - repo.changelog.filteredrevs)
1632 1632 s.sort()
1633 1633 return subset & s
1634 1634 else:
1635 1635 phase = repo._phasecache.phase
1636 1636 target = phases.public
1637 1637 condition = lambda r: phase(repo, r) != target
1638 1638 return subset.filter(condition, condrepr=('<phase %r>', target),
1639 1639 cache=False)
1640 1640
1641 1641 @predicate('public()', safe=True)
1642 1642 def public(repo, subset, x):
1643 1643 """Changeset in public phase."""
1644 1644 # i18n: "public" is a keyword
1645 1645 getargs(x, 0, 0, _("public takes no arguments"))
1646 1646 phase = repo._phasecache.phase
1647 1647 target = phases.public
1648 1648 condition = lambda r: phase(repo, r) == target
1649 1649 return subset.filter(condition, condrepr=('<phase %r>', target),
1650 1650 cache=False)
1651 1651
1652 1652 @predicate('remote([id [,path]])', safe=True)
1653 1653 def remote(repo, subset, x):
1654 1654 """Local revision that corresponds to the given identifier in a
1655 1655 remote repository, if present. Here, the '.' identifier is a
1656 1656 synonym for the current local branch.
1657 1657 """
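# For example, 'remote(".", "default")' maps the 'default' remote's head
# of the current branch back to a local revision, if it is known locally.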
1658 1658
1659 1659 from . import hg # avoid start-up nasties
1660 1660 # i18n: "remote" is a keyword
1661 1661 l = getargs(x, 0, 2, _("remote takes zero, one, or two arguments"))
1662 1662
1663 1663 q = '.'
1664 1664 if len(l) > 0:
1665 1665 # i18n: "remote" is a keyword
1666 1666 q = getstring(l[0], _("remote requires a string id"))
1667 1667 if q == '.':
1668 1668 q = repo['.'].branch()
1669 1669
1670 1670 dest = ''
1671 1671 if len(l) > 1:
1672 1672 # i18n: "remote" is a keyword
1673 1673 dest = getstring(l[1], _("remote requires a repository path"))
1674 1674 dest = repo.ui.expandpath(dest or 'default')
1675 1675 dest, branches = hg.parseurl(dest)
1676 1676 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1677 1677 if revs:
1678 1678 revs = [repo.lookup(rev) for rev in revs]
1679 1679 other = hg.peer(repo, {}, dest)
1680 1680 n = other.lookup(q)
1681 1681 if n in repo:
1682 1682 r = repo[n].rev()
1683 1683 if r in subset:
1684 1684 return baseset([r])
1685 1685 return baseset()
1686 1686
1687 1687 @predicate('removes(pattern)', safe=True)
1688 1688 def removes(repo, subset, x):
1689 1689 """Changesets which remove files matching pattern.
1690 1690
1691 1691 The pattern without explicit kind like ``glob:`` is expected to be
1692 1692 relative to the current directory and match against a file or a
1693 1693 directory.
1694 1694 """
1695 1695 # i18n: "removes" is a keyword
1696 1696 pat = getstring(x, _("removes requires a pattern"))
1697 1697 return checkstatus(repo, subset, pat, 2)
1698 1698
1699 1699 @predicate('rev(number)', safe=True)
1700 1700 def rev(repo, subset, x):
1701 1701 """Revision with the given numeric identifier.
1702 1702 """
1703 1703 # i18n: "rev" is a keyword
1704 1704 l = getargs(x, 1, 1, _("rev requires one argument"))
1705 1705 try:
1706 1706 # i18n: "rev" is a keyword
1707 1707 l = int(getstring(l[0], _("rev requires a number")))
1708 1708 except (TypeError, ValueError):
1709 1709 # i18n: "rev" is a keyword
1710 1710 raise error.ParseError(_("rev expects a number"))
1711 1711 if l not in repo.changelog and l != node.nullrev:
1712 1712 return baseset()
1713 1713 return subset & baseset([l])
1714 1714
1715 1715 @predicate('matching(revision [, field])', safe=True)
1716 1716 def matching(repo, subset, x):
1717 1717 """Changesets in which a given set of fields match the set of fields in the
1718 1718 selected revision or set.
1719 1719
1720 1720 To match more than one field pass the list of fields to match separated
1721 1721 by spaces (e.g. ``author description``).
1722 1722
1723 1723 Valid fields are most regular revision fields and some special fields.
1724 1724
1725 1725 Regular revision fields are ``description``, ``author``, ``branch``,
1726 1726 ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
1727 1727 and ``diff``.
1728 1728 Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
1729 1729 contents of the revision. Two revisions matching their ``diff`` will
1730 1730 also match their ``files``.
1731 1731
1732 1732 Special fields are ``summary`` and ``metadata``:
1733 1733 ``summary`` matches the first line of the description.
1734 1734 ``metadata`` is equivalent to matching ``description user date``
1735 1735 (i.e. it matches the main metadata fields).
1736 1736
1737 1737 ``metadata`` is the default field which is used when no fields are
1738 1738 specified. You can match more than one field at a time.
1739 1739 """
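# For example, 'matching(42, "author date")' selects changesets whose
# author and date both equal those of revision 42.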
1740 1740 # i18n: "matching" is a keyword
1741 1741 l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))
1742 1742
1743 1743 revs = getset(repo, fullreposet(repo), l[0])
1744 1744
1745 1745 fieldlist = ['metadata']
1746 1746 if len(l) > 1:
1747 1747 fieldlist = getstring(l[1],
1748 1748 # i18n: "matching" is a keyword
1749 1749 _("matching requires a string "
1750 1750 "as its second argument")).split()
1751 1751
1752 1752 # Make sure that there are no repeated fields,
1753 1753 # expand the 'special' 'metadata' field type
1754 1754 # and check the 'files' whenever we check the 'diff'
1755 1755 fields = []
1756 1756 for field in fieldlist:
1757 1757 if field == 'metadata':
1758 1758 fields += ['user', 'description', 'date']
1759 1759 elif field == 'diff':
1760 1760 # a revision matching the diff must also match the files
1761 1761 # since matching the diff is very costly, make sure to
1762 1762 # also match the files first
1763 1763 fields += ['files', 'diff']
1764 1764 else:
1765 1765 if field == 'author':
1766 1766 field = 'user'
1767 1767 fields.append(field)
1768 1768 fields = set(fields)
1769 1769 if 'summary' in fields and 'description' in fields:
1770 1770 # If a revision matches its description it also matches its summary
1771 1771 fields.discard('summary')
1772 1772
1773 1773 # We may want to match more than one field
1774 1774 # Not all fields take the same amount of time to be matched
1775 1775 # Sort the selected fields in order of increasing matching cost
1776 1776 fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
1777 1777 'files', 'description', 'substate', 'diff']
1778 1778 def fieldkeyfunc(f):
1779 1779 try:
1780 1780 return fieldorder.index(f)
1781 1781 except ValueError:
1782 1782 # assume an unknown field is very costly
1783 1783 return len(fieldorder)
1784 1784 fields = list(fields)
1785 1785 fields.sort(key=fieldkeyfunc)
1786 1786
1787 1787 # Each field will be matched with its own "getfield" function
1788 1788 # which will be added to the getfieldfuncs array of functions
1789 1789 getfieldfuncs = []
1790 1790 _funcs = {
1791 1791 'user': lambda r: repo[r].user(),
1792 1792 'branch': lambda r: repo[r].branch(),
1793 1793 'date': lambda r: repo[r].date(),
1794 1794 'description': lambda r: repo[r].description(),
1795 1795 'files': lambda r: repo[r].files(),
1796 1796 'parents': lambda r: repo[r].parents(),
1797 1797 'phase': lambda r: repo[r].phase(),
1798 1798 'substate': lambda r: repo[r].substate,
1799 1799 'summary': lambda r: repo[r].description().splitlines()[0],
1800 1800 'diff': lambda r: list(repo[r].diff(git=True)),
1801 1801 }
1802 1802 for info in fields:
1803 1803 getfield = _funcs.get(info, None)
1804 1804 if getfield is None:
1805 1805 raise error.ParseError(
1806 1806 # i18n: "matching" is a keyword
1807 1807 _("unexpected field name passed to matching: %s") % info)
1808 1808 getfieldfuncs.append(getfield)
1809 1809 # convert the getfield array of functions into a "getinfo" function
1810 1810 # which returns a list of field values for a given revision (one value
1811 1811 # per selected field)
1812 1812 getinfo = lambda r: [f(r) for f in getfieldfuncs]
1813 1813
1814 1814 def matches(x):
1815 1815 for rev in revs:
1816 1816 target = getinfo(rev)
1817 1817 match = True
1818 1818 for n, f in enumerate(getfieldfuncs):
1819 1819 if target[n] != f(x):
1820 1820 match = False
1821 1821 if match:
1822 1822 return True
1823 1823 return False
1824 1824
1825 1825 return subset.filter(matches, condrepr=('<matching%r %r>', fields, revs))
1826 1826
1827 1827 @predicate('reverse(set)', safe=True)
1828 1828 def reverse(repo, subset, x):
1829 1829 """Reverse order of set.
1830 1830 """
1831 1831 l = getset(repo, subset, x)
1832 1832 l.reverse()
1833 1833 return l
1834 1834
1835 1835 @predicate('roots(set)', safe=True)
1836 1836 def roots(repo, subset, x):
1837 1837 """Changesets in set with no parent changeset in set.
1838 1838 """
1839 1839 s = getset(repo, fullreposet(repo), x)
1840 1840 parents = repo.changelog.parentrevs
1841 1841 def filter(r):
1842 1842 for p in parents(r):
1843 1843 if 0 <= p and p in s:
1844 1844 return False
1845 1845 return True
1846 1846 return subset & s.filter(filter, condrepr='<roots>')
1847 1847
1848 1848 @predicate('sort(set[, [-]key...])', safe=True)
1849 1849 def sort(repo, subset, x):
1850 1850 """Sort set by keys. The default sort order is ascending; specify a key
1851 1851 as ``-key`` to sort in descending order.
1852 1852
1853 1853 The keys can be:
1854 1854
1855 1855 - ``rev`` for the revision number,
1856 1856 - ``branch`` for the branch name,
1857 1857 - ``desc`` for the commit message (description),
1858 1858 - ``user`` for user name (``author`` can be used as an alias),
1859 1859 - ``date`` for the commit date
1860 1860 """
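# For example, 'sort(all(), "branch -date")' orders revisions by branch
# name and, within each branch, from newest to oldest.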
1861 1861 # i18n: "sort" is a keyword
1862 1862 l = getargs(x, 1, 2, _("sort requires one or two arguments"))
1863 1863 keys = "rev"
1864 1864 if len(l) == 2:
1865 1865 # i18n: "sort" is a keyword
1866 1866 keys = getstring(l[1], _("sort spec must be a string"))
1867 1867
1868 1868 s = l[0]
1869 1869 keys = keys.split()
1870 1870 l = []
1871 1871 def invert(s):
1872 1872 return "".join(chr(255 - ord(c)) for c in s)
1873 1873 revs = getset(repo, subset, s)
1874 1874 if keys == ["rev"]:
1875 1875 revs.sort()
1876 1876 return revs
1877 1877 elif keys == ["-rev"]:
1878 1878 revs.sort(reverse=True)
1879 1879 return revs
1880 1880 for r in revs:
1881 1881 c = repo[r]
1882 1882 e = []
1883 1883 for k in keys:
1884 1884 if k == 'rev':
1885 1885 e.append(r)
1886 1886 elif k == '-rev':
1887 1887 e.append(-r)
1888 1888 elif k == 'branch':
1889 1889 e.append(c.branch())
1890 1890 elif k == '-branch':
1891 1891 e.append(invert(c.branch()))
1892 1892 elif k == 'desc':
1893 1893 e.append(c.description())
1894 1894 elif k == '-desc':
1895 1895 e.append(invert(c.description()))
1896 1896 elif k in 'user author':
1897 1897 e.append(c.user())
1898 1898 elif k in '-user -author':
1899 1899 e.append(invert(c.user()))
1900 1900 elif k == 'date':
1901 1901 e.append(c.date()[0])
1902 1902 elif k == '-date':
1903 1903 e.append(-c.date()[0])
1904 1904 else:
1905 1905 raise error.ParseError(_("unknown sort key %r") % k)
1906 1906 e.append(r)
1907 1907 l.append(e)
1908 1908 l.sort()
1909 1909 return baseset([e[-1] for e in l])
1910 1910
1911 1911 @predicate('subrepo([pattern])')
1912 1912 def subrepo(repo, subset, x):
1913 1913 """Changesets that add, modify or remove the given subrepo. If no subrepo
1914 1914 pattern is named, any subrepo changes are returned.
1915 1915 """
1916 1916 # i18n: "subrepo" is a keyword
1917 1917 args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
1918 1918 pat = None
1919 1919 if len(args) != 0:
1920 1920 pat = getstring(args[0], _("subrepo requires a pattern"))
1921 1921
1922 1922 m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])
1923 1923
1924 1924 def submatches(names):
1925 1925 k, p, m = util.stringmatcher(pat)
1926 1926 for name in names:
1927 1927 if m(name):
1928 1928 yield name
1929 1929
1930 1930 def matches(x):
1931 1931 c = repo[x]
1932 1932 s = repo.status(c.p1().node(), c.node(), match=m)
1933 1933
1934 1934 if pat is None:
1935 1935 return s.added or s.modified or s.removed
1936 1936
1937 1937 if s.added:
1938 1938 return any(submatches(c.substate.keys()))
1939 1939
1940 1940 if s.modified:
1941 1941 subs = set(c.p1().substate.keys())
1942 1942 subs.update(c.substate.keys())
1943 1943
1944 1944 for path in submatches(subs):
1945 1945 if c.p1().substate.get(path) != c.substate.get(path):
1946 1946 return True
1947 1947
1948 1948 if s.removed:
1949 1949 return any(submatches(c.p1().substate.keys()))
1950 1950
1951 1951 return False
1952 1952
1953 1953 return subset.filter(matches, condrepr=('<subrepo %r>', pat))
1954 1954
1955 1955 def _substringmatcher(pattern):
1956 1956 kind, pattern, matcher = util.stringmatcher(pattern)
1957 1957 if kind == 'literal':
1958 1958 matcher = lambda s: pattern in s
1959 1959 return kind, pattern, matcher
1960 1960
1961 1961 @predicate('tag([name])', safe=True)
1962 1962 def tag(repo, subset, x):
1963 1963 """The specified tag by name, or all tagged revisions if no name is given.
1964 1964
1965 1965 If `name` starts with `re:`, the remainder of the name is treated as
1966 1966 a regular expression. To match a tag that actually starts with `re:`,
1967 1967 use the prefix `literal:`.
1968 1968 """
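# For example, 'tag()' selects every tagged revision (excluding the
# 'tip' pseudo-tag), while 'tag("re:^v1\.")' selects only revisions
# carrying a v1.x tag.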
1969 1969 # i18n: "tag" is a keyword
1970 1970 args = getargs(x, 0, 1, _("tag takes one or no arguments"))
1971 1971 cl = repo.changelog
1972 1972 if args:
1973 1973 pattern = getstring(args[0],
1974 1974 # i18n: "tag" is a keyword
1975 1975 _('the argument to tag must be a string'))
1976 1976 kind, pattern, matcher = util.stringmatcher(pattern)
1977 1977 if kind == 'literal':
1978 1978 # avoid resolving all tags
1979 1979 tn = repo._tagscache.tags.get(pattern, None)
1980 1980 if tn is None:
1981 1981 raise error.RepoLookupError(_("tag '%s' does not exist")
1982 1982 % pattern)
1983 1983 s = set([repo[tn].rev()])
1984 1984 else:
1985 1985 s = set([cl.rev(n) for t, n in repo.tagslist() if matcher(t)])
1986 1986 else:
1987 1987 s = set([cl.rev(n) for t, n in repo.tagslist() if t != 'tip'])
1988 1988 return subset & s
1989 1989
1990 1990 @predicate('tagged', safe=True)
1991 1991 def tagged(repo, subset, x):
1992 1992 return tag(repo, subset, x)
1993 1993
1994 1994 @predicate('unstable()', safe=True)
1995 1995 def unstable(repo, subset, x):
1996 1996 """Non-obsolete changesets with obsolete ancestors.
1997 1997 """
1998 1998 # i18n: "unstable" is a keyword
1999 1999 getargs(x, 0, 0, _("unstable takes no arguments"))
2000 2000 unstables = obsmod.getrevs(repo, 'unstable')
2001 2001 return subset & unstables
2002 2002
2003 2003
2004 2004 @predicate('user(string)', safe=True)
2005 2005 def user(repo, subset, x):
2006 2006 """User name contains string. The match is case-insensitive.
2007 2007
2008 2008 If `string` starts with `re:`, the remainder of the string is treated as
2009 2009 a regular expression. To match a user that actually contains `re:`, use
2010 2010 the prefix `literal:`.
2011 2011 """
2012 2012 return author(repo, subset, x)
2013 2013
2014 2014 # experimental
2015 2015 @predicate('wdir', safe=True)
2016 2016 def wdir(repo, subset, x):
2017 2017 # i18n: "wdir" is a keyword
2018 2018 getargs(x, 0, 0, _("wdir takes no arguments"))
2019 2019 if node.wdirrev in subset or isinstance(subset, fullreposet):
2020 2020 return baseset([node.wdirrev])
2021 2021 return baseset()
2022 2022
2023 2023 # for internal use
2024 2024 @predicate('_list', safe=True)
2025 2025 def _list(repo, subset, x):
2026 2026 s = getstring(x, "internal error")
2027 2027 if not s:
2028 2028 return baseset()
2029 2029 # remove duplicates here. it's difficult for caller to deduplicate sets
2030 2030 # because different symbols can point to the same rev.
2031 2031 cl = repo.changelog
2032 2032 ls = []
2033 2033 seen = set()
2034 2034 for t in s.split('\0'):
2035 2035 try:
2036 2036 # fast path for integer revision
2037 2037 r = int(t)
2038 2038 if str(r) != t or r not in cl:
2039 2039 raise ValueError
2040 2040 revs = [r]
2041 2041 except ValueError:
2042 2042 revs = stringset(repo, subset, t)
2043 2043
2044 2044 for r in revs:
2045 2045 if r in seen:
2046 2046 continue
2047 2047 if (r in subset
2048 2048 or r == node.nullrev and isinstance(subset, fullreposet)):
2049 2049 ls.append(r)
2050 2050 seen.add(r)
2051 2051 return baseset(ls)
2052 2052
2053 2053 # for internal use
2054 2054 @predicate('_intlist', safe=True)
2055 2055 def _intlist(repo, subset, x):
2056 2056 s = getstring(x, "internal error")
2057 2057 if not s:
2058 2058 return baseset()
2059 2059 ls = [int(r) for r in s.split('\0')]
2060 2060 s = subset
2061 2061 return baseset([r for r in ls if r in s])
2062 2062
2063 2063 # for internal use
2064 2064 @predicate('_hexlist', safe=True)
2065 2065 def _hexlist(repo, subset, x):
2066 2066 s = getstring(x, "internal error")
2067 2067 if not s:
2068 2068 return baseset()
2069 2069 cl = repo.changelog
2070 2070 ls = [cl.rev(node.bin(r)) for r in s.split('\0')]
2071 2071 s = subset
2072 2072 return baseset([r for r in ls if r in s])
2073 2073
2074 2074 methods = {
2075 2075 "range": rangeset,
2076 2076 "dagrange": dagrange,
2077 2077 "string": stringset,
2078 2078 "symbol": stringset,
2079 2079 "and": andset,
2080 2080 "or": orset,
2081 2081 "not": notset,
2082 2082 "difference": differenceset,
2083 2083 "list": listset,
2084 2084 "keyvalue": keyvaluepair,
2085 2085 "func": func,
2086 2086 "ancestor": ancestorspec,
2087 2087 "parent": parentspec,
2088 2088 "parentpost": p1,
2089 2089 }
2090 2090
2091 2091 def optimize(x, small):
2092 2092 if x is None:
2093 2093 return 0, x
2094 2094
2095 2095 smallbonus = 1
2096 2096 if small:
2097 2097 smallbonus = .5
2098 2098
2099 2099 op = x[0]
2100 2100 if op == 'minus':
2101 2101 return optimize(('and', x[1], ('not', x[2])), small)
2102 2102 elif op == 'only':
2103 2103 return optimize(('func', ('symbol', 'only'),
2104 2104 ('list', x[1], x[2])), small)
2105 2105 elif op == 'onlypost':
2106 2106 return optimize(('func', ('symbol', 'only'), x[1]), small)
2107 2107 elif op == 'dagrangepre':
2108 2108 return optimize(('func', ('symbol', 'ancestors'), x[1]), small)
2109 2109 elif op == 'dagrangepost':
2110 2110 return optimize(('func', ('symbol', 'descendants'), x[1]), small)
2111 2111 elif op == 'rangeall':
2112 2112 return optimize(('range', ('string', '0'), ('string', 'tip')), small)
2113 2113 elif op == 'rangepre':
2114 2114 return optimize(('range', ('string', '0'), x[1]), small)
2115 2115 elif op == 'rangepost':
2116 2116 return optimize(('range', x[1], ('string', 'tip')), small)
2117 2117 elif op == 'negate':
2118 2118 return optimize(('string',
2119 2119 '-' + getstring(x[1], _("can't negate that"))), small)
2120 2120 elif op in 'string symbol negate':
2121 2121 return smallbonus, x # single revisions are small
2122 2122 elif op == 'and':
2123 2123 wa, ta = optimize(x[1], True)
2124 2124 wb, tb = optimize(x[2], True)
2125 2125
2126 2126 # (::x and not ::y)/(not ::y and ::x) have a fast path
2127 2127 def isonly(revs, bases):
2128 2128 return (
2129 2129 revs is not None
2130 2130 and revs[0] == 'func'
2131 2131 and getstring(revs[1], _('not a symbol')) == 'ancestors'
2132 2132 and bases is not None
2133 2133 and bases[0] == 'not'
2134 2134 and bases[1][0] == 'func'
2135 2135 and getstring(bases[1][1], _('not a symbol')) == 'ancestors')
2136 2136
2137 2137 w = min(wa, wb)
2138 2138 if isonly(ta, tb):
2139 2139 return w, ('func', ('symbol', 'only'), ('list', ta[2], tb[1][2]))
2140 2140 if isonly(tb, ta):
2141 2141 return w, ('func', ('symbol', 'only'), ('list', tb[2], ta[1][2]))
2142 2142
2143 2143 if tb is not None and tb[0] == 'not':
2144 2144 return wa, ('difference', ta, tb[1])
2145 2145
2146 2146 if wa > wb:
2147 2147 return w, (op, tb, ta)
2148 2148 return w, (op, ta, tb)
2149 2149 elif op == 'or':
2150 2150 # fast path for machine-generated expressions that are likely to have
2151 2151 # lots of trivial revisions: 'a + b + c()' to '_list(a b) + c()'
2152 2152 ws, ts, ss = [], [], []
2153 2153 def flushss():
2154 2154 if not ss:
2155 2155 return
2156 2156 if len(ss) == 1:
2157 2157 w, t = ss[0]
2158 2158 else:
2159 2159 s = '\0'.join(t[1] for w, t in ss)
2160 2160 y = ('func', ('symbol', '_list'), ('string', s))
2161 2161 w, t = optimize(y, False)
2162 2162 ws.append(w)
2163 2163 ts.append(t)
2164 2164 del ss[:]
2165 2165 for y in x[1:]:
2166 2166 w, t = optimize(y, False)
2167 2167 if t is not None and (t[0] == 'string' or t[0] == 'symbol'):
2168 2168 ss.append((w, t))
2169 2169 continue
2170 2170 flushss()
2171 2171 ws.append(w)
2172 2172 ts.append(t)
2173 2173 flushss()
2174 2174 if len(ts) == 1:
2175 2175 return ws[0], ts[0] # 'or' operation is fully optimized out
2176 2176 # we can't reorder trees by weight because it would change the order.
2177 2177 # ("sort(a + b)" == "sort(b + a)", but "a + b" != "b + a")
2178 2178 # ts = tuple(t for w, t in sorted(zip(ws, ts), key=lambda wt: wt[0]))
2179 2179 return max(ws), (op,) + tuple(ts)
2180 2180 elif op == 'not':
2181 2181 # Optimize not public() to _notpublic() because we have a fast version
2182 2182 if x[1] == ('func', ('symbol', 'public'), None):
2183 2183 newsym = ('func', ('symbol', '_notpublic'), None)
2184 2184 o = optimize(newsym, not small)
2185 2185 return o[0], o[1]
2186 2186 else:
2187 2187 o = optimize(x[1], not small)
2188 2188 return o[0], (op, o[1])
2189 2189 elif op == 'parentpost':
2190 2190 o = optimize(x[1], small)
2191 2191 return o[0], (op, o[1])
2192 2192 elif op == 'group':
2193 2193 return optimize(x[1], small)
2194 2194 elif op in 'dagrange range parent ancestorspec':
2195 2195 if op == 'parent':
2196 2196 # x^:y means (x^) : y, not x ^ (:y)
2197 2197 post = ('parentpost', x[1])
2198 2198 if x[2][0] == 'dagrangepre':
2199 2199 return optimize(('dagrange', post, x[2][1]), small)
2200 2200 elif x[2][0] == 'rangepre':
2201 2201 return optimize(('range', post, x[2][1]), small)
2202 2202
2203 2203 wa, ta = optimize(x[1], small)
2204 2204 wb, tb = optimize(x[2], small)
2205 2205 return wa + wb, (op, ta, tb)
2206 2206 elif op == 'list':
2207 2207 ws, ts = zip(*(optimize(y, small) for y in x[1:]))
2208 2208 return sum(ws), (op,) + ts
2209 2209 elif op == 'func':
2210 2210 f = getstring(x[1], _("not a symbol"))
2211 2211 wa, ta = optimize(x[2], small)
2212 2212 if f in ("author branch closed date desc file grep keyword "
2213 2213 "outgoing user"):
2214 2214 w = 10 # slow
2215 2215 elif f in "modifies adds removes":
2216 2216 w = 30 # slower
2217 2217 elif f == "contains":
2218 2218 w = 100 # very slow
2219 2219 elif f == "ancestor":
2220 2220 w = 1 * smallbonus
2221 2221 elif f in "reverse limit first _intlist":
2222 2222 w = 0
2223 2223 elif f in "sort":
2224 2224 w = 10 # assume most sorts look at changelog
2225 2225 else:
2226 2226 w = 1
2227 2227 return w + wa, (op, x[1], ta)
2228 2228 return 1, x
2229 2229
2230 2230 # the set of valid characters for the initial letter of symbols in
2231 2231 # alias declarations and definitions
2232 2232 _aliassyminitletters = set(c for c in [chr(i) for i in xrange(256)]
2233 2233 if c.isalnum() or c in '._@$' or ord(c) > 127)
2234 2234
2235 2235 def _tokenizealias(program, lookup=None):
2236 2236 """Parse alias declaration/definition into a stream of tokens
2237 2237
2238 2238 This also allows symbol names to use ``$`` as an initial letter
2239 2239 (for backward compatibility); callers of this function should
2240 2240 check whether ``$`` is also used for unexpected symbols.
2241 2241 """
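# For example, '$1' tokenizes as a symbol here, whereas the default
# tokenizer would reject a leading '$'.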
2242 2242 return tokenize(program, lookup=lookup,
2243 2243 syminitletters=_aliassyminitletters)
2244 2244
2245 2245 def _parsealiasdecl(decl):
2246 2246 """Parse alias declaration ``decl``
2247 2247
2248 2248 This returns ``(name, tree, args, errorstr)`` tuple:
2249 2249
2250 2250 - ``name``: of declared alias (may be ``decl`` itself at error)
2251 2251 - ``tree``: parse result (or ``None`` at error)
2252 2252 - ``args``: list of alias argument names (or None for symbol declaration)
2253 2253 - ``errorstr``: detail about detected error (or None)
2254 2254
2255 2255 >>> _parsealiasdecl('foo')
2256 2256 ('foo', ('symbol', 'foo'), None, None)
2257 2257 >>> _parsealiasdecl('$foo')
2258 2258 ('$foo', None, None, "'$' not for alias arguments")
2259 2259 >>> _parsealiasdecl('foo::bar')
2260 2260 ('foo::bar', None, None, 'invalid format')
2261 2261 >>> _parsealiasdecl('foo bar')
2262 2262 ('foo bar', None, None, 'at 4: invalid token')
2263 2263 >>> _parsealiasdecl('foo()')
2264 2264 ('foo', ('func', ('symbol', 'foo')), [], None)
2265 2265 >>> _parsealiasdecl('$foo()')
2266 2266 ('$foo()', None, None, "'$' not for alias arguments")
2267 2267 >>> _parsealiasdecl('foo($1, $2)')
2268 2268 ('foo', ('func', ('symbol', 'foo')), ['$1', '$2'], None)
2269 2269 >>> _parsealiasdecl('foo(bar_bar, baz.baz)')
2270 2270 ('foo', ('func', ('symbol', 'foo')), ['bar_bar', 'baz.baz'], None)
2271 2271 >>> _parsealiasdecl('foo($1, $2, nested($1, $2))')
2272 2272 ('foo($1, $2, nested($1, $2))', None, None, 'invalid argument list')
2273 2273 >>> _parsealiasdecl('foo(bar($1, $2))')
2274 2274 ('foo(bar($1, $2))', None, None, 'invalid argument list')
2275 2275 >>> _parsealiasdecl('foo("string")')
2276 2276 ('foo("string")', None, None, 'invalid argument list')
2277 2277 >>> _parsealiasdecl('foo($1, $2')
2278 2278 ('foo($1, $2', None, None, 'at 10: unexpected token: end')
2279 2279 >>> _parsealiasdecl('foo("string')
2280 2280 ('foo("string', None, None, 'at 5: unterminated string')
2281 2281 >>> _parsealiasdecl('foo($1, $2, $1)')
2282 2282 ('foo', None, None, 'argument names collide with each other')
2283 2283 """
2284 2284 p = parser.parser(elements)
2285 2285 try:
2286 2286 tree, pos = p.parse(_tokenizealias(decl))
2287 2287 if (pos != len(decl)):
2288 2288 raise error.ParseError(_('invalid token'), pos)
2289 2289 tree = parser.simplifyinfixops(tree, ('list',))
2290 2290
2291 2291 if tree[0] == 'symbol':
2292 2292 # "name = ...." style
2293 2293 name = tree[1]
2294 2294 if name.startswith('$'):
2295 2295 return (decl, None, None, _("'$' not for alias arguments"))
2296 return (name, ('symbol', name), None, None)
2296 return (name, tree, None, None)
2297 2297
2298 2298 if tree[0] == 'func' and tree[1][0] == 'symbol':
2299 2299 # "name(arg, ....) = ...." style
2300 2300 name = tree[1][1]
2301 2301 if name.startswith('$'):
2302 2302 return (decl, None, None, _("'$' not for alias arguments"))
2303 2303 args = []
2304 2304 for arg in getlist(tree[2]):
2305 2305 if arg[0] != 'symbol':
2306 2306 return (decl, None, None, _("invalid argument list"))
2307 2307 args.append(arg[1])
2308 2308 if len(args) != len(set(args)):
2309 2309 return (name, None, None,
2310 2310 _("argument names collide with each other"))
2311 return (name, ('func', ('symbol', name)), args, None)
2311 return (name, tree[:2], args, None)
2312 2312
2313 2313 return (decl, None, None, _("invalid format"))
2314 2314 except error.ParseError as inst:
2315 2315 return (decl, None, None, parseerrordetail(inst))
2316 2316
2317 2317 def _relabelaliasargs(tree, args):
2318 2318 if not isinstance(tree, tuple):
2319 2319 return tree
2320 2320 op = tree[0]
2321 2321 if op != 'symbol':
2322 2322 return (op,) + tuple(_relabelaliasargs(x, args) for x in tree[1:])
2323 2323
2324 2324 assert len(tree) == 2
2325 2325 sym = tree[1]
2326 2326 if sym in args:
2327 2327 op = '_aliasarg'
2328 2328 elif sym.startswith('$'):
2329 2329 raise error.ParseError(_("'$' not for alias arguments"))
2330 2330 return (op, sym)
2331 2331
2332 2332 def _parsealiasdefn(defn, args):
2333 2333 """Parse alias definition ``defn``
2334 2334
2335 2335 This function marks alias argument references as ``_aliasarg``.
2336 2336
2337 2337 ``args`` is a list of alias argument names, or None if the alias
2338 2338 is declared as a symbol.
2339 2339
2340 2340 This returns "tree" as parsing result.
2341 2341
2342 2342 >>> def prettyformat(tree):
2343 2343 ... return parser.prettyformat(tree, ('_aliasarg', 'string', 'symbol'))
2344 2344 >>> args = ['$1', '$2', 'foo']
2345 2345 >>> print prettyformat(_parsealiasdefn('$1 or foo', args))
2346 2346 (or
2347 2347 ('_aliasarg', '$1')
2348 2348 ('_aliasarg', 'foo'))
2349 2349 >>> try:
2350 2350 ... _parsealiasdefn('$1 or $bar', args)
2351 2351 ... except error.ParseError, inst:
2352 2352 ... print parseerrordetail(inst)
2353 2353 '$' not for alias arguments
2354 2354 >>> args = ['$1', '$10', 'foo']
2355 2355 >>> print prettyformat(_parsealiasdefn('$10 or foobar', args))
2356 2356 (or
2357 2357 ('_aliasarg', '$10')
2358 2358 ('symbol', 'foobar'))
2359 2359 >>> print prettyformat(_parsealiasdefn('"$1" or "foo"', args))
2360 2360 (or
2361 2361 ('string', '$1')
2362 2362 ('string', 'foo'))
2363 2363 """
2364 2364 if args:
2365 2365 args = set(args)
2366 2366 else:
2367 2367 args = set()
2368 2368
2369 2369 p = parser.parser(elements)
2370 2370 tree, pos = p.parse(_tokenizealias(defn))
2371 2371 if pos != len(defn):
2372 2372 raise error.ParseError(_('invalid token'), pos)
2373 2373 tree = parser.simplifyinfixops(tree, ('list', 'or'))
2374 2374 return _relabelaliasargs(tree, args)
2375 2375
2376 2376 class revsetalias(object):
2377 2377 # whether the alias's own `error` information has already been shown.
2378 2378 # this avoids showing the same warning multiple times at each `findaliases`.
2379 2379 warned = False
2380 2380
2381 2381 def __init__(self, name, value):
2382 2382 '''Aliases like:
2383 2383
2384 2384 h = heads(default)
2385 2385 b($1) = ancestors($1) - ancestors(default)
2386 2386 '''
2387 2387 self.name, self.tree, self.args, self.error = _parsealiasdecl(name)
2388 2388 if self.error:
2389 2389 self.error = _('failed to parse the declaration of revset alias'
2390 2390 ' "%s": %s') % (self.name, self.error)
2391 2391 return
2392 2392
2393 2393 try:
2394 2394 self.replacement = _parsealiasdefn(value, self.args)
2395 2395 except error.ParseError as inst:
2396 2396 self.error = _('failed to parse the definition of revset alias'
2397 2397 ' "%s": %s') % (self.name, parseerrordetail(inst))
2398 2398
2399 2399 def _getalias(aliases, tree):
2400 2400 """If tree looks like an unexpanded alias, return it. Return None
2401 2401 otherwise.
2402 2402 """
2403 2403 if isinstance(tree, tuple):
2404 2404 if tree[0] == 'symbol':
2405 2405 name = tree[1]
2406 2406 alias = aliases.get(name)
2407 2407 if alias and alias.args is None and alias.tree == tree:
2408 2408 return alias
2409 2409 if tree[0] == 'func':
2410 2410 if tree[1][0] == 'symbol':
2411 2411 name = tree[1][1]
2412 2412 alias = aliases.get(name)
2413 2413 if alias and alias.args is not None and alias.tree == tree[:2]:
2414 2414 return alias
2415 2415 return None
2416 2416
2417 2417 def _expandargs(tree, args):
2418 2418 """Replace _aliasarg instances with the substitution value of the
2419 2419 same name in args, recursively.
2420 2420 """
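# For example, with args {'$1': ('symbol', 'tip')}, the tree
# ('func', ('symbol', 'p1'), ('_aliasarg', '$1')) expands to
# ('func', ('symbol', 'p1'), ('symbol', 'tip')).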
2421 2421 if not isinstance(tree, tuple):
2422 2422 return tree
2423 2423 if tree[0] == '_aliasarg':
2424 2424 sym = tree[1]
2425 2425 return args[sym]
2426 2426 return tuple(_expandargs(t, args) for t in tree)
2427 2427
2428 2428 def _expandaliases(aliases, tree, expanding, cache):
2429 2429 """Expand aliases in tree, recursively.
2430 2430
2431 2431 'aliases' is a dictionary mapping user defined aliases to
2432 2432 revsetalias objects.
2433 2433 """
2434 2434 if not isinstance(tree, tuple):
2435 2435 # Do not expand raw strings
2436 2436 return tree
2437 2437 alias = _getalias(aliases, tree)
2438 2438 if alias is not None:
2439 2439 if alias.error:
2440 2440 raise error.Abort(alias.error)
2441 2441 if alias in expanding:
2442 2442 raise error.ParseError(_('infinite expansion of revset alias "%s" '
2443 2443 'detected') % alias.name)
2444 2444 expanding.append(alias)
2445 2445 if alias.name not in cache:
2446 2446 cache[alias.name] = _expandaliases(aliases, alias.replacement,
2447 2447 expanding, cache)
2448 2448 result = cache[alias.name]
2449 2449 expanding.pop()
2450 2450 if alias.args is not None:
2451 2451 l = getlist(tree[2])
2452 2452 if len(l) != len(alias.args):
2453 2453 raise error.ParseError(
2454 2454 _('invalid number of arguments: %d') % len(l))
2455 2455 l = [_expandaliases(aliases, a, [], cache) for a in l]
2456 2456 result = _expandargs(result, dict(zip(alias.args, l)))
2457 2457 else:
2458 2458 result = tuple(_expandaliases(aliases, t, expanding, cache)
2459 2459 for t in tree)
2460 2460 return result
2461 2461
2462 2462 def findaliases(ui, tree, showwarning=None):
2463 2463 aliases = {}
2464 2464 for k, v in ui.configitems('revsetalias'):
2465 2465 alias = revsetalias(k, v)
2466 2466 aliases[alias.name] = alias
2467 2467 tree = _expandaliases(aliases, tree, [], {})
2468 2468 if showwarning:
2469 2469 # warn about problematic (but unreferenced) aliases
2470 2470 for name, alias in sorted(aliases.iteritems()):
2471 2471 if alias.error and not alias.warned:
2472 2472 showwarning(_('warning: %s\n') % (alias.error))
2473 2473 alias.warned = True
2474 2474 return tree
2475 2475
2476 2476 def foldconcat(tree):
2477 2477 """Fold elements to be concatenated by `##`
2478 2478 """
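# For example, ('_concat', ('string', 'a'), ('symbol', 'b')) folds into
# ('string', 'ab').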
2479 2479 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2480 2480 return tree
2481 2481 if tree[0] == '_concat':
2482 2482 pending = [tree]
2483 2483 l = []
2484 2484 while pending:
2485 2485 e = pending.pop()
2486 2486 if e[0] == '_concat':
2487 2487 pending.extend(reversed(e[1:]))
2488 2488 elif e[0] in ('string', 'symbol'):
2489 2489 l.append(e[1])
2490 2490 else:
2491 2491 msg = _("\"##\" can't concatenate \"%s\" element") % (e[0])
2492 2492 raise error.ParseError(msg)
2493 2493 return ('string', ''.join(l))
2494 2494 else:
2495 2495 return tuple(foldconcat(t) for t in tree)
2496 2496
2497 2497 def parse(spec, lookup=None):
2498 2498 p = parser.parser(elements)
2499 2499 tree, pos = p.parse(tokenize(spec, lookup=lookup))
2500 2500 if pos != len(spec):
2501 2501 raise error.ParseError(_("invalid token"), pos)
2502 2502 return parser.simplifyinfixops(tree, ('list', 'or'))
2503 2503
2504 2504 def posttreebuilthook(tree, repo):
2505 2505 # hook for extensions to execute code on the optimized tree
2506 2506 pass
2507 2507
2508 2508 def match(ui, spec, repo=None):
2509 2509 if not spec:
2510 2510 raise error.ParseError(_("empty query"))
2511 2511 lookup = None
2512 2512 if repo:
2513 2513 lookup = repo.__contains__
2514 2514 tree = parse(spec, lookup)
2515 2515 return _makematcher(ui, tree, repo)
2516 2516
2517 2517 def matchany(ui, specs, repo=None):
2518 2518 """Create a matcher that will include any revisions matching one of the
2519 2519 given specs"""
2520 2520 if not specs:
2521 2521 def mfunc(repo, subset=None):
2522 2522 return baseset()
2523 2523 return mfunc
2524 2524 if not all(specs):
2525 2525 raise error.ParseError(_("empty query"))
2526 2526 lookup = None
2527 2527 if repo:
2528 2528 lookup = repo.__contains__
2529 2529 if len(specs) == 1:
2530 2530 tree = parse(specs[0], lookup)
2531 2531 else:
2532 2532 tree = ('or',) + tuple(parse(s, lookup) for s in specs)
2533 2533 return _makematcher(ui, tree, repo)
2534 2534
2535 2535 def _makematcher(ui, tree, repo):
2536 2536 if ui:
2537 2537 tree = findaliases(ui, tree, showwarning=ui.warn)
2538 2538 tree = foldconcat(tree)
2539 2539 weight, tree = optimize(tree, True)
2540 2540 posttreebuilthook(tree, repo)
2541 2541 def mfunc(repo, subset=None):
2542 2542 if subset is None:
2543 2543 subset = fullreposet(repo)
2544 2544 if util.safehasattr(subset, 'isascending'):
2545 2545 result = getset(repo, subset, tree)
2546 2546 else:
2547 2547 result = getset(repo, baseset(subset), tree)
2548 2548 return result
2549 2549 return mfunc
2550 2550
2551 2551 def formatspec(expr, *args):
2552 2552 '''
2553 2553 This is a convenience function for using revsets internally, and
2554 2554 escapes arguments appropriately. Aliases are intentionally ignored
2555 2555 so that intended expression behavior isn't accidentally subverted.
2556 2556
2557 2557 Supported arguments:
2558 2558
2559 2559 %r = revset expression, parenthesized
2560 2560 %d = int(arg), no quoting
2561 2561 %s = string(arg), escaped and single-quoted
2562 2562 %b = arg.branch(), escaped and single-quoted
2563 2563 %n = hex(arg), single-quoted
2564 2564 %% = a literal '%'
2565 2565
2566 2566 Prefixing the type with 'l' specifies a parenthesized list of that type.
2567 2567
2568 2568 >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
2569 2569 '(10 or 11):: and ((this()) or (that()))'
2570 2570 >>> formatspec('%d:: and not %d::', 10, 20)
2571 2571 '10:: and not 20::'
2572 2572 >>> formatspec('%ld or %ld', [], [1])
2573 2573 "_list('') or 1"
2574 2574 >>> formatspec('keyword(%s)', 'foo\\xe9')
2575 2575 "keyword('foo\\\\xe9')"
2576 2576 >>> b = lambda: 'default'
2577 2577 >>> b.branch = b
2578 2578 >>> formatspec('branch(%b)', b)
2579 2579 "branch('default')"
2580 2580 >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
2581 2581 "root(_list('a\\x00b\\x00c\\x00d'))"
2582 2582 '''
2583 2583
2584 2584 def quote(s):
2585 2585 return repr(str(s))
2586 2586
2587 2587 def argtype(c, arg):
2588 2588 if c == 'd':
2589 2589 return str(int(arg))
2590 2590 elif c == 's':
2591 2591 return quote(arg)
2592 2592 elif c == 'r':
2593 2593 parse(arg) # make sure syntax errors are confined
2594 2594 return '(%s)' % arg
2595 2595 elif c == 'n':
2596 2596 return quote(node.hex(arg))
2597 2597 elif c == 'b':
2598 2598 return quote(arg.branch())
2599 2599
2600 2600 def listexp(s, t):
2601 2601 l = len(s)
2602 2602 if l == 0:
2603 2603 return "_list('')"
2604 2604 elif l == 1:
2605 2605 return argtype(t, s[0])
2606 2606 elif t == 'd':
2607 2607 return "_intlist('%s')" % "\0".join(str(int(a)) for a in s)
2608 2608 elif t == 's':
2609 2609 return "_list('%s')" % "\0".join(s)
2610 2610 elif t == 'n':
2611 2611 return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
2612 2612 elif t == 'b':
2613 2613 return "_list('%s')" % "\0".join(a.branch() for a in s)
2614 2614
2615 2615 m = l // 2
2616 2616 return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))
2617 2617
2618 2618 ret = ''
2619 2619 pos = 0
2620 2620 arg = 0
2621 2621 while pos < len(expr):
2622 2622 c = expr[pos]
2623 2623 if c == '%':
2624 2624 pos += 1
2625 2625 d = expr[pos]
2626 2626 if d == '%':
2627 2627 ret += d
2628 2628 elif d in 'dsnbr':
2629 2629 ret += argtype(d, args[arg])
2630 2630 arg += 1
2631 2631 elif d == 'l':
2632 2632 # a list of some type
2633 2633 pos += 1
2634 2634 d = expr[pos]
2635 2635 ret += listexp(list(args[arg]), d)
2636 2636 arg += 1
2637 2637 else:
2638 2638 raise error.Abort('unexpected revspec format character %s' % d)
2639 2639 else:
2640 2640 ret += c
2641 2641 pos += 1
2642 2642
2643 2643 return ret
2644 2644
2645 2645 def prettyformat(tree):
2646 2646 return parser.prettyformat(tree, ('string', 'symbol'))
2647 2647
2648 2648 def depth(tree):
2649 2649 if isinstance(tree, tuple):
2650 2650 return max(map(depth, tree)) + 1
2651 2651 else:
2652 2652 return 0
2653 2653
2654 2654 def funcsused(tree):
2655 2655 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2656 2656 return set()
2657 2657 else:
2658 2658 funcs = set()
2659 2659 for s in tree[1:]:
2660 2660 funcs |= funcsused(s)
2661 2661 if tree[0] == 'func':
2662 2662 funcs.add(tree[1][1])
2663 2663 return funcs
2664 2664
2665 2665 def _formatsetrepr(r):
2666 2666 """Format an optional printable representation of a set
2667 2667
2668 2668 ======== =================================
2669 2669 type(r) example
2670 2670 ======== =================================
2671 2671 tuple ('<not %r>', other)
2672 2672 str '<branch closed>'
2673 2673 callable lambda: '<branch %r>' % sorted(b)
2674 2674 object other
2675 2675 ======== =================================
2676 2676 """
2677 2677 if r is None:
2678 2678 return ''
2679 2679 elif isinstance(r, tuple):
2680 2680 return r[0] % r[1:]
2681 2681 elif isinstance(r, str):
2682 2682 return r
2683 2683 elif callable(r):
2684 2684 return r()
2685 2685 else:
2686 2686 return repr(r)
2687 2687
2688 2688 class abstractsmartset(object):
2689 2689
2690 2690 def __nonzero__(self):
2691 2691 """True if the smartset is not empty"""
2692 2692 raise NotImplementedError()
2693 2693
2694 2694 def __contains__(self, rev):
2695 2695 """provide fast membership testing"""
2696 2696 raise NotImplementedError()
2697 2697
2698 2698 def __iter__(self):
2699 2699 """iterate the set in the order it is supposed to be iterated"""
2700 2700 raise NotImplementedError()
2701 2701
2702 2702 # Attributes containing a function to perform a fast iteration in a given
2703 2703 # direction. A smartset can have none, one, or both defined.
2704 2704 #
2705 2705 # Default value is None instead of a function returning None to avoid
2706 2706 # initializing an iterator just for testing if a fast method exists.
2707 2707 fastasc = None
2708 2708 fastdesc = None
2709 2709
2710 2710 def isascending(self):
2711 2711 """True if the set will iterate in ascending order"""
2712 2712 raise NotImplementedError()
2713 2713
2714 2714 def isdescending(self):
2715 2715 """True if the set will iterate in descending order"""
2716 2716 raise NotImplementedError()
2717 2717
2718 2718 @util.cachefunc
2719 2719 def min(self):
2720 2720 """return the minimum element in the set"""
2721 2721 if self.fastasc is not None:
2722 2722 for r in self.fastasc():
2723 2723 return r
2724 2724 raise ValueError('arg is an empty sequence')
2725 2725 return min(self)
2726 2726
2727 2727 @util.cachefunc
2728 2728 def max(self):
2729 2729 """return the maximum element in the set"""
2730 2730 if self.fastdesc is not None:
2731 2731 for r in self.fastdesc():
2732 2732 return r
2733 2733 raise ValueError('arg is an empty sequence')
2734 2734 return max(self)
2735 2735
2736 2736 def first(self):
2737 2737 """return the first element in the set (user iteration perspective)
2738 2738
2739 2739 Return None if the set is empty"""
2740 2740 raise NotImplementedError()
2741 2741
2742 2742 def last(self):
2743 2743 """return the last element in the set (user iteration perspective)
2744 2744
2745 2745 Return None if the set is empty"""
2746 2746 raise NotImplementedError()
2747 2747
2748 2748 def __len__(self):
2749 2749 """return the length of the smartset
2750 2750
2751 2751 This can be expensive on a smartset that could otherwise be lazy."""
2752 2752 raise NotImplementedError()
2753 2753
2754 2754 def reverse(self):
2755 2755 """reverse the expected iteration order"""
2756 2756 raise NotImplementedError()
2757 2757
2758 2758 def sort(self, reverse=True):
2759 2759 """get the set to iterate in an ascending or descending order"""
2760 2760 raise NotImplementedError()
2761 2761
2762 2762 def __and__(self, other):
2763 2763 """Returns a new object with the intersection of the two collections.
2764 2764
2765 2765 This is part of the mandatory API for smartset."""
2766 2766 if isinstance(other, fullreposet):
2767 2767 return self
2768 2768 return self.filter(other.__contains__, condrepr=other, cache=False)
2769 2769
2770 2770 def __add__(self, other):
2771 2771 """Returns a new object with the union of the two collections.
2772 2772
2773 2773 This is part of the mandatory API for smartset."""
2774 2774 return addset(self, other)
2775 2775
2776 2776 def __sub__(self, other):
2777 2777 """Returns a new object with the subtraction of the two collections.
2778 2778
2779 2779 This is part of the mandatory API for smartset."""
2780 2780 c = other.__contains__
2781 2781 return self.filter(lambda r: not c(r), condrepr=('<not %r>', other),
2782 2782 cache=False)
2783 2783
2784 2784 def filter(self, condition, condrepr=None, cache=True):
2785 2785 """Returns this smartset filtered by condition as a new smartset.
2786 2786
2787 2787 `condition` is a callable which takes a revision number and returns a
2788 2788 boolean. Optional `condrepr` provides a printable representation of
2789 2789 the given `condition`.
2790 2790
2791 2791 This is part of the mandatory API for smartset."""
2792 2792 # builtin functions cannot be cached, but they do not need to be
2793 2793 if cache and util.safehasattr(condition, 'func_code'):
2794 2794 condition = util.cachefunc(condition)
2795 2795 return filteredset(self, condition, condrepr)
2796 2796
2797 2797 class baseset(abstractsmartset):
2798 2798 """Basic data structure that represents a revset and contains the basic
2799 2799 operation that it should be able to perform.
2800 2800
2801 2801 Every method in this class should be implemented by any smartset class.
2802 2802 """
2803 2803 def __init__(self, data=(), datarepr=None):
2804 2804 """
2805 2805 datarepr: a tuple of (format, obj, ...), a function or an object that
2806 2806 provides a printable representation of the given data.
2807 2807 """
2808 2808 if not isinstance(data, list):
2809 2809 if isinstance(data, set):
2810 2810 self._set = data
2811 2811 data = list(data)
2812 2812 self._list = data
2813 2813 self._datarepr = datarepr
2814 2814 self._ascending = None
2815 2815
2816 2816 @util.propertycache
2817 2817 def _set(self):
2818 2818 return set(self._list)
2819 2819
2820 2820 @util.propertycache
2821 2821 def _asclist(self):
2822 2822 asclist = self._list[:]
2823 2823 asclist.sort()
2824 2824 return asclist
2825 2825
2826 2826 def __iter__(self):
2827 2827 if self._ascending is None:
2828 2828 return iter(self._list)
2829 2829 elif self._ascending:
2830 2830 return iter(self._asclist)
2831 2831 else:
2832 2832 return reversed(self._asclist)
2833 2833
2834 2834 def fastasc(self):
2835 2835 return iter(self._asclist)
2836 2836
2837 2837 def fastdesc(self):
2838 2838 return reversed(self._asclist)
2839 2839
2840 2840 @util.propertycache
2841 2841 def __contains__(self):
2842 2842 return self._set.__contains__
2843 2843
2844 2844 def __nonzero__(self):
2845 2845 return bool(self._list)
2846 2846
2847 2847 def sort(self, reverse=False):
2848 2848 self._ascending = not bool(reverse)
2849 2849
2850 2850 def reverse(self):
2851 2851 if self._ascending is None:
2852 2852 self._list.reverse()
2853 2853 else:
2854 2854 self._ascending = not self._ascending
2855 2855
2856 2856 def __len__(self):
2857 2857 return len(self._list)
2858 2858
2859 2859 def isascending(self):
2860 2860 """Returns True if the collection is in ascending order, False if not.
2861 2861
2862 2862 This is part of the mandatory API for smartset."""
2863 2863 if len(self) <= 1:
2864 2864 return True
2865 2865 return self._ascending is not None and self._ascending
2866 2866
2867 2867 def isdescending(self):
2868 2868 """Returns True if the collection is in descending order, False if not.
2869 2869
2870 2870 This is part of the mandatory API for smartset."""
2871 2871 if len(self) <= 1:
2872 2872 return True
2873 2873 return self._ascending is not None and not self._ascending
2874 2874
2875 2875 def first(self):
2876 2876 if self:
2877 2877 if self._ascending is None:
2878 2878 return self._list[0]
2879 2879 elif self._ascending:
2880 2880 return self._asclist[0]
2881 2881 else:
2882 2882 return self._asclist[-1]
2883 2883 return None
2884 2884
2885 2885 def last(self):
2886 2886 if self:
2887 2887 if self._ascending is None:
2888 2888 return self._list[-1]
2889 2889 elif self._ascending:
2890 2890 return self._asclist[-1]
2891 2891 else:
2892 2892 return self._asclist[0]
2893 2893 return None
2894 2894
2895 2895 def __repr__(self):
2896 2896 d = {None: '', False: '-', True: '+'}[self._ascending]
2897 2897 s = _formatsetrepr(self._datarepr)
2898 2898 if not s:
2899 2899 s = repr(self._list)
2900 2900 return '<%s%s %s>' % (type(self).__name__, d, s)
2901 2901
2902 2902 class filteredset(abstractsmartset):
2903 2903 """Duck type for baseset class which iterates lazily over the revisions in
2904 2904 the subset and contains a function which tests for membership in the
2905 2905 revset
2906 2906 """
2907 2907 def __init__(self, subset, condition=lambda x: True, condrepr=None):
2908 2908 """
2909 2909 condition: a function that decides whether a revision in the subset
2910 2910 belongs to the revset or not.
2911 2911 condrepr: a tuple of (format, obj, ...), a function or an object that
2912 2912 provides a printable representation of the given condition.
2913 2913 """
2914 2914 self._subset = subset
2915 2915 self._condition = condition
2916 2916 self._condrepr = condrepr
2917 2917
2918 2918 def __contains__(self, x):
2919 2919 return x in self._subset and self._condition(x)
2920 2920
2921 2921 def __iter__(self):
2922 2922 return self._iterfilter(self._subset)
2923 2923
2924 2924 def _iterfilter(self, it):
2925 2925 cond = self._condition
2926 2926 for x in it:
2927 2927 if cond(x):
2928 2928 yield x
2929 2929
2930 2930 @property
2931 2931 def fastasc(self):
2932 2932 it = self._subset.fastasc
2933 2933 if it is None:
2934 2934 return None
2935 2935 return lambda: self._iterfilter(it())
2936 2936
2937 2937 @property
2938 2938 def fastdesc(self):
2939 2939 it = self._subset.fastdesc
2940 2940 if it is None:
2941 2941 return None
2942 2942 return lambda: self._iterfilter(it())
2943 2943
2944 2944 def __nonzero__(self):
2945 2945 fast = self.fastasc
2946 2946 if fast is None:
2947 2947 fast = self.fastdesc
2948 2948 if fast is not None:
2949 2949 it = fast()
2950 2950 else:
2951 2951 it = self
2952 2952
2953 2953 for r in it:
2954 2954 return True
2955 2955 return False
2956 2956
2957 2957 def __len__(self):
2958 2958 # Basic implementation to be changed in future patches.
2959 2959 l = baseset([r for r in self])
2960 2960 return len(l)
2961 2961
2962 2962 def sort(self, reverse=False):
2963 2963 self._subset.sort(reverse=reverse)
2964 2964
2965 2965 def reverse(self):
2966 2966 self._subset.reverse()
2967 2967
2968 2968 def isascending(self):
2969 2969 return self._subset.isascending()
2970 2970
2971 2971 def isdescending(self):
2972 2972 return self._subset.isdescending()
2973 2973
2974 2974 def first(self):
2975 2975 for x in self:
2976 2976 return x
2977 2977 return None
2978 2978
2979 2979 def last(self):
2980 2980 it = None
2981 2981 if self.isascending():
2982 2982 it = self.fastdesc
2983 2983 elif self.isdescending():
2984 2984 it = self.fastasc
2985 2985 if it is not None:
2986 2986 for x in it():
2987 2987 return x
2988 2988 return None #empty case
2989 2989 else:
2990 2990 x = None
2991 2991 for x in self:
2992 2992 pass
2993 2993 return x
2994 2994
2995 2995 def __repr__(self):
2996 2996 xs = [repr(self._subset)]
2997 2997 s = _formatsetrepr(self._condrepr)
2998 2998 if s:
2999 2999 xs.append(s)
3000 3000 return '<%s %s>' % (type(self).__name__, ', '.join(xs))
3001 3001
3002 3002 def _iterordered(ascending, iter1, iter2):
3003 3003 """produce an ordered iteration from two iterators with the same order
3004 3004
3005 3005 The ascending parameter indicates the iteration direction.
3006 3006 """
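# For example, _iterordered(True, iter([1, 3, 5]), iter([2, 3, 6]))
# yields 1, 2, 3, 5, 6 (equal values are emitted only once).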
3007 3007 choice = max
3008 3008 if ascending:
3009 3009 choice = min
3010 3010
3011 3011 val1 = None
3012 3012 val2 = None
3013 3013 try:
3014 3014 # Consume both iterators in an ordered way until one is empty
3015 3015 while True:
3016 3016 if val1 is None:
3017 3017 val1 = iter1.next()
3018 3018 if val2 is None:
3019 3019 val2 = iter2.next()
3020 3020 next = choice(val1, val2)
3021 3021 yield next
3022 3022 if val1 == next:
3023 3023 val1 = None
3024 3024 if val2 == next:
3025 3025 val2 = None
3026 3026 except StopIteration:
3027 3027 # Flush any remaining values and consume the other one
3028 3028 it = iter2
3029 3029 if val1 is not None:
3030 3030 yield val1
3031 3031 it = iter1
3032 3032 elif val2 is not None:
3033 3033 # might have been equality and both are empty
3034 3034 yield val2
3035 3035 for val in it:
3036 3036 yield val
3037 3037
3038 3038 class addset(abstractsmartset):
3039 3039 """Represent the addition of two sets
3040 3040
3041 3041 Wrapper structure for lazily adding two structures without losing much
3042 3042 performance on the __contains__ method
3043 3043
3044 3044 If the ascending attribute is set, that means the two structures are
3045 3045 ordered in either an ascending or descending way. Therefore, we can add
3046 3046 them maintaining the order by iterating over both at the same time
3047 3047
3048 3048 >>> xs = baseset([0, 3, 2])
3049 3049 >>> ys = baseset([5, 2, 4])
3050 3050
3051 3051 >>> rs = addset(xs, ys)
3052 3052 >>> bool(rs), 0 in rs, 1 in rs, 5 in rs, rs.first(), rs.last()
3053 3053 (True, True, False, True, 0, 4)
3054 3054 >>> rs = addset(xs, baseset([]))
3055 3055 >>> bool(rs), 0 in rs, 1 in rs, rs.first(), rs.last()
3056 3056 (True, True, False, 0, 2)
3057 3057 >>> rs = addset(baseset([]), baseset([]))
3058 3058 >>> bool(rs), 0 in rs, rs.first(), rs.last()
3059 3059 (False, False, None, None)
3060 3060
3061 3061 iterate unsorted:
3062 3062 >>> rs = addset(xs, ys)
3063 3063 >>> [x for x in rs] # without _genlist
3064 3064 [0, 3, 2, 5, 4]
3065 3065 >>> assert not rs._genlist
3066 3066 >>> len(rs)
3067 3067 5
3068 3068 >>> [x for x in rs] # with _genlist
3069 3069 [0, 3, 2, 5, 4]
3070 3070 >>> assert rs._genlist
3071 3071
3072 3072 iterate ascending:
3073 3073 >>> rs = addset(xs, ys, ascending=True)
3074 3074 >>> [x for x in rs], [x for x in rs.fastasc()] # without _asclist
3075 3075 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
3076 3076 >>> assert not rs._asclist
3077 3077 >>> len(rs)
3078 3078 5
3079 3079 >>> [x for x in rs], [x for x in rs.fastasc()]
3080 3080 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
3081 3081 >>> assert rs._asclist
3082 3082
3083 3083 iterate descending:
3084 3084 >>> rs = addset(xs, ys, ascending=False)
3085 3085 >>> [x for x in rs], [x for x in rs.fastdesc()] # without _asclist
3086 3086 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
3087 3087 >>> assert not rs._asclist
3088 3088 >>> len(rs)
3089 3089 5
3090 3090 >>> [x for x in rs], [x for x in rs.fastdesc()]
3091 3091 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
3092 3092 >>> assert rs._asclist
3093 3093
3094 3094 iterate ascending without fastasc:
3095 3095 >>> rs = addset(xs, generatorset(ys), ascending=True)
3096 3096 >>> assert rs.fastasc is None
3097 3097 >>> [x for x in rs]
3098 3098 [0, 2, 3, 4, 5]
3099 3099
3100 3100 iterate descending without fastdesc:
3101 3101 >>> rs = addset(generatorset(xs), ys, ascending=False)
3102 3102 >>> assert rs.fastdesc is None
3103 3103 >>> [x for x in rs]
3104 3104 [5, 4, 3, 2, 0]
3105 3105 """
3106 3106 def __init__(self, revs1, revs2, ascending=None):
3107 3107 self._r1 = revs1
3108 3108 self._r2 = revs2
3109 3109 self._iter = None
3110 3110 self._ascending = ascending
3111 3111 self._genlist = None
3112 3112 self._asclist = None
3113 3113
3114 3114 def __len__(self):
3115 3115 return len(self._list)
3116 3116
3117 3117 def __nonzero__(self):
3118 3118 return bool(self._r1) or bool(self._r2)
3119 3119
3120 3120 @util.propertycache
3121 3121 def _list(self):
3122 3122 if not self._genlist:
3123 3123 self._genlist = baseset(iter(self))
3124 3124 return self._genlist
3125 3125
3126 3126 def __iter__(self):
3127 3127 """Iterate over both collections without repeating elements
3128 3128
3129 3129 If the ascending attribute is not set, iterate over the first one and
3130 3130 then over the second one, checking for membership in the first one so we
3131 3131 don't yield any duplicates.
3132 3132
3133 3133 If the ascending attribute is set, iterate over both collections at the
3134 3134 same time, yielding only one value at a time in the given order.
3135 3135 """
3136 3136 if self._ascending is None:
3137 3137 if self._genlist:
3138 3138 return iter(self._genlist)
3139 3139 def arbitraryordergen():
3140 3140 for r in self._r1:
3141 3141 yield r
3142 3142 inr1 = self._r1.__contains__
3143 3143 for r in self._r2:
3144 3144 if not inr1(r):
3145 3145 yield r
3146 3146 return arbitraryordergen()
3147 3147 # try to use our own fast iterator if it exists
3148 3148 self._trysetasclist()
3149 3149 if self._ascending:
3150 3150 attr = 'fastasc'
3151 3151 else:
3152 3152 attr = 'fastdesc'
3153 3153 it = getattr(self, attr)
3154 3154 if it is not None:
3155 3155 return it()
3156 3156 # maybe half of the component supports fast
3157 3157 # get iterator for _r1
3158 3158 iter1 = getattr(self._r1, attr)
3159 3159 if iter1 is None:
3160 3160 # let's avoid side effects (not sure it matters)
3161 3161 iter1 = iter(sorted(self._r1, reverse=not self._ascending))
3162 3162 else:
3163 3163 iter1 = iter1()
3164 3164 # get iterator for _r2
3165 3165 iter2 = getattr(self._r2, attr)
3166 3166 if iter2 is None:
3167 3167 # let's avoid side effects (not sure it matters)
3168 3168 iter2 = iter(sorted(self._r2, reverse=not self._ascending))
3169 3169 else:
3170 3170 iter2 = iter2()
3171 3171 return _iterordered(self._ascending, iter1, iter2)
3172 3172
3173 3173 def _trysetasclist(self):
3174 3174 """populate the _asclist attribute if possible and necessary"""
3175 3175 if self._genlist is not None and self._asclist is None:
3176 3176 self._asclist = sorted(self._genlist)
3177 3177
3178 3178 @property
3179 3179 def fastasc(self):
3180 3180 self._trysetasclist()
3181 3181 if self._asclist is not None:
3182 3182 return self._asclist.__iter__
3183 3183 iter1 = self._r1.fastasc
3184 3184 iter2 = self._r2.fastasc
3185 3185 if None in (iter1, iter2):
3186 3186 return None
3187 3187 return lambda: _iterordered(True, iter1(), iter2())
3188 3188
3189 3189 @property
3190 3190 def fastdesc(self):
3191 3191 self._trysetasclist()
3192 3192 if self._asclist is not None:
3193 3193 return self._asclist.__reversed__
3194 3194 iter1 = self._r1.fastdesc
3195 3195 iter2 = self._r2.fastdesc
3196 3196 if None in (iter1, iter2):
3197 3197 return None
3198 3198 return lambda: _iterordered(False, iter1(), iter2())
3199 3199
3200 3200 def __contains__(self, x):
3201 3201 return x in self._r1 or x in self._r2
3202 3202
3203 3203 def sort(self, reverse=False):
3204 3204 """Sort the added set
3205 3205
3206 3206 For this we use the cached list with all the generated values and if we
3207 3207 know they are ascending or descending we can sort them in a smart way.
3208 3208 """
3209 3209 self._ascending = not reverse
3210 3210
3211 3211 def isascending(self):
3212 3212 return self._ascending is not None and self._ascending
3213 3213
3214 3214 def isdescending(self):
3215 3215 return self._ascending is not None and not self._ascending
3216 3216
3217 3217 def reverse(self):
3218 3218 if self._ascending is None:
3219 3219 self._list.reverse()
3220 3220 else:
3221 3221 self._ascending = not self._ascending
3222 3222
3223 3223 def first(self):
3224 3224 for x in self:
3225 3225 return x
3226 3226 return None
3227 3227
3228 3228 def last(self):
3229 3229 self.reverse()
3230 3230 val = self.first()
3231 3231 self.reverse()
3232 3232 return val
3233 3233
3234 3234 def __repr__(self):
3235 3235 d = {None: '', False: '-', True: '+'}[self._ascending]
3236 3236 return '<%s%s %r, %r>' % (type(self).__name__, d, self._r1, self._r2)
3237 3237
3238 3238 class generatorset(abstractsmartset):
3239 3239 """Wrap a generator for lazy iteration
3240 3240
3241 3241 Wrapper structure for generators that provides lazy membership and can
3242 3242 be iterated more than once.
3243 3243 When asked for membership, it generates values until it either finds the
3244 3244 requested one or has gone through all the elements in the generator.
3245 3245 """
3246 3246 def __init__(self, gen, iterasc=None):
3247 3247 """
3248 3248 gen: a generator producing the values for the generatorset.
3249 3249 """
3250 3250 self._gen = gen
3251 3251 self._asclist = None
3252 3252 self._cache = {}
3253 3253 self._genlist = []
3254 3254 self._finished = False
3255 3255 self._ascending = True
3256 3256 if iterasc is not None:
3257 3257 if iterasc:
3258 3258 self.fastasc = self._iterator
3259 3259 self.__contains__ = self._asccontains
3260 3260 else:
3261 3261 self.fastdesc = self._iterator
3262 3262 self.__contains__ = self._desccontains
3263 3263
3264 3264 def __nonzero__(self):
3265 3265 # Do not use 'for r in self' because it will enforce the iteration
3266 3266 # order (default ascending), possibly unrolling a whole descending
3267 3267 # iterator.
3268 3268 if self._genlist:
3269 3269 return True
3270 3270 for r in self._consumegen():
3271 3271 return True
3272 3272 return False
3273 3273
3274 3274 def __contains__(self, x):
3275 3275 if x in self._cache:
3276 3276 return self._cache[x]
3277 3277
3278 3278 # Use new values only, as existing values would be cached.
3279 3279 for l in self._consumegen():
3280 3280 if l == x:
3281 3281 return True
3282 3282
3283 3283 self._cache[x] = False
3284 3284 return False
3285 3285
3286 3286 def _asccontains(self, x):
3287 3287 """version of contains optimised for ascending generator"""
3288 3288 if x in self._cache:
3289 3289 return self._cache[x]
3290 3290
3291 3291 # Use new values only, as existing values would be cached.
3292 3292 for l in self._consumegen():
3293 3293 if l == x:
3294 3294 return True
3295 3295 if l > x:
3296 3296 break
3297 3297
3298 3298 self._cache[x] = False
3299 3299 return False
3300 3300
3301 3301 def _desccontains(self, x):
3302 3302 """version of contains optimised for descending generator"""
3303 3303 if x in self._cache:
3304 3304 return self._cache[x]
3305 3305
3306 3306 # Use new values only, as existing values would be cached.
3307 3307 for l in self._consumegen():
3308 3308 if l == x:
3309 3309 return True
3310 3310 if l < x:
3311 3311 break
3312 3312
3313 3313 self._cache[x] = False
3314 3314 return False
3315 3315
3316 3316 def __iter__(self):
3317 3317 if self._ascending:
3318 3318 it = self.fastasc
3319 3319 else:
3320 3320 it = self.fastdesc
3321 3321 if it is not None:
3322 3322 return it()
3323 3323 # we need to consume the iterator
3324 3324 for x in self._consumegen():
3325 3325 pass
3326 3326 # recall the same code
3327 3327 return iter(self)
3328 3328
3329 3329 def _iterator(self):
3330 3330 if self._finished:
3331 3331 return iter(self._genlist)
3332 3332
3333 3333 # We have to use this complex iteration strategy to allow multiple
3334 3334 # iterations at the same time. We need to be able to catch revisions
3335 3335 # removed from _consumegen and added to genlist in another instance.
3336 3336 #
3337 3337 # Getting rid of it would provide about a 15% speedup on this
3338 3338 # iteration.
3339 3339 genlist = self._genlist
3340 3340 nextrev = self._consumegen().next
3341 3341 _len = len # cache global lookup
3342 3342 def gen():
3343 3343 i = 0
3344 3344 while True:
3345 3345 if i < _len(genlist):
3346 3346 yield genlist[i]
3347 3347 else:
3348 3348 yield nextrev()
3349 3349 i += 1
3350 3350 return gen()
3351 3351
3352 3352 def _consumegen(self):
3353 3353 cache = self._cache
3354 3354 genlist = self._genlist.append
3355 3355 for item in self._gen:
3356 3356 cache[item] = True
3357 3357 genlist(item)
3358 3358 yield item
3359 3359 if not self._finished:
3360 3360 self._finished = True
3361 3361 asc = self._genlist[:]
3362 3362 asc.sort()
3363 3363 self._asclist = asc
3364 3364 self.fastasc = asc.__iter__
3365 3365 self.fastdesc = asc.__reversed__
3366 3366
3367 3367 def __len__(self):
3368 3368 for x in self._consumegen():
3369 3369 pass
3370 3370 return len(self._genlist)
3371 3371
3372 3372 def sort(self, reverse=False):
3373 3373 self._ascending = not reverse
3374 3374
3375 3375 def reverse(self):
3376 3376 self._ascending = not self._ascending
3377 3377
3378 3378 def isascending(self):
3379 3379 return self._ascending
3380 3380
3381 3381 def isdescending(self):
3382 3382 return not self._ascending
3383 3383
3384 3384 def first(self):
3385 3385 if self._ascending:
3386 3386 it = self.fastasc
3387 3387 else:
3388 3388 it = self.fastdesc
3389 3389 if it is None:
3390 3390 # we need to consume all and try again
3391 3391 for x in self._consumegen():
3392 3392 pass
3393 3393 return self.first()
3394 3394 return next(it(), None)
3395 3395
3396 3396 def last(self):
3397 3397 if self._ascending:
3398 3398 it = self.fastdesc
3399 3399 else:
3400 3400 it = self.fastasc
3401 3401 if it is None:
3402 3402 # we need to consume all and try again
3403 3403 for x in self._consumegen():
3404 3404 pass
3405 3405 return self.first()
3406 3406 return next(it(), None)
3407 3407
3408 3408 def __repr__(self):
3409 3409 d = {False: '-', True: '+'}[self._ascending]
3410 3410 return '<%s%s>' % (type(self).__name__, d)
3411 3411
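# A minimal usage sketch for generatorset (an illustration, not part of the
# original module). Values are pulled from the wrapped generator on demand
# and cached, so the set can be probed for membership and iterated more than
# once; `iterasc=True` is a promise that the generator yields in ascending
# order:
#
#     >>> gs = generatorset(iter([0, 2, 4, 6]), iterasc=True)
#     >>> 3 in gs, 4 in gs
#     (False, True)
#     >>> list(gs), list(gs)      # safe to re-iterate
#     ([0, 2, 4, 6], [0, 2, 4, 6])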
3412 3412 class spanset(abstractsmartset):
3413 3413 """Duck type for the baseset class which represents a range of revisions
3414 3414 and can work lazily, without keeping the whole range in memory
3415 3415
3416 3416 Note that spanset(x, y) behaves almost like xrange(x, y) except for two
3417 3417 notable points:
3418 3418 - when x > y it will be automatically descending,
3419 3419 - revisions filtered by this repoview will be skipped.
3420 3420
3421 3421 """
3422 3422 def __init__(self, repo, start=0, end=None):
3423 3423 """
3424 3424 start: first revision included in the set
3425 3425 (defaults to 0)
3426 3426 end: first revision excluded (last + 1)
3427 3427 (defaults to len(repo))
3428 3428
3429 3429 Spanset will be descending if `end` < `start`.
3430 3430 """
3431 3431 if end is None:
3432 3432 end = len(repo)
3433 3433 self._ascending = start <= end
3434 3434 if not self._ascending:
3435 3435 start, end = end + 1, start + 1
3436 3436 self._start = start
3437 3437 self._end = end
3438 3438 self._hiddenrevs = repo.changelog.filteredrevs
3439 3439
3440 3440 def sort(self, reverse=False):
3441 3441 self._ascending = not reverse
3442 3442
3443 3443 def reverse(self):
3444 3444 self._ascending = not self._ascending
3445 3445
3446 3446 def _iterfilter(self, iterrange):
3447 3447 s = self._hiddenrevs
3448 3448 for r in iterrange:
3449 3449 if r not in s:
3450 3450 yield r
3451 3451
3452 3452 def __iter__(self):
3453 3453 if self._ascending:
3454 3454 return self.fastasc()
3455 3455 else:
3456 3456 return self.fastdesc()
3457 3457
3458 3458 def fastasc(self):
3459 3459 iterrange = xrange(self._start, self._end)
3460 3460 if self._hiddenrevs:
3461 3461 return self._iterfilter(iterrange)
3462 3462 return iter(iterrange)
3463 3463
3464 3464 def fastdesc(self):
3465 3465 iterrange = xrange(self._end - 1, self._start - 1, -1)
3466 3466 if self._hiddenrevs:
3467 3467 return self._iterfilter(iterrange)
3468 3468 return iter(iterrange)
3469 3469
3470 3470 def __contains__(self, rev):
3471 3471 hidden = self._hiddenrevs
3472 3472 return ((self._start <= rev < self._end)
3473 3473 and not (hidden and rev in hidden))
3474 3474
3475 3475 def __nonzero__(self):
3476 3476 for r in self:
3477 3477 return True
3478 3478 return False
3479 3479
3480 3480 def __len__(self):
3481 3481 if not self._hiddenrevs:
3482 3482 return abs(self._end - self._start)
3483 3483 else:
3484 3484 count = 0
3485 3485 start = self._start
3486 3486 end = self._end
3487 3487 for rev in self._hiddenrevs:
3488 3488 if (end < rev <= start) or (start <= rev < end):
3489 3489 count += 1
3490 3490 return abs(self._end - self._start) - count
3491 3491
3492 3492 def isascending(self):
3493 3493 return self._ascending
3494 3494
3495 3495 def isdescending(self):
3496 3496 return not self._ascending
3497 3497
3498 3498 def first(self):
3499 3499 if self._ascending:
3500 3500 it = self.fastasc
3501 3501 else:
3502 3502 it = self.fastdesc
3503 3503 for x in it():
3504 3504 return x
3505 3505 return None
3506 3506
3507 3507 def last(self):
3508 3508 if self._ascending:
3509 3509 it = self.fastdesc
3510 3510 else:
3511 3511 it = self.fastasc
3512 3512 for x in it():
3513 3513 return x
3514 3514 return None
3515 3515
3516 3516 def __repr__(self):
3517 3517 d = {False: '-', True: '+'}[self._ascending]
3518 3518 return '<%s%s %d:%d>' % (type(self).__name__, d,
3519 3519 self._start, self._end - 1)
3520 3520
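# A minimal usage sketch for spanset (an illustration, not part of the
# original module), assuming a `repo` object is at hand:
#
#     >>> spanset(repo, 0, 5)        # revisions 0..4, ascending
#     >>> spanset(repo, 5, 0)        # revisions 5, 4, 3, 2, 1, descending
#
# Revisions hidden by the repoview are skipped transparently, and the usual
# smartset protocol (first(), last(), reverse(), sort(), ...) applies.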
3521 3521 class fullreposet(spanset):
3522 3522 """a set containing all revisions in the repo
3523 3523
3524 3524 This class exists to host special optimization and magic to handle virtual
3525 3525 revisions such as "null".
3526 3526 """
3527 3527
3528 3528 def __init__(self, repo):
3529 3529 super(fullreposet, self).__init__(repo)
3530 3530
3531 3531 def __and__(self, other):
3532 3532 """As self contains the whole repo, all of the other set should also be
3533 3533 in self. Therefore `self & other = other`.
3534 3534
3535 3535 This boldly assumes the other set contains valid revs only.
3536 3536 """
3537 3537 # other is not a smartset, make it so
3538 3538 if not util.safehasattr(other, 'isascending'):
3539 3539 # filter out hidden revisions
3540 3540 # (this boldly assumes all smartsets are pure)
3541 3541 #
3542 3542 # `other` was used with "&", let's assume this is a set-like
3543 3543 # object.
3544 3544 other = baseset(other - self._hiddenrevs)
3545 3545
3546 3546 # XXX As fullreposet is also used as bootstrap, this is wrong.
3547 3547 #
3548 3548 # With a giveme312() revset returning [3,1,2], this makes
3549 3549 # 'hg log -r "giveme312()"' -> 1, 2, 3 (wrong)
3550 3550 # We cannot just drop it because other usages still need to sort it:
3551 3551 # 'hg log -r "all() and giveme312()"' -> 1, 2, 3 (right)
3552 3552 #
3553 3553 # There are also some faulty revset implementations that rely on it
3554 3554 # (e.g. children, as of its state in e8075329c5fb)
3555 3555 #
3556 3556 # When we fix the two points above we can move this into the if clause
3557 3557 other.sort(reverse=self.isdescending())
3558 3558 return other
3559 3559
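# A minimal sketch of the fullreposet identity described above (an
# illustration, not part of the original module), assuming a `repo` object is
# at hand. Since fullreposet contains every revision, intersecting simply
# hands back the other operand, sorted to match this set's direction:
#
#     >>> rs = fullreposet(repo) & baseset([3, 1, 2])
#     >>> list(rs)
#     [1, 2, 3]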
3560 3560 def prettyformatset(revs):
3561 3561 lines = []
3562 3562 rs = repr(revs)
3563 3563 p = 0
3564 3564 while p < len(rs):
3565 3565 q = rs.find('<', p + 1)
3566 3566 if q < 0:
3567 3567 q = len(rs)
3568 3568 l = rs.count('<', 0, p) - rs.count('>', 0, p)
3569 3569 assert l >= 0
3570 3570 lines.append((l, rs[p:q].rstrip()))
3571 3571 p = q
3572 3572 return '\n'.join(' ' * l + s for l, s in lines)
3573 3573
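# For illustration (not part of the original module): prettyformatset indents
# nested smartset reprs by one space per nesting level, so an addset of two
# basesets would print roughly as
#
#     <addset
#      <baseset [0, 3, 2]>,
#      <baseset [5, 2, 4]>>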
3574 3574 def loadpredicate(ui, extname, registrarobj):
3575 3575 """Load revset predicates from specified registrarobj
3576 3576 """
3577 3577 for name, func in registrarobj._table.iteritems():
3578 3578 symbols[name] = func
3579 3579 if func._safe:
3580 3580 safesymbols.add(name)
3581 3581
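# A sketch of how loadpredicate is typically fed (an illustration with
# hypothetical names, not part of the original module): an extension creates
# a registrar.revsetpredicate table and decorates its predicate functions;
# the loader above then copies them into `symbols` (and into `safesymbols`
# when the predicate is declared safe):
#
#     revsetpredicate = registrar.revsetpredicate()
#
#     @revsetpredicate('mypredicate(pattern)', safe=True)
#     def mypredicate(repo, subset, x):
#         """changesets matching a hypothetical condition"""
#         # ... compute and return a smartset derived from `subset` here ...
#         return subset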
3582 3582 # load built-in predicates explicitly to set up safesymbols
3583 3583 loadpredicate(None, None, predicate)
3584 3584
3585 3585 # tell hggettext to extract docstrings from these functions:
3586 3586 i18nfunctions = symbols.values()