revset: stub to add extra data to baseset for better inspection...
Yuya Nishihara
r28425:02d7faaf default
@@ -1,3663 +1,3671 @@
1 1 # revset.py - revision set queries for mercurial
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import heapq
11 11 import re
12 12
13 13 from .i18n import _
14 14 from . import (
15 15 destutil,
16 16 encoding,
17 17 error,
18 18 hbisect,
19 19 match as matchmod,
20 20 node,
21 21 obsolete as obsmod,
22 22 parser,
23 23 pathutil,
24 24 phases,
25 25 registrar,
26 26 repoview,
27 27 util,
28 28 )
29 29
30 30 def _revancestors(repo, revs, followfirst):
31 31 """Like revlog.ancestors(), but supports followfirst."""
32 32 if followfirst:
33 33 cut = 1
34 34 else:
35 35 cut = None
36 36 cl = repo.changelog
37 37
38 38 def iterate():
39 39 revs.sort(reverse=True)
40 40 irevs = iter(revs)
41 41 h = []
42 42
43 43 inputrev = next(irevs, None)
44 44 if inputrev is not None:
45 45 heapq.heappush(h, -inputrev)
46 46
47 47 seen = set()
48 48 while h:
49 49 current = -heapq.heappop(h)
50 50 if current == inputrev:
51 51 inputrev = next(irevs, None)
52 52 if inputrev is not None:
53 53 heapq.heappush(h, -inputrev)
54 54 if current not in seen:
55 55 seen.add(current)
56 56 yield current
57 57 for parent in cl.parentrevs(current)[:cut]:
58 58 if parent != node.nullrev:
59 59 heapq.heappush(h, -parent)
60 60
61 61 return generatorset(iterate(), iterasc=False)
62 62
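# Illustrative sketch, not part of the original module: given a hypothetical
# linear history 0 -> 1 -> 2, the heap walk above yields ancestors in
# descending revision order, so roughly:
#
#   >>> s = _revancestors(repo, baseset([2]), followfirst=False)
#   >>> list(s)   # lazy generatorset, iterated in descending order
#   [2, 1, 0]
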
63 63 def _revdescendants(repo, revs, followfirst):
64 64 """Like revlog.descendants() but supports followfirst."""
65 65 if followfirst:
66 66 cut = 1
67 67 else:
68 68 cut = None
69 69
70 70 def iterate():
71 71 cl = repo.changelog
72 72 # XXX this should be 'parentset.min()' assuming 'parentset' is a
73 73 # smartset (and if it is not, it should.)
74 74 first = min(revs)
75 75 nullrev = node.nullrev
76 76 if first == nullrev:
77 77 # Are there nodes with a null first parent and a non-null
78 78 # second one? Maybe. Do we care? Probably not.
79 79 for i in cl:
80 80 yield i
81 81 else:
82 82 seen = set(revs)
83 83 for i in cl.revs(first + 1):
84 84 for x in cl.parentrevs(i)[:cut]:
85 85 if x != nullrev and x in seen:
86 86 seen.add(i)
87 87 yield i
88 88 break
89 89
90 90 return generatorset(iterate(), iterasc=True)
91 91
92 92 def _reachablerootspure(repo, minroot, roots, heads, includepath):
93 93 """return (heads(::<roots> and ::<heads>))
94 94
95 95 If includepath is True, return (<roots>::<heads>)."""
96 96 if not roots:
97 97 return []
98 98 parentrevs = repo.changelog.parentrevs
99 99 roots = set(roots)
100 100 visit = list(heads)
101 101 reachable = set()
102 102 seen = {}
103 103 # prefetch all the things! (because python is slow)
104 104 reached = reachable.add
105 105 dovisit = visit.append
106 106 nextvisit = visit.pop
107 107 # open-code the post-order traversal due to the tiny size of
108 108 # sys.getrecursionlimit()
109 109 while visit:
110 110 rev = nextvisit()
111 111 if rev in roots:
112 112 reached(rev)
113 113 if not includepath:
114 114 continue
115 115 parents = parentrevs(rev)
116 116 seen[rev] = parents
117 117 for parent in parents:
118 118 if parent >= minroot and parent not in seen:
119 119 dovisit(parent)
120 120 if not reachable:
121 121 return baseset()
122 122 if not includepath:
123 123 return reachable
124 124 for rev in sorted(seen):
125 125 for parent in seen[rev]:
126 126 if parent in reachable:
127 127 reached(rev)
128 128 return reachable
129 129
130 130 def reachableroots(repo, roots, heads, includepath=False):
131 131 """return (heads(::<roots> and ::<heads>))
132 132
133 133 If includepath is True, return (<roots>::<heads>)."""
134 134 if not roots:
135 135 return baseset()
136 136 minroot = roots.min()
137 137 roots = list(roots)
138 138 heads = list(heads)
139 139 try:
140 140 revs = repo.changelog.reachableroots(minroot, heads, roots, includepath)
141 141 except AttributeError:
142 142 revs = _reachablerootspure(repo, minroot, roots, heads, includepath)
143 143 revs = baseset(revs)
144 144 revs.sort()
145 145 return revs
146 146
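# Illustrative sketch, assuming a hypothetical repo with the linear history
# 0 -> 1 -> 2 -> 3. The C fast path and the pure-Python fallback are meant to
# agree on the result; only the shape of the query changes with includepath:
#
#   >>> list(reachableroots(repo, baseset([1]), baseset([3])))
#   [1]                                   # heads(::1 and ::3)
#   >>> list(reachableroots(repo, baseset([1]), baseset([3]), includepath=True))
#   [1, 2, 3]                             # 1::3
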
147 147 elements = {
148 148 # token-type: binding-strength, primary, prefix, infix, suffix
149 149 "(": (21, None, ("group", 1, ")"), ("func", 1, ")"), None),
150 150 "##": (20, None, None, ("_concat", 20), None),
151 151 "~": (18, None, None, ("ancestor", 18), None),
152 152 "^": (18, None, None, ("parent", 18), ("parentpost", 18)),
153 153 "-": (5, None, ("negate", 19), ("minus", 5), None),
154 154 "::": (17, None, ("dagrangepre", 17), ("dagrange", 17),
155 155 ("dagrangepost", 17)),
156 156 "..": (17, None, ("dagrangepre", 17), ("dagrange", 17),
157 157 ("dagrangepost", 17)),
158 158 ":": (15, "rangeall", ("rangepre", 15), ("range", 15), ("rangepost", 15)),
159 159 "not": (10, None, ("not", 10), None, None),
160 160 "!": (10, None, ("not", 10), None, None),
161 161 "and": (5, None, None, ("and", 5), None),
162 162 "&": (5, None, None, ("and", 5), None),
163 163 "%": (5, None, None, ("only", 5), ("onlypost", 5)),
164 164 "or": (4, None, None, ("or", 4), None),
165 165 "|": (4, None, None, ("or", 4), None),
166 166 "+": (4, None, None, ("or", 4), None),
167 167 "=": (3, None, None, ("keyvalue", 3), None),
168 168 ",": (2, None, None, ("list", 2), None),
169 169 ")": (0, None, None, None, None),
170 170 "symbol": (0, "symbol", None, None, None),
171 171 "string": (0, "string", None, None, None),
172 172 "end": (0, None, None, None, None),
173 173 }
174 174
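# Reading the table above (illustrative note, not part of the original file):
# each entry maps a token type to its binding strength plus the handlers used
# in primary, prefix, infix and suffix position. For example, "~" only has an
# infix handler ("ancestor", 18), so an expression such as "foo~2 or bar"
# groups the tighter-binding "~" first and parses roughly as:
#
#   ('or', ('ancestor', ('symbol', 'foo'), ('symbol', '2')), ('symbol', 'bar'))
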
175 175 keywords = set(['and', 'or', 'not'])
176 176
177 177 # default set of valid characters for the initial letter of symbols
178 178 _syminitletters = set(c for c in [chr(i) for i in xrange(256)]
179 179 if c.isalnum() or c in '._@' or ord(c) > 127)
180 180
181 181 # default set of valid characters for non-initial letters of symbols
182 182 _symletters = set(c for c in [chr(i) for i in xrange(256)]
183 183 if c.isalnum() or c in '-._/@' or ord(c) > 127)
184 184
185 185 def tokenize(program, lookup=None, syminitletters=None, symletters=None):
186 186 '''
187 187 Parse a revset statement into a stream of tokens
188 188
189 189 ``syminitletters`` is the set of valid characters for the initial
190 190 letter of symbols.
191 191
192 192 By default, character ``c`` is recognized as valid for initial
193 193 letter of symbols, if ``c.isalnum() or c in '._@' or ord(c) > 127``.
194 194
195 195 ``symletters`` is the set of valid characters for non-initial
196 196 letters of symbols.
197 197
198 198 By default, character ``c`` is recognized as valid for non-initial
199 199 letters of symbols, if ``c.isalnum() or c in '-._/@' or ord(c) > 127``.
200 200
201 201 Check that @ is a valid unquoted token character (issue3686):
202 202 >>> list(tokenize("@::"))
203 203 [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]
204 204
205 205 '''
206 206 if syminitletters is None:
207 207 syminitletters = _syminitletters
208 208 if symletters is None:
209 209 symletters = _symletters
210 210
211 211 if program and lookup:
212 212 # attempt to parse old-style ranges first to deal with
213 213 # things like old-tag which contain query metacharacters
214 214 parts = program.split(':', 1)
215 215 if all(lookup(sym) for sym in parts if sym):
216 216 if parts[0]:
217 217 yield ('symbol', parts[0], 0)
218 218 if len(parts) > 1:
219 219 s = len(parts[0])
220 220 yield (':', None, s)
221 221 if parts[1]:
222 222 yield ('symbol', parts[1], s + 1)
223 223 yield ('end', None, len(program))
224 224 return
225 225
226 226 pos, l = 0, len(program)
227 227 while pos < l:
228 228 c = program[pos]
229 229 if c.isspace(): # skip inter-token whitespace
230 230 pass
231 231 elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
232 232 yield ('::', None, pos)
233 233 pos += 1 # skip ahead
234 234 elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
235 235 yield ('..', None, pos)
236 236 pos += 1 # skip ahead
237 237 elif c == '#' and program[pos:pos + 2] == '##': # look ahead carefully
238 238 yield ('##', None, pos)
239 239 pos += 1 # skip ahead
240 240 elif c in "():=,-|&+!~^%": # handle simple operators
241 241 yield (c, None, pos)
242 242 elif (c in '"\'' or c == 'r' and
243 243 program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
244 244 if c == 'r':
245 245 pos += 1
246 246 c = program[pos]
247 247 decode = lambda x: x
248 248 else:
249 249 decode = parser.unescapestr
250 250 pos += 1
251 251 s = pos
252 252 while pos < l: # find closing quote
253 253 d = program[pos]
254 254 if d == '\\': # skip over escaped characters
255 255 pos += 2
256 256 continue
257 257 if d == c:
258 258 yield ('string', decode(program[s:pos]), s)
259 259 break
260 260 pos += 1
261 261 else:
262 262 raise error.ParseError(_("unterminated string"), s)
263 263 # gather up a symbol/keyword
264 264 elif c in syminitletters:
265 265 s = pos
266 266 pos += 1
267 267 while pos < l: # find end of symbol
268 268 d = program[pos]
269 269 if d not in symletters:
270 270 break
271 271 if d == '.' and program[pos - 1] == '.': # special case for ..
272 272 pos -= 1
273 273 break
274 274 pos += 1
275 275 sym = program[s:pos]
276 276 if sym in keywords: # operator keywords
277 277 yield (sym, None, s)
278 278 elif '-' in sym:
279 279 # some jerk gave us foo-bar-baz, try to check if it's a symbol
280 280 if lookup and lookup(sym):
281 281 # looks like a real symbol
282 282 yield ('symbol', sym, s)
283 283 else:
284 284 # looks like an expression
285 285 parts = sym.split('-')
286 286 for p in parts[:-1]:
287 287 if p: # possible consecutive -
288 288 yield ('symbol', p, s)
289 289 s += len(p)
290 290 yield ('-', None, pos)
291 291 s += 1
292 292 if parts[-1]: # possible trailing -
293 293 yield ('symbol', parts[-1], s)
294 294 else:
295 295 yield ('symbol', sym, s)
296 296 pos -= 1
297 297 else:
298 298 raise error.ParseError(_("syntax error in revset '%s'") %
299 299 program, pos)
300 300 pos += 1
301 301 yield ('end', None, pos)
302 302
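# Illustrative doctest-style sketch in the spirit of the issue3686 example in
# the docstring above (not part of the original file):
#
#   >>> list(tokenize("tip~2"))
#   [('symbol', 'tip', 0), ('~', None, 3), ('symbol', '2', 4), ('end', None, 5)]
#
# The tokenizer only emits the bare '~' token; binding it to its operands is
# left to the parser driven by the `elements` table above.
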
303 303 def parseerrordetail(inst):
304 304 """Compose error message from specified ParseError object
305 305 """
306 306 if len(inst.args) > 1:
307 307 return _('at %s: %s') % (inst.args[1], inst.args[0])
308 308 else:
309 309 return inst.args[0]
310 310
311 311 # helpers
312 312
313 313 def getstring(x, err):
314 314 if x and (x[0] == 'string' or x[0] == 'symbol'):
315 315 return x[1]
316 316 raise error.ParseError(err)
317 317
318 318 def getlist(x):
319 319 if not x:
320 320 return []
321 321 if x[0] == 'list':
322 322 return list(x[1:])
323 323 return [x]
324 324
325 325 def getargs(x, min, max, err):
326 326 l = getlist(x)
327 327 if len(l) < min or (max >= 0 and len(l) > max):
328 328 raise error.ParseError(err)
329 329 return l
330 330
331 331 def getargsdict(x, funcname, keys):
332 332 return parser.buildargsdict(getlist(x), funcname, keys.split(),
333 333 keyvaluenode='keyvalue', keynode='symbol')
334 334
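# Illustrative sketch of the argument helpers (not part of the original file).
# For a call such as `limit(all(), 2)` the parser hands the predicate an
# argument tree roughly of the form
#
#   x = ('list', ('func', ('symbol', 'all'), None), ('symbol', '2'))
#
# which the helpers unpack positionally:
#
#   >>> len(getlist(x))
#   2
#   >>> sorted(getargsdict(x, 'limit', 'set n offset'))
#   ['n', 'set']
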
335 335 def isvalidsymbol(tree):
336 336 """Examine whether specified ``tree`` is valid ``symbol`` or not
337 337 """
338 338 return tree[0] == 'symbol' and len(tree) > 1
339 339
340 340 def getsymbol(tree):
341 341 """Get symbol name from valid ``symbol`` in ``tree``
342 342
343 343 This assumes that ``tree`` is already examined by ``isvalidsymbol``.
344 344 """
345 345 return tree[1]
346 346
347 347 def isvalidfunc(tree):
348 348 """Examine whether specified ``tree`` is valid ``func`` or not
349 349 """
350 350 return tree[0] == 'func' and len(tree) > 1 and isvalidsymbol(tree[1])
351 351
352 352 def getfuncname(tree):
353 353 """Get function name from valid ``func`` in ``tree``
354 354
355 355 This assumes that ``tree`` is already examined by ``isvalidfunc``.
356 356 """
357 357 return getsymbol(tree[1])
358 358
359 359 def getfuncargs(tree):
360 360 """Get list of function arguments from valid ``func`` in ``tree``
361 361
362 362 This assumes that ``tree`` is already examined by ``isvalidfunc``.
363 363 """
364 364 if len(tree) > 2:
365 365 return getlist(tree[2])
366 366 else:
367 367 return []
368 368
369 369 def getset(repo, subset, x):
370 370 if not x:
371 371 raise error.ParseError(_("missing argument"))
372 372 s = methods[x[0]](repo, subset, *x[1:])
373 373 if util.safehasattr(s, 'isascending'):
374 374 return s
375 375 if (repo.ui.configbool('devel', 'all-warnings')
376 376 or repo.ui.configbool('devel', 'old-revset')):
377 377 # else case should not happen, because all non-func are internal,
378 378 # ignoring for now.
379 379 if x[0] == 'func' and x[1][0] == 'symbol' and x[1][1] in symbols:
380 380 repo.ui.develwarn('revset "%s" use list instead of smartset, '
381 381 '(upgrade your code)' % x[1][1])
382 382 return baseset(s)
383 383
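# Illustrative, hypothetical example of what triggers the develwarn above
# (not part of the original file): a predicate that returns a plain list
# instead of a smartset, e.g.
#
#   @predicate('oddrevs()')
#   def oddrevs(repo, subset, x):
#       return [r for r in subset if r % 2]   # list, not a smartset
#
# still works because getset() wraps the result in baseset(), but with
# devel.all-warnings enabled it is reported as "use list instead of smartset".
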
384 384 def _getrevsource(repo, r):
385 385 extra = repo[r].extra()
386 386 for label in ('source', 'transplant_source', 'rebase_source'):
387 387 if label in extra:
388 388 try:
389 389 return repo[extra[label]].rev()
390 390 except error.RepoLookupError:
391 391 pass
392 392 return None
393 393
394 394 # operator methods
395 395
396 396 def stringset(repo, subset, x):
397 397 x = repo[x].rev()
398 398 if (x in subset
399 399 or x == node.nullrev and isinstance(subset, fullreposet)):
400 400 return baseset([x])
401 401 return baseset()
402 402
403 403 def rangeset(repo, subset, x, y):
404 404 m = getset(repo, fullreposet(repo), x)
405 405 n = getset(repo, fullreposet(repo), y)
406 406
407 407 if not m or not n:
408 408 return baseset()
409 409 m, n = m.first(), n.last()
410 410
411 411 if m == n:
412 412 r = baseset([m])
413 413 elif n == node.wdirrev:
414 414 r = spanset(repo, m, len(repo)) + baseset([n])
415 415 elif m == node.wdirrev:
416 416 r = baseset([m]) + spanset(repo, len(repo) - 1, n - 1)
417 417 elif m < n:
418 418 r = spanset(repo, m, n + 1)
419 419 else:
420 420 r = spanset(repo, m, n - 1)
421 421 # XXX We should combine with subset first: 'subset & baseset(...)'. This is
422 422 # necessary to ensure we preserve the order in subset.
423 423 #
424 424 # This has a performance implication; carrying the sorting over when
425 425 # possible would be more efficient.
426 426 return r & subset
427 427
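# Illustrative sketch (not part of the original file): for "2:5", rangeset()
# receives the two endpoint trees, m becomes 2 and n becomes 5, and the result
# is narrowed to the incoming subset; an endpoint equal to wdirrev splices the
# virtual working-directory revision onto the span. Roughly:
#
#   2:5  ->  spanset(repo, 2, 6) & subset          # ascending
#   5:2  ->  spanset(repo, 5, 1) & subset          # descending
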
428 428 def dagrange(repo, subset, x, y):
429 429 r = fullreposet(repo)
430 430 xs = reachableroots(repo, getset(repo, r, x), getset(repo, r, y),
431 431 includepath=True)
432 432 # XXX We should combine with subset first: 'subset & baseset(...)'. This is
433 433 # necessary to ensure we preserve the order in subset.
434 434 return xs & subset
435 435
436 436 def andset(repo, subset, x, y):
437 437 return getset(repo, getset(repo, subset, x), y)
438 438
439 439 def differenceset(repo, subset, x, y):
440 440 return getset(repo, subset, x) - getset(repo, subset, y)
441 441
442 442 def orset(repo, subset, *xs):
443 443 assert xs
444 444 if len(xs) == 1:
445 445 return getset(repo, subset, xs[0])
446 446 p = len(xs) // 2
447 447 a = orset(repo, subset, *xs[:p])
448 448 b = orset(repo, subset, *xs[p:])
449 449 return a + b
450 450
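# Illustrative sketch (not part of the original file): orset() splits its
# arguments in half and recurses, so "a + b + c + d" is combined as a balanced
# tree rather than a left-leaning chain:
#
#   orset(repo, subset, a, b, c, d)
#     == orset(repo, subset, a, b) + orset(repo, subset, c, d)
#     == (a + b) + (c + d)
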
451 451 def notset(repo, subset, x):
452 452 return subset - getset(repo, subset, x)
453 453
454 454 def listset(repo, subset, *xs):
455 455 raise error.ParseError(_("can't use a list in this context"),
456 456 hint=_('see hg help "revsets.x or y"'))
457 457
458 458 def keyvaluepair(repo, subset, k, v):
459 459 raise error.ParseError(_("can't use a key-value pair in this context"))
460 460
461 461 def func(repo, subset, a, b):
462 462 if a[0] == 'symbol' and a[1] in symbols:
463 463 return symbols[a[1]](repo, subset, b)
464 464
465 465 keep = lambda fn: getattr(fn, '__doc__', None) is not None
466 466
467 467 syms = [s for (s, fn) in symbols.items() if keep(fn)]
468 468 raise error.UnknownIdentifier(a[1], syms)
469 469
470 470 # functions
471 471
472 472 # symbols are callables like:
473 473 # fn(repo, subset, x)
474 474 # with:
475 475 # repo - current repository instance
476 476 # subset - of revisions to be examined
477 477 # x - argument in tree form
478 478 symbols = {}
479 479
480 480 # symbols which can't be used for a DoS attack for any given input
481 481 # (e.g. those which accept regexes as plain strings shouldn't be included)
482 482 # functions that just return a lot of changesets (like all) don't count here
483 483 safesymbols = set()
484 484
485 485 predicate = registrar.revsetpredicate()
486 486
487 487 class extpredicate(registrar.delayregistrar):
488 488 """Decorator to register revset predicate in extensions
489 489
490 490 Usage::
491 491
492 492 revsetpredicate = revset.extpredicate()
493 493
494 494 @revsetpredicate('mypredicate(arg1, arg2[, arg3])')
495 495 def mypredicatefunc(repo, subset, x):
496 496 '''Explanation of this revset predicate ....
497 497 '''
498 498 pass
499 499
500 500 def uisetup(ui):
501 501 revsetpredicate.setup()
502 502
503 503 'revsetpredicate' instance above can be used to decorate multiple
504 504 functions, and 'setup()' on it registers all such functions at
505 505 once.
506 506 """
507 507 registrar = predicate
508 508
509 509 @predicate('_destupdate')
510 510 def _destupdate(repo, subset, x):
511 511 # experimental revset for update destination
512 512 args = getargsdict(x, 'limit', 'clean check')
513 513 return subset & baseset([destutil.destupdate(repo, **args)[0]])
514 514
515 515 @predicate('_destmerge')
516 516 def _destmerge(repo, subset, x):
517 517 # experimental revset for merge destination
518 518 sourceset = None
519 519 if x is not None:
520 520 sourceset = getset(repo, fullreposet(repo), x)
521 521 return subset & baseset([destutil.destmerge(repo, sourceset=sourceset)])
522 522
523 523 @predicate('adds(pattern)', safe=True)
524 524 def adds(repo, subset, x):
525 525 """Changesets that add a file matching pattern.
526 526
527 527 The pattern without explicit kind like ``glob:`` is expected to be
528 528 relative to the current directory and match against a file or a
529 529 directory.
530 530 """
531 531 # i18n: "adds" is a keyword
532 532 pat = getstring(x, _("adds requires a pattern"))
533 533 return checkstatus(repo, subset, pat, 1)
534 534
535 535 @predicate('ancestor(*changeset)', safe=True)
536 536 def ancestor(repo, subset, x):
537 537 """A greatest common ancestor of the changesets.
538 538
539 539 Accepts 0 or more changesets.
540 540 Will return empty list when passed no args.
541 541 Greatest common ancestor of a single changeset is that changeset.
542 542 """
543 543 # i18n: "ancestor" is a keyword
544 544 l = getlist(x)
545 545 rl = fullreposet(repo)
546 546 anc = None
547 547
548 548 # (getset(repo, rl, i) for i in l) generates a list of lists
549 549 for revs in (getset(repo, rl, i) for i in l):
550 550 for r in revs:
551 551 if anc is None:
552 552 anc = repo[r]
553 553 else:
554 554 anc = anc.ancestor(repo[r])
555 555
556 556 if anc is not None and anc.rev() in subset:
557 557 return baseset([anc.rev()])
558 558 return baseset()
559 559
560 560 def _ancestors(repo, subset, x, followfirst=False):
561 561 heads = getset(repo, fullreposet(repo), x)
562 562 if not heads:
563 563 return baseset()
564 564 s = _revancestors(repo, heads, followfirst)
565 565 return subset & s
566 566
567 567 @predicate('ancestors(set)', safe=True)
568 568 def ancestors(repo, subset, x):
569 569 """Changesets that are ancestors of a changeset in set.
570 570 """
571 571 return _ancestors(repo, subset, x)
572 572
573 573 @predicate('_firstancestors', safe=True)
574 574 def _firstancestors(repo, subset, x):
575 575 # ``_firstancestors(set)``
576 576 # Like ``ancestors(set)`` but follows only the first parents.
577 577 return _ancestors(repo, subset, x, followfirst=True)
578 578
579 579 def ancestorspec(repo, subset, x, n):
580 580 """``set~n``
581 581 Changesets that are the Nth ancestor (first parents only) of a changeset
582 582 in set.
583 583 """
584 584 try:
585 585 n = int(n[1])
586 586 except (TypeError, ValueError):
587 587 raise error.ParseError(_("~ expects a number"))
588 588 ps = set()
589 589 cl = repo.changelog
590 590 for r in getset(repo, fullreposet(repo), x):
591 591 for i in range(n):
592 592 r = cl.parentrevs(r)[0]
593 593 ps.add(r)
594 594 return subset & ps
595 595
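# Illustrative sketch (not part of the original file): with a hypothetical
# linear history 0 -> 1 -> 2 -> 3, the revset "3~2" walks two first-parent
# links from revision 3 and therefore selects revision 1; "3~0" is 3 itself.
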
596 596 @predicate('author(string)', safe=True)
597 597 def author(repo, subset, x):
598 598 """Alias for ``user(string)``.
599 599 """
600 600 # i18n: "author" is a keyword
601 601 n = encoding.lower(getstring(x, _("author requires a string")))
602 602 kind, pattern, matcher = _substringmatcher(n)
603 603 return subset.filter(lambda x: matcher(encoding.lower(repo[x].user())),
604 604 condrepr=('<user %r>', n))
605 605
606 606 @predicate('bisect(string)', safe=True)
607 607 def bisect(repo, subset, x):
608 608 """Changesets marked in the specified bisect status:
609 609
610 610 - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
611 611 - ``goods``, ``bads`` : csets topologically good/bad
612 612 - ``range`` : csets taking part in the bisection
613 613 - ``pruned`` : csets that are goods, bads or skipped
614 614 - ``untested`` : csets whose fate is yet unknown
615 615 - ``ignored`` : csets ignored due to DAG topology
616 616 - ``current`` : the cset currently being bisected
617 617 """
618 618 # i18n: "bisect" is a keyword
619 619 status = getstring(x, _("bisect requires a string")).lower()
620 620 state = set(hbisect.get(repo, status))
621 621 return subset & state
622 622
623 623 # Backward-compatibility
624 624 # - no help entry so that we do not advertise it any more
625 625 @predicate('bisected', safe=True)
626 626 def bisected(repo, subset, x):
627 627 return bisect(repo, subset, x)
628 628
629 629 @predicate('bookmark([name])', safe=True)
630 630 def bookmark(repo, subset, x):
631 631 """The named bookmark or all bookmarks.
632 632
633 633 If `name` starts with `re:`, the remainder of the name is treated as
634 634 a regular expression. To match a bookmark that actually starts with `re:`,
635 635 use the prefix `literal:`.
636 636 """
637 637 # i18n: "bookmark" is a keyword
638 638 args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
639 639 if args:
640 640 bm = getstring(args[0],
641 641 # i18n: "bookmark" is a keyword
642 642 _('the argument to bookmark must be a string'))
643 643 kind, pattern, matcher = util.stringmatcher(bm)
644 644 bms = set()
645 645 if kind == 'literal':
646 646 bmrev = repo._bookmarks.get(pattern, None)
647 647 if not bmrev:
648 648 raise error.RepoLookupError(_("bookmark '%s' does not exist")
649 649 % pattern)
650 650 bms.add(repo[bmrev].rev())
651 651 else:
652 652 matchrevs = set()
653 653 for name, bmrev in repo._bookmarks.iteritems():
654 654 if matcher(name):
655 655 matchrevs.add(bmrev)
656 656 if not matchrevs:
657 657 raise error.RepoLookupError(_("no bookmarks exist"
658 658 " that match '%s'") % pattern)
659 659 for bmrev in matchrevs:
660 660 bms.add(repo[bmrev].rev())
661 661 else:
662 662 bms = set([repo[r].rev()
663 663 for r in repo._bookmarks.values()])
664 664 bms -= set([node.nullrev])
665 665 return subset & bms
666 666
667 667 @predicate('branch(string or set)', safe=True)
668 668 def branch(repo, subset, x):
669 669 """
670 670 All changesets belonging to the given branch or the branches of the given
671 671 changesets.
672 672
673 673 If `string` starts with `re:`, the remainder of the name is treated as
674 674 a regular expression. To match a branch that actually starts with `re:`,
675 675 use the prefix `literal:`.
676 676 """
677 677 getbi = repo.revbranchcache().branchinfo
678 678
679 679 try:
680 680 b = getstring(x, '')
681 681 except error.ParseError:
682 682 # not a string, but another revspec, e.g. tip()
683 683 pass
684 684 else:
685 685 kind, pattern, matcher = util.stringmatcher(b)
686 686 if kind == 'literal':
687 687 # note: falls through to the revspec case if no branch with
688 688 # this name exists and pattern kind is not specified explicitly
689 689 if pattern in repo.branchmap():
690 690 return subset.filter(lambda r: matcher(getbi(r)[0]),
691 691 condrepr=('<branch %r>', b))
692 692 if b.startswith('literal:'):
693 693 raise error.RepoLookupError(_("branch '%s' does not exist")
694 694 % pattern)
695 695 else:
696 696 return subset.filter(lambda r: matcher(getbi(r)[0]),
697 697 condrepr=('<branch %r>', b))
698 698
699 699 s = getset(repo, fullreposet(repo), x)
700 700 b = set()
701 701 for r in s:
702 702 b.add(getbi(r)[0])
703 703 c = s.__contains__
704 704 return subset.filter(lambda r: c(r) or getbi(r)[0] in b,
705 705 condrepr=lambda: '<branch %r>' % sorted(b))
706 706
707 707 @predicate('bumped()', safe=True)
708 708 def bumped(repo, subset, x):
709 709 """Mutable changesets marked as successors of public changesets.
710 710
711 711 Only non-public and non-obsolete changesets can be `bumped`.
712 712 """
713 713 # i18n: "bumped" is a keyword
714 714 getargs(x, 0, 0, _("bumped takes no arguments"))
715 715 bumped = obsmod.getrevs(repo, 'bumped')
716 716 return subset & bumped
717 717
718 718 @predicate('bundle()', safe=True)
719 719 def bundle(repo, subset, x):
720 720 """Changesets in the bundle.
721 721
722 722 Bundle must be specified by the -R option."""
723 723
724 724 try:
725 725 bundlerevs = repo.changelog.bundlerevs
726 726 except AttributeError:
727 727 raise error.Abort(_("no bundle provided - specify with -R"))
728 728 return subset & bundlerevs
729 729
730 730 def checkstatus(repo, subset, pat, field):
731 731 hasset = matchmod.patkind(pat) == 'set'
732 732
733 733 mcache = [None]
734 734 def matches(x):
735 735 c = repo[x]
736 736 if not mcache[0] or hasset:
737 737 mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
738 738 m = mcache[0]
739 739 fname = None
740 740 if not m.anypats() and len(m.files()) == 1:
741 741 fname = m.files()[0]
742 742 if fname is not None:
743 743 if fname not in c.files():
744 744 return False
745 745 else:
746 746 for f in c.files():
747 747 if m(f):
748 748 break
749 749 else:
750 750 return False
751 751 files = repo.status(c.p1().node(), c.node())[field]
752 752 if fname is not None:
753 753 if fname in files:
754 754 return True
755 755 else:
756 756 for f in files:
757 757 if m(f):
758 758 return True
759 759
760 760 return subset.filter(matches, condrepr=('<status[%r] %r>', field, pat))
761 761
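# Note (illustrative, not part of the original file): `field` indexes the
# tuple returned by repo.status(), so adds() below passes 1 (the "added" slot)
# and modifies() passes 0 (the "modified" slot); the helper checks c.files()
# first so it can bail out before computing the full status.
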
762 762 def _children(repo, narrow, parentset):
763 763 if not parentset:
764 764 return baseset()
765 765 cs = set()
766 766 pr = repo.changelog.parentrevs
767 767 minrev = parentset.min()
768 768 for r in narrow:
769 769 if r <= minrev:
770 770 continue
771 771 for p in pr(r):
772 772 if p in parentset:
773 773 cs.add(r)
774 774 # XXX using a set to feed the baseset is wrong. Sets are not ordered.
775 775 # This does not break because of other fullreposet misbehavior.
776 776 return baseset(cs)
777 777
778 778 @predicate('children(set)', safe=True)
779 779 def children(repo, subset, x):
780 780 """Child changesets of changesets in set.
781 781 """
782 782 s = getset(repo, fullreposet(repo), x)
783 783 cs = _children(repo, subset, s)
784 784 return subset & cs
785 785
786 786 @predicate('closed()', safe=True)
787 787 def closed(repo, subset, x):
788 788 """Changeset is closed.
789 789 """
790 790 # i18n: "closed" is a keyword
791 791 getargs(x, 0, 0, _("closed takes no arguments"))
792 792 return subset.filter(lambda r: repo[r].closesbranch(),
793 793 condrepr='<branch closed>')
794 794
795 795 @predicate('contains(pattern)')
796 796 def contains(repo, subset, x):
797 797 """The revision's manifest contains a file matching pattern (but might not
798 798 modify it). See :hg:`help patterns` for information about file patterns.
799 799
800 800 The pattern without explicit kind like ``glob:`` is expected to be
801 801 relative to the current directory and match against a file exactly
802 802 for efficiency.
803 803 """
804 804 # i18n: "contains" is a keyword
805 805 pat = getstring(x, _("contains requires a pattern"))
806 806
807 807 def matches(x):
808 808 if not matchmod.patkind(pat):
809 809 pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
810 810 if pats in repo[x]:
811 811 return True
812 812 else:
813 813 c = repo[x]
814 814 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
815 815 for f in c.manifest():
816 816 if m(f):
817 817 return True
818 818 return False
819 819
820 820 return subset.filter(matches, condrepr=('<contains %r>', pat))
821 821
822 822 @predicate('converted([id])', safe=True)
823 823 def converted(repo, subset, x):
824 824 """Changesets converted from the given identifier in the old repository if
825 825 present, or all converted changesets if no identifier is specified.
826 826 """
827 827
828 828 # There is exactly no chance of resolving the revision, so do a simple
829 829 # string compare and hope for the best
830 830
831 831 rev = None
832 832 # i18n: "converted" is a keyword
833 833 l = getargs(x, 0, 1, _('converted takes one or no arguments'))
834 834 if l:
835 835 # i18n: "converted" is a keyword
836 836 rev = getstring(l[0], _('converted requires a revision'))
837 837
838 838 def _matchvalue(r):
839 839 source = repo[r].extra().get('convert_revision', None)
840 840 return source is not None and (rev is None or source.startswith(rev))
841 841
842 842 return subset.filter(lambda r: _matchvalue(r),
843 843 condrepr=('<converted %r>', rev))
844 844
845 845 @predicate('date(interval)', safe=True)
846 846 def date(repo, subset, x):
847 847 """Changesets within the interval, see :hg:`help dates`.
848 848 """
849 849 # i18n: "date" is a keyword
850 850 ds = getstring(x, _("date requires a string"))
851 851 dm = util.matchdate(ds)
852 852 return subset.filter(lambda x: dm(repo[x].date()[0]),
853 853 condrepr=('<date %r>', ds))
854 854
855 855 @predicate('desc(string)', safe=True)
856 856 def desc(repo, subset, x):
857 857 """Search commit message for string. The match is case-insensitive.
858 858 """
859 859 # i18n: "desc" is a keyword
860 860 ds = encoding.lower(getstring(x, _("desc requires a string")))
861 861
862 862 def matches(x):
863 863 c = repo[x]
864 864 return ds in encoding.lower(c.description())
865 865
866 866 return subset.filter(matches, condrepr=('<desc %r>', ds))
867 867
868 868 def _descendants(repo, subset, x, followfirst=False):
869 869 roots = getset(repo, fullreposet(repo), x)
870 870 if not roots:
871 871 return baseset()
872 872 s = _revdescendants(repo, roots, followfirst)
873 873
874 874 # Both sets need to be ascending in order to lazily return the union
875 875 # in the correct order.
876 876 base = subset & roots
877 877 desc = subset & s
878 878 result = base + desc
879 879 if subset.isascending():
880 880 result.sort()
881 881 elif subset.isdescending():
882 882 result.sort(reverse=True)
883 883 else:
884 884 result = subset & result
885 885 return result
886 886
887 887 @predicate('descendants(set)', safe=True)
888 888 def descendants(repo, subset, x):
889 889 """Changesets which are descendants of changesets in set.
890 890 """
891 891 return _descendants(repo, subset, x)
892 892
893 893 @predicate('_firstdescendants', safe=True)
894 894 def _firstdescendants(repo, subset, x):
895 895 # ``_firstdescendants(set)``
896 896 # Like ``descendants(set)`` but follows only the first parents.
897 897 return _descendants(repo, subset, x, followfirst=True)
898 898
899 899 @predicate('destination([set])', safe=True)
900 900 def destination(repo, subset, x):
901 901 """Changesets that were created by a graft, transplant or rebase operation,
902 902 with the given revisions specified as the source. Omitting the optional set
903 903 is the same as passing all().
904 904 """
905 905 if x is not None:
906 906 sources = getset(repo, fullreposet(repo), x)
907 907 else:
908 908 sources = fullreposet(repo)
909 909
910 910 dests = set()
911 911
912 912 # subset contains all of the possible destinations that can be returned, so
913 913 # iterate over them and see if their source(s) were provided in the arg set.
914 914 # Even if the immediate src of r is not in the arg set, src's source (or
915 915 # further back) may be. Scanning back further than the immediate src allows
916 916 # transitive transplants and rebases to yield the same results as transitive
917 917 # grafts.
918 918 for r in subset:
919 919 src = _getrevsource(repo, r)
920 920 lineage = None
921 921
922 922 while src is not None:
923 923 if lineage is None:
924 924 lineage = list()
925 925
926 926 lineage.append(r)
927 927
928 928 # The visited lineage is a match if the current source is in the arg
929 929 # set. Since every candidate dest is visited by way of iterating
930 930 # subset, any dests further back in the lineage will be tested by a
931 931 # different iteration over subset. Likewise, if the src was already
932 932 # selected, the current lineage can be selected without going back
933 933 # further.
934 934 if src in sources or src in dests:
935 935 dests.update(lineage)
936 936 break
937 937
938 938 r = src
939 939 src = _getrevsource(repo, r)
940 940
941 941 return subset.filter(dests.__contains__,
942 942 condrepr=lambda: '<destination %r>' % sorted(dests))
943 943
944 944 @predicate('divergent()', safe=True)
945 945 def divergent(repo, subset, x):
946 946 """
947 947 Final successors of changesets with an alternative set of final successors.
948 948 """
949 949 # i18n: "divergent" is a keyword
950 950 getargs(x, 0, 0, _("divergent takes no arguments"))
951 951 divergent = obsmod.getrevs(repo, 'divergent')
952 952 return subset & divergent
953 953
954 954 @predicate('extinct()', safe=True)
955 955 def extinct(repo, subset, x):
956 956 """Obsolete changesets with obsolete descendants only.
957 957 """
958 958 # i18n: "extinct" is a keyword
959 959 getargs(x, 0, 0, _("extinct takes no arguments"))
960 960 extincts = obsmod.getrevs(repo, 'extinct')
961 961 return subset & extincts
962 962
963 963 @predicate('extra(label, [value])', safe=True)
964 964 def extra(repo, subset, x):
965 965 """Changesets with the given label in the extra metadata, with the given
966 966 optional value.
967 967
968 968 If `value` starts with `re:`, the remainder of the value is treated as
969 969 a regular expression. To match a value that actually starts with `re:`,
970 970 use the prefix `literal:`.
971 971 """
972 972 args = getargsdict(x, 'extra', 'label value')
973 973 if 'label' not in args:
974 974 # i18n: "extra" is a keyword
975 975 raise error.ParseError(_('extra takes at least 1 argument'))
976 976 # i18n: "extra" is a keyword
977 977 label = getstring(args['label'], _('first argument to extra must be '
978 978 'a string'))
979 979 value = None
980 980
981 981 if 'value' in args:
982 982 # i18n: "extra" is a keyword
983 983 value = getstring(args['value'], _('second argument to extra must be '
984 984 'a string'))
985 985 kind, value, matcher = util.stringmatcher(value)
986 986
987 987 def _matchvalue(r):
988 988 extra = repo[r].extra()
989 989 return label in extra and (value is None or matcher(extra[label]))
990 990
991 991 return subset.filter(lambda r: _matchvalue(r),
992 992 condrepr=('<extra[%r] %r>', label, value))
993 993
994 994 @predicate('filelog(pattern)', safe=True)
995 995 def filelog(repo, subset, x):
996 996 """Changesets connected to the specified filelog.
997 997
998 998 For performance reasons, visits only revisions mentioned in the file-level
999 999 filelog, rather than filtering through all changesets (much faster, but
1000 1000 doesn't include deletes or duplicate changes). For a slower, more accurate
1001 1001 result, use ``file()``.
1002 1002
1003 1003 The pattern without explicit kind like ``glob:`` is expected to be
1004 1004 relative to the current directory and match against a file exactly
1005 1005 for efficiency.
1006 1006
1007 1007 If some linkrev points to revisions filtered by the current repoview, we'll
1008 1008 work around it to return a non-filtered value.
1009 1009 """
1010 1010
1011 1011 # i18n: "filelog" is a keyword
1012 1012 pat = getstring(x, _("filelog requires a pattern"))
1013 1013 s = set()
1014 1014 cl = repo.changelog
1015 1015
1016 1016 if not matchmod.patkind(pat):
1017 1017 f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
1018 1018 files = [f]
1019 1019 else:
1020 1020 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
1021 1021 files = (f for f in repo[None] if m(f))
1022 1022
1023 1023 for f in files:
1024 1024 fl = repo.file(f)
1025 1025 known = {}
1026 1026 scanpos = 0
1027 1027 for fr in list(fl):
1028 1028 fn = fl.node(fr)
1029 1029 if fn in known:
1030 1030 s.add(known[fn])
1031 1031 continue
1032 1032
1033 1033 lr = fl.linkrev(fr)
1034 1034 if lr in cl:
1035 1035 s.add(lr)
1036 1036 elif scanpos is not None:
1037 1037 # lowest matching changeset is filtered, scan further
1038 1038 # ahead in changelog
1039 1039 start = max(lr, scanpos) + 1
1040 1040 scanpos = None
1041 1041 for r in cl.revs(start):
1042 1042 # minimize parsing of non-matching entries
1043 1043 if f in cl.revision(r) and f in cl.readfiles(r):
1044 1044 try:
1045 1045 # try to use manifest delta fastpath
1046 1046 n = repo[r].filenode(f)
1047 1047 if n not in known:
1048 1048 if n == fn:
1049 1049 s.add(r)
1050 1050 scanpos = r
1051 1051 break
1052 1052 else:
1053 1053 known[n] = r
1054 1054 except error.ManifestLookupError:
1055 1055 # deletion in changelog
1056 1056 continue
1057 1057
1058 1058 return subset & s
1059 1059
1060 1060 @predicate('first(set, [n])', safe=True)
1061 1061 def first(repo, subset, x):
1062 1062 """An alias for limit().
1063 1063 """
1064 1064 return limit(repo, subset, x)
1065 1065
1066 1066 def _follow(repo, subset, x, name, followfirst=False):
1067 1067 l = getargs(x, 0, 1, _("%s takes no arguments or a pattern") % name)
1068 1068 c = repo['.']
1069 1069 if l:
1070 1070 x = getstring(l[0], _("%s expected a pattern") % name)
1071 1071 matcher = matchmod.match(repo.root, repo.getcwd(), [x],
1072 1072 ctx=repo[None], default='path')
1073 1073
1074 1074 files = c.manifest().walk(matcher)
1075 1075
1076 1076 s = set()
1077 1077 for fname in files:
1078 1078 fctx = c[fname]
1079 1079 s = s.union(set(c.rev() for c in fctx.ancestors(followfirst)))
1080 1080 # include the revision responsible for the most recent version
1081 1081 s.add(fctx.introrev())
1082 1082 else:
1083 1083 s = _revancestors(repo, baseset([c.rev()]), followfirst)
1084 1084
1085 1085 return subset & s
1086 1086
1087 1087 @predicate('follow([pattern])', safe=True)
1088 1088 def follow(repo, subset, x):
1089 1089 """
1090 1090 An alias for ``::.`` (ancestors of the working directory's first parent).
1091 1091 If a pattern is specified, the histories of files matching the given
1092 1092 pattern are followed, including copies.
1093 1093 """
1094 1094 return _follow(repo, subset, x, 'follow')
1095 1095
1096 1096 @predicate('_followfirst', safe=True)
1097 1097 def _followfirst(repo, subset, x):
1098 1098 # ``followfirst([pattern])``
1099 1099 # Like ``follow([pattern])`` but follows only the first parent of
1100 1100 # every revision or file revision.
1101 1101 return _follow(repo, subset, x, '_followfirst', followfirst=True)
1102 1102
1103 1103 @predicate('all()', safe=True)
1104 1104 def getall(repo, subset, x):
1105 1105 """All changesets, the same as ``0:tip``.
1106 1106 """
1107 1107 # i18n: "all" is a keyword
1108 1108 getargs(x, 0, 0, _("all takes no arguments"))
1109 1109 return subset & spanset(repo) # drop "null" if any
1110 1110
1111 1111 @predicate('grep(regex)')
1112 1112 def grep(repo, subset, x):
1113 1113 """Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
1114 1114 to ensure special escape characters are handled correctly. Unlike
1115 1115 ``keyword(string)``, the match is case-sensitive.
1116 1116 """
1117 1117 try:
1118 1118 # i18n: "grep" is a keyword
1119 1119 gr = re.compile(getstring(x, _("grep requires a string")))
1120 1120 except re.error as e:
1121 1121 raise error.ParseError(_('invalid match pattern: %s') % e)
1122 1122
1123 1123 def matches(x):
1124 1124 c = repo[x]
1125 1125 for e in c.files() + [c.user(), c.description()]:
1126 1126 if gr.search(e):
1127 1127 return True
1128 1128 return False
1129 1129
1130 1130 return subset.filter(matches, condrepr=('<grep %r>', gr.pattern))
1131 1131
1132 1132 @predicate('_matchfiles', safe=True)
1133 1133 def _matchfiles(repo, subset, x):
1134 1134 # _matchfiles takes a revset list of prefixed arguments:
1135 1135 #
1136 1136 # [p:foo, i:bar, x:baz]
1137 1137 #
1138 1138 # builds a match object from them and filters subset. Allowed
1139 1139 # prefixes are 'p:' for regular patterns, 'i:' for include
1140 1140 # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
1141 1141 # a revision identifier, or the empty string to reference the
1142 1142 # working directory, from which the match object is
1143 1143 # initialized. Use 'd:' to set the default matching mode, default
1144 1144 # to 'glob'. At most one 'r:' and 'd:' argument can be passed.
1145 1145
1146 1146 l = getargs(x, 1, -1, "_matchfiles requires at least one argument")
1147 1147 pats, inc, exc = [], [], []
1148 1148 rev, default = None, None
1149 1149 for arg in l:
1150 1150 s = getstring(arg, "_matchfiles requires string arguments")
1151 1151 prefix, value = s[:2], s[2:]
1152 1152 if prefix == 'p:':
1153 1153 pats.append(value)
1154 1154 elif prefix == 'i:':
1155 1155 inc.append(value)
1156 1156 elif prefix == 'x:':
1157 1157 exc.append(value)
1158 1158 elif prefix == 'r:':
1159 1159 if rev is not None:
1160 1160 raise error.ParseError('_matchfiles expected at most one '
1161 1161 'revision')
1162 1162 if value != '': # empty means working directory; leave rev as None
1163 1163 rev = value
1164 1164 elif prefix == 'd:':
1165 1165 if default is not None:
1166 1166 raise error.ParseError('_matchfiles expected at most one '
1167 1167 'default mode')
1168 1168 default = value
1169 1169 else:
1170 1170 raise error.ParseError('invalid _matchfiles prefix: %s' % prefix)
1171 1171 if not default:
1172 1172 default = 'glob'
1173 1173
1174 1174 m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
1175 1175 exclude=exc, ctx=repo[rev], default=default)
1176 1176
1177 1177 # This directly reads the changelog data, as creating a changectx for
1178 1178 # all revisions is quite expensive.
1179 1179 getfiles = repo.changelog.readfiles
1180 1180 wdirrev = node.wdirrev
1181 1181 def matches(x):
1182 1182 if x == wdirrev:
1183 1183 files = repo[x].files()
1184 1184 else:
1185 1185 files = getfiles(x)
1186 1186 for f in files:
1187 1187 if m(f):
1188 1188 return True
1189 1189 return False
1190 1190
1191 1191 return subset.filter(matches,
1192 1192 condrepr=('<matchfiles patterns=%r, include=%r '
1193 1193 'exclude=%r, default=%r, rev=%r>',
1194 1194 pats, inc, exc, default, rev))
1195 1195
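# Illustrative sketch (not part of the original file): file() below builds a
# single 'p:' argument, but a caller may combine prefixes, e.g. an argument
# tree roughly like
#
#   ('list', ('string', 'p:src/'), ('string', 'x:src/vendor/'),
#    ('string', 'd:relpath'))
#
# which restricts the match to src/, excludes src/vendor/, and uses 'relpath'
# as the default pattern kind.
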
1196 1196 @predicate('file(pattern)', safe=True)
1197 1197 def hasfile(repo, subset, x):
1198 1198 """Changesets affecting files matched by pattern.
1199 1199
1200 1200 For a faster but less accurate result, consider using ``filelog()``
1201 1201 instead.
1202 1202
1203 1203 This predicate uses ``glob:`` as the default kind of pattern.
1204 1204 """
1205 1205 # i18n: "file" is a keyword
1206 1206 pat = getstring(x, _("file requires a pattern"))
1207 1207 return _matchfiles(repo, subset, ('string', 'p:' + pat))
1208 1208
1209 1209 @predicate('head()', safe=True)
1210 1210 def head(repo, subset, x):
1211 1211 """Changeset is a named branch head.
1212 1212 """
1213 1213 # i18n: "head" is a keyword
1214 1214 getargs(x, 0, 0, _("head takes no arguments"))
1215 1215 hs = set()
1216 1216 cl = repo.changelog
1217 1217 for b, ls in repo.branchmap().iteritems():
1218 1218 hs.update(cl.rev(h) for h in ls)
1219 1219 # XXX using a set to feed the baseset is wrong. Sets are not ordered.
1220 1220 # This does not break because of other fullreposet misbehavior.
1221 1221 # XXX We should combine with subset first: 'subset & baseset(...)'. This is
1222 1222 # necessary to ensure we preserve the order in subset.
1223 1223 return baseset(hs) & subset
1224 1224
1225 1225 @predicate('heads(set)', safe=True)
1226 1226 def heads(repo, subset, x):
1227 1227 """Members of set with no children in set.
1228 1228 """
1229 1229 s = getset(repo, subset, x)
1230 1230 ps = parents(repo, subset, x)
1231 1231 return s - ps
1232 1232
1233 1233 @predicate('hidden()', safe=True)
1234 1234 def hidden(repo, subset, x):
1235 1235 """Hidden changesets.
1236 1236 """
1237 1237 # i18n: "hidden" is a keyword
1238 1238 getargs(x, 0, 0, _("hidden takes no arguments"))
1239 1239 hiddenrevs = repoview.filterrevs(repo, 'visible')
1240 1240 return subset & hiddenrevs
1241 1241
1242 1242 @predicate('keyword(string)', safe=True)
1243 1243 def keyword(repo, subset, x):
1244 1244 """Search commit message, user name, and names of changed files for
1245 1245 string. The match is case-insensitive.
1246 1246 """
1247 1247 # i18n: "keyword" is a keyword
1248 1248 kw = encoding.lower(getstring(x, _("keyword requires a string")))
1249 1249
1250 1250 def matches(r):
1251 1251 c = repo[r]
1252 1252 return any(kw in encoding.lower(t)
1253 1253 for t in c.files() + [c.user(), c.description()])
1254 1254
1255 1255 return subset.filter(matches, condrepr=('<keyword %r>', kw))
1256 1256
1257 1257 @predicate('limit(set[, n[, offset]])', safe=True)
1258 1258 def limit(repo, subset, x):
1259 1259 """First n members of set, defaulting to 1, starting from offset.
1260 1260 """
1261 1261 args = getargsdict(x, 'limit', 'set n offset')
1262 1262 if 'set' not in args:
1263 1263 # i18n: "limit" is a keyword
1264 1264 raise error.ParseError(_("limit requires one to three arguments"))
1265 1265 try:
1266 1266 lim, ofs = 1, 0
1267 1267 if 'n' in args:
1268 1268 # i18n: "limit" is a keyword
1269 1269 lim = int(getstring(args['n'], _("limit requires a number")))
1270 1270 if 'offset' in args:
1271 1271 # i18n: "limit" is a keyword
1272 1272 ofs = int(getstring(args['offset'], _("limit requires a number")))
1273 1273 if ofs < 0:
1274 1274 raise error.ParseError(_("negative offset"))
1275 1275 except (TypeError, ValueError):
1276 1276 # i18n: "limit" is a keyword
1277 1277 raise error.ParseError(_("limit expects a number"))
1278 1278 os = getset(repo, fullreposet(repo), args['set'])
1279 1279 result = []
1280 1280 it = iter(os)
1281 1281 for x in xrange(ofs):
1282 1282 y = next(it, None)
1283 1283 if y is None:
1284 1284 break
1285 1285 for x in xrange(lim):
1286 1286 y = next(it, None)
1287 1287 if y is None:
1288 1288 break
1289 1289 elif y in subset:
1290 1290 result.append(y)
1291 1291 return baseset(result)
1292 1292
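# Illustrative sketch (not part of the original file): with revisions
# 0, 1, 2, 3 in the repository, "limit(all(), 2, 1)" skips one revision of the
# full set and then takes two, selecting [1, 2]; members that fall outside the
# incoming subset are silently dropped rather than replaced.
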
1293 1293 @predicate('last(set, [n])', safe=True)
1294 1294 def last(repo, subset, x):
1295 1295 """Last n members of set, defaulting to 1.
1296 1296 """
1297 1297 # i18n: "last" is a keyword
1298 1298 l = getargs(x, 1, 2, _("last requires one or two arguments"))
1299 1299 try:
1300 1300 lim = 1
1301 1301 if len(l) == 2:
1302 1302 # i18n: "last" is a keyword
1303 1303 lim = int(getstring(l[1], _("last requires a number")))
1304 1304 except (TypeError, ValueError):
1305 1305 # i18n: "last" is a keyword
1306 1306 raise error.ParseError(_("last expects a number"))
1307 1307 os = getset(repo, fullreposet(repo), l[0])
1308 1308 os.reverse()
1309 1309 result = []
1310 1310 it = iter(os)
1311 1311 for x in xrange(lim):
1312 1312 y = next(it, None)
1313 1313 if y is None:
1314 1314 break
1315 1315 elif y in subset:
1316 1316 result.append(y)
1317 1317 return baseset(result)
1318 1318
1319 1319 @predicate('max(set)', safe=True)
1320 1320 def maxrev(repo, subset, x):
1321 1321 """Changeset with highest revision number in set.
1322 1322 """
1323 1323 os = getset(repo, fullreposet(repo), x)
1324 1324 try:
1325 1325 m = os.max()
1326 1326 if m in subset:
1327 1327 return baseset([m])
1328 1328 except ValueError:
1329 1329 # os.max() throws a ValueError when the collection is empty.
1330 1330 # Same as python's max().
1331 1331 pass
1332 1332 return baseset()
1333 1333
1334 1334 @predicate('merge()', safe=True)
1335 1335 def merge(repo, subset, x):
1336 1336 """Changeset is a merge changeset.
1337 1337 """
1338 1338 # i18n: "merge" is a keyword
1339 1339 getargs(x, 0, 0, _("merge takes no arguments"))
1340 1340 cl = repo.changelog
1341 1341 return subset.filter(lambda r: cl.parentrevs(r)[1] != -1,
1342 1342 condrepr='<merge>')
1343 1343
1344 1344 @predicate('branchpoint()', safe=True)
1345 1345 def branchpoint(repo, subset, x):
1346 1346 """Changesets with more than one child.
1347 1347 """
1348 1348 # i18n: "branchpoint" is a keyword
1349 1349 getargs(x, 0, 0, _("branchpoint takes no arguments"))
1350 1350 cl = repo.changelog
1351 1351 if not subset:
1352 1352 return baseset()
1353 1353 # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
1354 1354 # (and if it is not, it should.)
1355 1355 baserev = min(subset)
1356 1356 parentscount = [0]*(len(repo) - baserev)
1357 1357 for r in cl.revs(start=baserev + 1):
1358 1358 for p in cl.parentrevs(r):
1359 1359 if p >= baserev:
1360 1360 parentscount[p - baserev] += 1
1361 1361 return subset.filter(lambda r: parentscount[r - baserev] > 1,
1362 1362 condrepr='<branchpoint>')
1363 1363
1364 1364 @predicate('min(set)', safe=True)
1365 1365 def minrev(repo, subset, x):
1366 1366 """Changeset with lowest revision number in set.
1367 1367 """
1368 1368 os = getset(repo, fullreposet(repo), x)
1369 1369 try:
1370 1370 m = os.min()
1371 1371 if m in subset:
1372 1372 return baseset([m])
1373 1373 except ValueError:
1374 1374 # os.min() throws a ValueError when the collection is empty.
1375 1375 # Same as python's min().
1376 1376 pass
1377 1377 return baseset()
1378 1378
1379 1379 @predicate('modifies(pattern)', safe=True)
1380 1380 def modifies(repo, subset, x):
1381 1381 """Changesets modifying files matched by pattern.
1382 1382
1383 1383 The pattern without explicit kind like ``glob:`` is expected to be
1384 1384 relative to the current directory and match against a file or a
1385 1385 directory.
1386 1386 """
1387 1387 # i18n: "modifies" is a keyword
1388 1388 pat = getstring(x, _("modifies requires a pattern"))
1389 1389 return checkstatus(repo, subset, pat, 0)
1390 1390
1391 1391 @predicate('named(namespace)')
1392 1392 def named(repo, subset, x):
1393 1393 """The changesets in a given namespace.
1394 1394
1395 1395 If `namespace` starts with `re:`, the remainder of the string is treated as
1396 1396 a regular expression. To match a namespace that actually starts with `re:`,
1397 1397 use the prefix `literal:`.
1398 1398 """
1399 1399 # i18n: "named" is a keyword
1400 1400 args = getargs(x, 1, 1, _('named requires a namespace argument'))
1401 1401
1402 1402 ns = getstring(args[0],
1403 1403 # i18n: "named" is a keyword
1404 1404 _('the argument to named must be a string'))
1405 1405 kind, pattern, matcher = util.stringmatcher(ns)
1406 1406 namespaces = set()
1407 1407 if kind == 'literal':
1408 1408 if pattern not in repo.names:
1409 1409 raise error.RepoLookupError(_("namespace '%s' does not exist")
1410 1410 % ns)
1411 1411 namespaces.add(repo.names[pattern])
1412 1412 else:
1413 1413 for name, ns in repo.names.iteritems():
1414 1414 if matcher(name):
1415 1415 namespaces.add(ns)
1416 1416 if not namespaces:
1417 1417 raise error.RepoLookupError(_("no namespace exists"
1418 1418 " that match '%s'") % pattern)
1419 1419
1420 1420 names = set()
1421 1421 for ns in namespaces:
1422 1422 for name in ns.listnames(repo):
1423 1423 if name not in ns.deprecated:
1424 1424 names.update(repo[n].rev() for n in ns.nodes(repo, name))
1425 1425
1426 1426 names -= set([node.nullrev])
1427 1427 return subset & names
1428 1428
1429 1429 @predicate('id(string)', safe=True)
1430 1430 def node_(repo, subset, x):
1431 1431 """Revision non-ambiguously specified by the given hex string prefix.
1432 1432 """
1433 1433 # i18n: "id" is a keyword
1434 1434 l = getargs(x, 1, 1, _("id requires one argument"))
1435 1435 # i18n: "id" is a keyword
1436 1436 n = getstring(l[0], _("id requires a string"))
1437 1437 if len(n) == 40:
1438 1438 try:
1439 1439 rn = repo.changelog.rev(node.bin(n))
1440 1440 except (LookupError, TypeError):
1441 1441 rn = None
1442 1442 else:
1443 1443 rn = None
1444 1444 pm = repo.changelog._partialmatch(n)
1445 1445 if pm is not None:
1446 1446 rn = repo.changelog.rev(pm)
1447 1447
1448 1448 if rn is None:
1449 1449 return baseset()
1450 1450 result = baseset([rn])
1451 1451 return result & subset
1452 1452
1453 1453 @predicate('obsolete()', safe=True)
1454 1454 def obsolete(repo, subset, x):
1455 1455 """Mutable changeset with a newer version."""
1456 1456 # i18n: "obsolete" is a keyword
1457 1457 getargs(x, 0, 0, _("obsolete takes no arguments"))
1458 1458 obsoletes = obsmod.getrevs(repo, 'obsolete')
1459 1459 return subset & obsoletes
1460 1460
1461 1461 @predicate('only(set, [set])', safe=True)
1462 1462 def only(repo, subset, x):
1463 1463 """Changesets that are ancestors of the first set that are not ancestors
1464 1464 of any other head in the repo. If a second set is specified, the result
1465 1465 is ancestors of the first set that are not ancestors of the second set
1466 1466 (i.e. ::<set1> - ::<set2>).
1467 1467 """
1468 1468 cl = repo.changelog
1469 1469 # i18n: "only" is a keyword
1470 1470 args = getargs(x, 1, 2, _('only takes one or two arguments'))
1471 1471 include = getset(repo, fullreposet(repo), args[0])
1472 1472 if len(args) == 1:
1473 1473 if not include:
1474 1474 return baseset()
1475 1475
1476 1476 descendants = set(_revdescendants(repo, include, False))
1477 1477 exclude = [rev for rev in cl.headrevs()
1478 1478 if not rev in descendants and not rev in include]
1479 1479 else:
1480 1480 exclude = getset(repo, fullreposet(repo), args[1])
1481 1481
1482 1482 results = set(cl.findmissingrevs(common=exclude, heads=include))
1483 1483 # XXX we should turn this into a baseset instead of a set, smartset may do
1484 1484 # some optimisations from the fact this is a baseset.
1485 1485 return subset & results
1486 1486
1487 1487 @predicate('origin([set])', safe=True)
1488 1488 def origin(repo, subset, x):
1489 1489 """
1490 1490 Changesets that were specified as a source for the grafts, transplants or
1491 1491 rebases that created the given revisions. Omitting the optional set is the
1492 1492 same as passing all(). If a changeset created by these operations is itself
1493 1493 specified as a source for one of these operations, only the source changeset
1494 1494 for the first operation is selected.
1495 1495 """
1496 1496 if x is not None:
1497 1497 dests = getset(repo, fullreposet(repo), x)
1498 1498 else:
1499 1499 dests = fullreposet(repo)
1500 1500
1501 1501 def _firstsrc(rev):
1502 1502 src = _getrevsource(repo, rev)
1503 1503 if src is None:
1504 1504 return None
1505 1505
1506 1506 while True:
1507 1507 prev = _getrevsource(repo, src)
1508 1508
1509 1509 if prev is None:
1510 1510 return src
1511 1511 src = prev
1512 1512
1513 1513 o = set([_firstsrc(r) for r in dests])
1514 1514 o -= set([None])
1515 1515 # XXX we should turn this into a baseset instead of a set, smartset may do
1516 1516 # some optimisations from the fact this is a baseset.
1517 1517 return subset & o
1518 1518
1519 1519 @predicate('outgoing([path])', safe=True)
1520 1520 def outgoing(repo, subset, x):
1521 1521 """Changesets not found in the specified destination repository, or the
1522 1522 default push location.
1523 1523 """
1524 1524 # Avoid cycles.
1525 1525 from . import (
1526 1526 discovery,
1527 1527 hg,
1528 1528 )
1529 1529 # i18n: "outgoing" is a keyword
1530 1530 l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
1531 1531 # i18n: "outgoing" is a keyword
1532 1532 dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
1533 1533 dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
1534 1534 dest, branches = hg.parseurl(dest)
1535 1535 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1536 1536 if revs:
1537 1537 revs = [repo.lookup(rev) for rev in revs]
1538 1538 other = hg.peer(repo, {}, dest)
1539 1539 repo.ui.pushbuffer()
1540 1540 outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
1541 1541 repo.ui.popbuffer()
1542 1542 cl = repo.changelog
1543 1543 o = set([cl.rev(r) for r in outgoing.missing])
1544 1544 return subset & o
1545 1545
1546 1546 @predicate('p1([set])', safe=True)
1547 1547 def p1(repo, subset, x):
1548 1548 """First parent of changesets in set, or the working directory.
1549 1549 """
1550 1550 if x is None:
1551 1551 p = repo[x].p1().rev()
1552 1552 if p >= 0:
1553 1553 return subset & baseset([p])
1554 1554 return baseset()
1555 1555
1556 1556 ps = set()
1557 1557 cl = repo.changelog
1558 1558 for r in getset(repo, fullreposet(repo), x):
1559 1559 ps.add(cl.parentrevs(r)[0])
1560 1560 ps -= set([node.nullrev])
1561 1561 # XXX we should turn this into a baseset instead of a set; smartset could
1562 1562 # then apply some optimisations based on the fact that it is a baseset.
1563 1563 return subset & ps
1564 1564
1565 1565 @predicate('p2([set])', safe=True)
1566 1566 def p2(repo, subset, x):
1567 1567 """Second parent of changesets in set, or the working directory.
1568 1568 """
1569 1569 if x is None:
1570 1570 ps = repo[x].parents()
1571 1571 try:
1572 1572 p = ps[1].rev()
1573 1573 if p >= 0:
1574 1574 return subset & baseset([p])
1575 1575 return baseset()
1576 1576 except IndexError:
1577 1577 return baseset()
1578 1578
1579 1579 ps = set()
1580 1580 cl = repo.changelog
1581 1581 for r in getset(repo, fullreposet(repo), x):
1582 1582 ps.add(cl.parentrevs(r)[1])
1583 1583 ps -= set([node.nullrev])
1584 1584 # XXX we should turn this into a baseset instead of a set; smartset could
1585 1585 # then apply some optimisations based on the fact that it is a baseset.
1586 1586 return subset & ps
1587 1587
1588 1588 @predicate('parents([set])', safe=True)
1589 1589 def parents(repo, subset, x):
1590 1590 """
1591 1591 The set of all parents for all changesets in set, or the working directory.
1592 1592 """
1593 1593 if x is None:
1594 1594 ps = set(p.rev() for p in repo[x].parents())
1595 1595 else:
1596 1596 ps = set()
1597 1597 cl = repo.changelog
1598 1598 up = ps.update
1599 1599 parentrevs = cl.parentrevs
1600 1600 for r in getset(repo, fullreposet(repo), x):
1601 1601 if r == node.wdirrev:
1602 1602 up(p.rev() for p in repo[r].parents())
1603 1603 else:
1604 1604 up(parentrevs(r))
1605 1605 ps -= set([node.nullrev])
1606 1606 return subset & ps
1607 1607
1608 1608 def _phase(repo, subset, target):
1609 1609 """helper to select all revs in phase <target>"""
1610 1610 repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
1611 1611 if repo._phasecache._phasesets:
1612 1612 s = repo._phasecache._phasesets[target] - repo.changelog.filteredrevs
1613 1613 s = baseset(s)
1614 1614 s.sort() # sets are unordered, so enforce ascending order
1615 1615 return subset & s
1616 1616 else:
1617 1617 phase = repo._phasecache.phase
1618 1618 condition = lambda r: phase(repo, r) == target
1619 1619 return subset.filter(condition, condrepr=('<phase %r>', target),
1620 1620 cache=False)
1621 1621
1622 1622 @predicate('draft()', safe=True)
1623 1623 def draft(repo, subset, x):
1624 1624 """Changeset in draft phase."""
1625 1625 # i18n: "draft" is a keyword
1626 1626 getargs(x, 0, 0, _("draft takes no arguments"))
1627 1627 target = phases.draft
1628 1628 return _phase(repo, subset, target)
1629 1629
1630 1630 @predicate('secret()', safe=True)
1631 1631 def secret(repo, subset, x):
1632 1632 """Changeset in secret phase."""
1633 1633 # i18n: "secret" is a keyword
1634 1634 getargs(x, 0, 0, _("secret takes no arguments"))
1635 1635 target = phases.secret
1636 1636 return _phase(repo, subset, target)
1637 1637
1638 1638 def parentspec(repo, subset, x, n):
1639 1639 """``set^0``
1640 1640 The set.
1641 1641 ``set^1`` (or ``set^``), ``set^2``
1642 1642 First or second parent, respectively, of all changesets in set.
1643 1643 """
1644 1644 try:
1645 1645 n = int(n[1])
1646 1646 if n not in (0, 1, 2):
1647 1647 raise ValueError
1648 1648 except (TypeError, ValueError):
1649 1649 raise error.ParseError(_("^ expects a number 0, 1, or 2"))
1650 1650 ps = set()
1651 1651 cl = repo.changelog
1652 1652 for r in getset(repo, fullreposet(repo), x):
1653 1653 if n == 0:
1654 1654 ps.add(r)
1655 1655 elif n == 1:
1656 1656 ps.add(cl.parentrevs(r)[0])
1657 1657 elif n == 2:
1658 1658 parents = cl.parentrevs(r)
1659 1659 if len(parents) > 1:
1660 1660 ps.add(parents[1])
1661 1661 return subset & ps
1662 1662
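# Illustrative examples for the parentspec handling above (hypothetical
# revisions): "tip^" and "tip^1" select the first parent of tip, "tip^2" its
# second parent (an empty set for non-merges), and "tip^0" tip itself,
# mirroring the n == 0/1/2 branches.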
1663 1663 @predicate('present(set)', safe=True)
1664 1664 def present(repo, subset, x):
1665 1665 """An empty set, if any revision in set isn't found; otherwise,
1666 1666 all revisions in set.
1667 1667
1668 1668 If any of the specified revisions is not present in the local repository,
1669 1669 the query is normally aborted. But this predicate allows the query
1670 1670 to continue even in such cases.
1671 1671 """
1672 1672 try:
1673 1673 return getset(repo, subset, x)
1674 1674 except error.RepoLookupError:
1675 1675 return baseset()
1676 1676
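# Illustrative usage of present() (the hash below is hypothetical): a query
# such as "present(0123456789ab) or tip" does not abort when 0123456789ab is
# unknown; the present() term simply evaluates to an empty set because the
# RepoLookupError is swallowed above.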
1677 1677 # for internal use
1678 1678 @predicate('_notpublic', safe=True)
1679 1679 def _notpublic(repo, subset, x):
1680 1680 getargs(x, 0, 0, "_notpublic takes no arguments")
1681 1681 repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
1682 1682 if repo._phasecache._phasesets:
1683 1683 s = set()
1684 1684 for u in repo._phasecache._phasesets[1:]:
1685 1685 s.update(u)
1686 1686 s = baseset(s - repo.changelog.filteredrevs)
1687 1687 s.sort()
1688 1688 return subset & s
1689 1689 else:
1690 1690 phase = repo._phasecache.phase
1691 1691 target = phases.public
1692 1692 condition = lambda r: phase(repo, r) != target
1693 1693 return subset.filter(condition, condrepr=('<phase %r>', target),
1694 1694 cache=False)
1695 1695
1696 1696 @predicate('public()', safe=True)
1697 1697 def public(repo, subset, x):
1698 1698 """Changeset in public phase."""
1699 1699 # i18n: "public" is a keyword
1700 1700 getargs(x, 0, 0, _("public takes no arguments"))
1701 1701 phase = repo._phasecache.phase
1702 1702 target = phases.public
1703 1703 condition = lambda r: phase(repo, r) == target
1704 1704 return subset.filter(condition, condrepr=('<phase %r>', target),
1705 1705 cache=False)
1706 1706
1707 1707 @predicate('remote([id [,path]])', safe=True)
1708 1708 def remote(repo, subset, x):
1709 1709 """Local revision that corresponds to the given identifier in a
1710 1710 remote repository, if present. Here, the '.' identifier is a
1711 1711 synonym for the current local branch.
1712 1712 """
1713 1713
1714 1714 from . import hg # avoid start-up nasties
1715 1715 # i18n: "remote" is a keyword
1716 1716 l = getargs(x, 0, 2, _("remote takes zero, one, or two arguments"))
1717 1717
1718 1718 q = '.'
1719 1719 if len(l) > 0:
1720 1720 # i18n: "remote" is a keyword
1721 1721 q = getstring(l[0], _("remote requires a string id"))
1722 1722 if q == '.':
1723 1723 q = repo['.'].branch()
1724 1724
1725 1725 dest = ''
1726 1726 if len(l) > 1:
1727 1727 # i18n: "remote" is a keyword
1728 1728 dest = getstring(l[1], _("remote requires a repository path"))
1729 1729 dest = repo.ui.expandpath(dest or 'default')
1730 1730 dest, branches = hg.parseurl(dest)
1731 1731 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1732 1732 if revs:
1733 1733 revs = [repo.lookup(rev) for rev in revs]
1734 1734 other = hg.peer(repo, {}, dest)
1735 1735 n = other.lookup(q)
1736 1736 if n in repo:
1737 1737 r = repo[n].rev()
1738 1738 if r in subset:
1739 1739 return baseset([r])
1740 1740 return baseset()
1741 1741
1742 1742 @predicate('removes(pattern)', safe=True)
1743 1743 def removes(repo, subset, x):
1744 1744 """Changesets which remove files matching pattern.
1745 1745
1746 1746 The pattern without explicit kind like ``glob:`` is expected to be
1747 1747 relative to the current directory and match against a file or a
1748 1748 directory.
1749 1749 """
1750 1750 # i18n: "removes" is a keyword
1751 1751 pat = getstring(x, _("removes requires a pattern"))
1752 1752 return checkstatus(repo, subset, pat, 2)
1753 1753
1754 1754 @predicate('rev(number)', safe=True)
1755 1755 def rev(repo, subset, x):
1756 1756 """Revision with the given numeric identifier.
1757 1757 """
1758 1758 # i18n: "rev" is a keyword
1759 1759 l = getargs(x, 1, 1, _("rev requires one argument"))
1760 1760 try:
1761 1761 # i18n: "rev" is a keyword
1762 1762 l = int(getstring(l[0], _("rev requires a number")))
1763 1763 except (TypeError, ValueError):
1764 1764 # i18n: "rev" is a keyword
1765 1765 raise error.ParseError(_("rev expects a number"))
1766 1766 if l not in repo.changelog and l != node.nullrev:
1767 1767 return baseset()
1768 1768 return subset & baseset([l])
1769 1769
1770 1770 @predicate('matching(revision [, field])', safe=True)
1771 1771 def matching(repo, subset, x):
1772 1772 """Changesets in which a given set of fields match the set of fields in the
1773 1773 selected revision or set.
1774 1774
1775 1775 To match more than one field pass the list of fields to match separated
1776 1776 by spaces (e.g. ``author description``).
1777 1777
1778 1778 Valid fields are most regular revision fields and some special fields.
1779 1779
1780 1780 Regular revision fields are ``description``, ``author``, ``branch``,
1781 1781 ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
1782 1782 and ``diff``.
1783 1783 Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
1784 1784 contents of the revision. Two revisions matching their ``diff`` will
1785 1785 also match their ``files``.
1786 1786
1787 1787 Special fields are ``summary`` and ``metadata``:
1788 1788 ``summary`` matches the first line of the description.
1789 1789 ``metadata`` is equivalent to matching ``description user date``
1790 1790 (i.e. it matches the main metadata fields).
1791 1791
1792 1792 ``metadata`` is the default field which is used when no fields are
1793 1793 specified. You can match more than one field at a time.
1794 1794 """
1795 1795 # i18n: "matching" is a keyword
1796 1796 l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))
1797 1797
1798 1798 revs = getset(repo, fullreposet(repo), l[0])
1799 1799
1800 1800 fieldlist = ['metadata']
1801 1801 if len(l) > 1:
1802 1802 fieldlist = getstring(l[1],
1803 1803 # i18n: "matching" is a keyword
1804 1804 _("matching requires a string "
1805 1805 "as its second argument")).split()
1806 1806
1807 1807 # Make sure that there are no repeated fields,
1808 1808 # expand the 'special' 'metadata' field type
1809 1809 # and check the 'files' whenever we check the 'diff'
1810 1810 fields = []
1811 1811 for field in fieldlist:
1812 1812 if field == 'metadata':
1813 1813 fields += ['user', 'description', 'date']
1814 1814 elif field == 'diff':
1815 1815 # a revision matching the diff must also match the files
1816 1816 # since matching the diff is very costly, make sure to
1817 1817 # also match the files first
1818 1818 fields += ['files', 'diff']
1819 1819 else:
1820 1820 if field == 'author':
1821 1821 field = 'user'
1822 1822 fields.append(field)
1823 1823 fields = set(fields)
1824 1824 if 'summary' in fields and 'description' in fields:
1825 1825 # If a revision matches its description it also matches its summary
1826 1826 fields.discard('summary')
1827 1827
1828 1828 # We may want to match more than one field
1829 1829 # Not all fields take the same amount of time to be matched
1830 1830 # Sort the selected fields in order of increasing matching cost
1831 1831 fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
1832 1832 'files', 'description', 'substate', 'diff']
1833 1833 def fieldkeyfunc(f):
1834 1834 try:
1835 1835 return fieldorder.index(f)
1836 1836 except ValueError:
1837 1837 # assume an unknown field is very costly
1838 1838 return len(fieldorder)
1839 1839 fields = list(fields)
1840 1840 fields.sort(key=fieldkeyfunc)
1841 1841
1842 1842 # Each field will be matched with its own "getfield" function
1843 1843 # which will be added to the getfieldfuncs array of functions
1844 1844 getfieldfuncs = []
1845 1845 _funcs = {
1846 1846 'user': lambda r: repo[r].user(),
1847 1847 'branch': lambda r: repo[r].branch(),
1848 1848 'date': lambda r: repo[r].date(),
1849 1849 'description': lambda r: repo[r].description(),
1850 1850 'files': lambda r: repo[r].files(),
1851 1851 'parents': lambda r: repo[r].parents(),
1852 1852 'phase': lambda r: repo[r].phase(),
1853 1853 'substate': lambda r: repo[r].substate,
1854 1854 'summary': lambda r: repo[r].description().splitlines()[0],
1855 1855 'diff': lambda r: list(repo[r].diff(git=True)),
1856 1856 }
1857 1857 for info in fields:
1858 1858 getfield = _funcs.get(info, None)
1859 1859 if getfield is None:
1860 1860 raise error.ParseError(
1861 1861 # i18n: "matching" is a keyword
1862 1862 _("unexpected field name passed to matching: %s") % info)
1863 1863 getfieldfuncs.append(getfield)
1864 1864 # convert the getfield array of functions into a "getinfo" function
1865 1865 # which returns an array of field values (or a single value if there
1866 1866 # is only one field to match)
1867 1867 getinfo = lambda r: [f(r) for f in getfieldfuncs]
1868 1868
1869 1869 def matches(x):
1870 1870 for rev in revs:
1871 1871 target = getinfo(rev)
1872 1872 match = True
1873 1873 for n, f in enumerate(getfieldfuncs):
1874 1874 if target[n] != f(x):
1875 1875 match = False
1876 1876 if match:
1877 1877 return True
1878 1878 return False
1879 1879
1880 1880 return subset.filter(matches, condrepr=('<matching%r %r>', fields, revs))
1881 1881
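# Illustrative usage of matching() (hypothetical query): "matching(tip)"
# selects changesets sharing tip's metadata (user, description, date), while
# "matching(tip, 'author date')" only compares the author and date fields,
# as handled by the field table above.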
1882 1882 @predicate('reverse(set)', safe=True)
1883 1883 def reverse(repo, subset, x):
1884 1884 """Reverse order of set.
1885 1885 """
1886 1886 l = getset(repo, subset, x)
1887 1887 l.reverse()
1888 1888 return l
1889 1889
1890 1890 @predicate('roots(set)', safe=True)
1891 1891 def roots(repo, subset, x):
1892 1892 """Changesets in set with no parent changeset in set.
1893 1893 """
1894 1894 s = getset(repo, fullreposet(repo), x)
1895 1895 parents = repo.changelog.parentrevs
1896 1896 def filter(r):
1897 1897 for p in parents(r):
1898 1898 if 0 <= p and p in s:
1899 1899 return False
1900 1900 return True
1901 1901 return subset & s.filter(filter, condrepr='<roots>')
1902 1902
1903 1903 @predicate('sort(set[, [-]key...])', safe=True)
1904 1904 def sort(repo, subset, x):
1905 1905 """Sort set by keys. The default sort order is ascending, specify a key
1906 1906 as ``-key`` to sort in descending order.
1907 1907
1908 1908 The keys can be:
1909 1909
1910 1910 - ``rev`` for the revision number,
1911 1911 - ``branch`` for the branch name,
1912 1912 - ``desc`` for the commit message (description),
1913 1913 - ``user`` for user name (``author`` can be used as an alias),
1914 1914 - ``date`` for the commit date
1915 1915 """
1916 1916 # i18n: "sort" is a keyword
1917 1917 l = getargs(x, 1, 2, _("sort requires one or two arguments"))
1918 1918 keys = "rev"
1919 1919 if len(l) == 2:
1920 1920 # i18n: "sort" is a keyword
1921 1921 keys = getstring(l[1], _("sort spec must be a string"))
1922 1922
1923 1923 s = l[0]
1924 1924 keys = keys.split()
1925 1925 l = []
1926 1926 def invert(s):
1927 1927 return "".join(chr(255 - ord(c)) for c in s)
1928 1928 revs = getset(repo, subset, s)
1929 1929 if keys == ["rev"]:
1930 1930 revs.sort()
1931 1931 return revs
1932 1932 elif keys == ["-rev"]:
1933 1933 revs.sort(reverse=True)
1934 1934 return revs
1935 1935 for r in revs:
1936 1936 c = repo[r]
1937 1937 e = []
1938 1938 for k in keys:
1939 1939 if k == 'rev':
1940 1940 e.append(r)
1941 1941 elif k == '-rev':
1942 1942 e.append(-r)
1943 1943 elif k == 'branch':
1944 1944 e.append(c.branch())
1945 1945 elif k == '-branch':
1946 1946 e.append(invert(c.branch()))
1947 1947 elif k == 'desc':
1948 1948 e.append(c.description())
1949 1949 elif k == '-desc':
1950 1950 e.append(invert(c.description()))
1951 1951 elif k in 'user author':
1952 1952 e.append(c.user())
1953 1953 elif k in '-user -author':
1954 1954 e.append(invert(c.user()))
1955 1955 elif k == 'date':
1956 1956 e.append(c.date()[0])
1957 1957 elif k == '-date':
1958 1958 e.append(-c.date()[0])
1959 1959 else:
1960 1960 raise error.ParseError(_("unknown sort key %r") % k)
1961 1961 e.append(r)
1962 1962 l.append(e)
1963 1963 l.sort()
1964 1964 return baseset([e[-1] for e in l])
1965 1965
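# Illustrative usage of sort() (hypothetical query): "sort(heads(all()), -date)"
# returns the repository heads ordered from newest to oldest commit date; a
# plain "sort(<set>)" takes the 'rev' fast path above and simply sorts by
# revision number.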
1966 1966 @predicate('subrepo([pattern])')
1967 1967 def subrepo(repo, subset, x):
1968 1968 """Changesets that add, modify or remove the given subrepo. If no subrepo
1969 1969 pattern is named, any subrepo changes are returned.
1970 1970 """
1971 1971 # i18n: "subrepo" is a keyword
1972 1972 args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
1973 1973 pat = None
1974 1974 if len(args) != 0:
1975 1975 pat = getstring(args[0], _("subrepo requires a pattern"))
1976 1976
1977 1977 m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])
1978 1978
1979 1979 def submatches(names):
1980 1980 k, p, m = util.stringmatcher(pat)
1981 1981 for name in names:
1982 1982 if m(name):
1983 1983 yield name
1984 1984
1985 1985 def matches(x):
1986 1986 c = repo[x]
1987 1987 s = repo.status(c.p1().node(), c.node(), match=m)
1988 1988
1989 1989 if pat is None:
1990 1990 return s.added or s.modified or s.removed
1991 1991
1992 1992 if s.added:
1993 1993 return any(submatches(c.substate.keys()))
1994 1994
1995 1995 if s.modified:
1996 1996 subs = set(c.p1().substate.keys())
1997 1997 subs.update(c.substate.keys())
1998 1998
1999 1999 for path in submatches(subs):
2000 2000 if c.p1().substate.get(path) != c.substate.get(path):
2001 2001 return True
2002 2002
2003 2003 if s.removed:
2004 2004 return any(submatches(c.p1().substate.keys()))
2005 2005
2006 2006 return False
2007 2007
2008 2008 return subset.filter(matches, condrepr=('<subrepo %r>', pat))
2009 2009
2010 2010 def _substringmatcher(pattern):
2011 2011 kind, pattern, matcher = util.stringmatcher(pattern)
2012 2012 if kind == 'literal':
2013 2013 matcher = lambda s: pattern in s
2014 2014 return kind, pattern, matcher
2015 2015
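# Minimal sketch (illustrative, not part of the original module) exercising
# the helper above.  It assumes util.stringmatcher() classifies a plain string
# as 'literal' (in which case the matcher is replaced by a substring test) and
# returns kind 're' with a regex search matcher for 're:' patterns.
def _substringmatcherexample():
    kind, pattern, matcher = _substringmatcher('ell')
    assert kind == 'literal' and matcher('hello')   # substring containment
    kind, pattern, matcher = _substringmatcher('re:^he')
    assert kind == 're' and matcher('hello')        # regex passed through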
2016 2016 @predicate('tag([name])', safe=True)
2017 2017 def tag(repo, subset, x):
2018 2018 """The specified tag by name, or all tagged revisions if no name is given.
2019 2019
2020 2020 If `name` starts with `re:`, the remainder of the name is treated as
2021 2021 a regular expression. To match a tag that actually starts with `re:`,
2022 2022 use the prefix `literal:`.
2023 2023 """
2024 2024 # i18n: "tag" is a keyword
2025 2025 args = getargs(x, 0, 1, _("tag takes one or no arguments"))
2026 2026 cl = repo.changelog
2027 2027 if args:
2028 2028 pattern = getstring(args[0],
2029 2029 # i18n: "tag" is a keyword
2030 2030 _('the argument to tag must be a string'))
2031 2031 kind, pattern, matcher = util.stringmatcher(pattern)
2032 2032 if kind == 'literal':
2033 2033 # avoid resolving all tags
2034 2034 tn = repo._tagscache.tags.get(pattern, None)
2035 2035 if tn is None:
2036 2036 raise error.RepoLookupError(_("tag '%s' does not exist")
2037 2037 % pattern)
2038 2038 s = set([repo[tn].rev()])
2039 2039 else:
2040 2040 s = set([cl.rev(n) for t, n in repo.tagslist() if matcher(t)])
2041 2041 else:
2042 2042 s = set([cl.rev(n) for t, n in repo.tagslist() if t != 'tip'])
2043 2043 return subset & s
2044 2044
2045 2045 @predicate('tagged', safe=True)
2046 2046 def tagged(repo, subset, x):
2047 2047 return tag(repo, subset, x)
2048 2048
2049 2049 @predicate('unstable()', safe=True)
2050 2050 def unstable(repo, subset, x):
2051 2051 """Non-obsolete changesets with obsolete ancestors.
2052 2052 """
2053 2053 # i18n: "unstable" is a keyword
2054 2054 getargs(x, 0, 0, _("unstable takes no arguments"))
2055 2055 unstables = obsmod.getrevs(repo, 'unstable')
2056 2056 return subset & unstables
2057 2057
2058 2058
2059 2059 @predicate('user(string)', safe=True)
2060 2060 def user(repo, subset, x):
2061 2061 """User name contains string. The match is case-insensitive.
2062 2062
2063 2063 If `string` starts with `re:`, the remainder of the string is treated as
2064 2064 a regular expression. To match a user that actually contains `re:`, use
2065 2065 the prefix `literal:`.
2066 2066 """
2067 2067 return author(repo, subset, x)
2068 2068
2069 2069 # experimental
2070 2070 @predicate('wdir', safe=True)
2071 2071 def wdir(repo, subset, x):
2072 2072 # i18n: "wdir" is a keyword
2073 2073 getargs(x, 0, 0, _("wdir takes no arguments"))
2074 2074 if node.wdirrev in subset or isinstance(subset, fullreposet):
2075 2075 return baseset([node.wdirrev])
2076 2076 return baseset()
2077 2077
2078 2078 # for internal use
2079 2079 @predicate('_list', safe=True)
2080 2080 def _list(repo, subset, x):
2081 2081 s = getstring(x, "internal error")
2082 2082 if not s:
2083 2083 return baseset()
2084 2084 # remove duplicates here. it's difficult for caller to deduplicate sets
2085 2085 # because different symbols can point to the same rev.
2086 2086 cl = repo.changelog
2087 2087 ls = []
2088 2088 seen = set()
2089 2089 for t in s.split('\0'):
2090 2090 try:
2091 2091 # fast path for integer revision
2092 2092 r = int(t)
2093 2093 if str(r) != t or r not in cl:
2094 2094 raise ValueError
2095 2095 revs = [r]
2096 2096 except ValueError:
2097 2097 revs = stringset(repo, subset, t)
2098 2098
2099 2099 for r in revs:
2100 2100 if r in seen:
2101 2101 continue
2102 2102 if (r in subset
2103 2103 or r == node.nullrev and isinstance(subset, fullreposet)):
2104 2104 ls.append(r)
2105 2105 seen.add(r)
2106 2106 return baseset(ls)
2107 2107
2108 2108 # for internal use
2109 2109 @predicate('_intlist', safe=True)
2110 2110 def _intlist(repo, subset, x):
2111 2111 s = getstring(x, "internal error")
2112 2112 if not s:
2113 2113 return baseset()
2114 2114 ls = [int(r) for r in s.split('\0')]
2115 2115 s = subset
2116 2116 return baseset([r for r in ls if r in s])
2117 2117
2118 2118 # for internal use
2119 2119 @predicate('_hexlist', safe=True)
2120 2120 def _hexlist(repo, subset, x):
2121 2121 s = getstring(x, "internal error")
2122 2122 if not s:
2123 2123 return baseset()
2124 2124 cl = repo.changelog
2125 2125 ls = [cl.rev(node.bin(r)) for r in s.split('\0')]
2126 2126 s = subset
2127 2127 return baseset([r for r in ls if r in s])
2128 2128
2129 2129 methods = {
2130 2130 "range": rangeset,
2131 2131 "dagrange": dagrange,
2132 2132 "string": stringset,
2133 2133 "symbol": stringset,
2134 2134 "and": andset,
2135 2135 "or": orset,
2136 2136 "not": notset,
2137 2137 "difference": differenceset,
2138 2138 "list": listset,
2139 2139 "keyvalue": keyvaluepair,
2140 2140 "func": func,
2141 2141 "ancestor": ancestorspec,
2142 2142 "parent": parentspec,
2143 2143 "parentpost": p1,
2144 2144 }
2145 2145
2146 2146 def optimize(x, small):
2147 2147 if x is None:
2148 2148 return 0, x
2149 2149
2150 2150 smallbonus = 1
2151 2151 if small:
2152 2152 smallbonus = .5
2153 2153
2154 2154 op = x[0]
2155 2155 if op == 'minus':
2156 2156 return optimize(('and', x[1], ('not', x[2])), small)
2157 2157 elif op == 'only':
2158 2158 return optimize(('func', ('symbol', 'only'),
2159 2159 ('list', x[1], x[2])), small)
2160 2160 elif op == 'onlypost':
2161 2161 return optimize(('func', ('symbol', 'only'), x[1]), small)
2162 2162 elif op == 'dagrangepre':
2163 2163 return optimize(('func', ('symbol', 'ancestors'), x[1]), small)
2164 2164 elif op == 'dagrangepost':
2165 2165 return optimize(('func', ('symbol', 'descendants'), x[1]), small)
2166 2166 elif op == 'rangeall':
2167 2167 return optimize(('range', ('string', '0'), ('string', 'tip')), small)
2168 2168 elif op == 'rangepre':
2169 2169 return optimize(('range', ('string', '0'), x[1]), small)
2170 2170 elif op == 'rangepost':
2171 2171 return optimize(('range', x[1], ('string', 'tip')), small)
2172 2172 elif op == 'negate':
2173 2173 return optimize(('string',
2174 2174 '-' + getstring(x[1], _("can't negate that"))), small)
2175 2175 elif op in 'string symbol negate':
2176 2176 return smallbonus, x # single revisions are small
2177 2177 elif op == 'and':
2178 2178 wa, ta = optimize(x[1], True)
2179 2179 wb, tb = optimize(x[2], True)
2180 2180
2181 2181 # (::x and not ::y)/(not ::y and ::x) have a fast path
2182 2182 def isonly(revs, bases):
2183 2183 return (
2184 2184 revs is not None
2185 2185 and revs[0] == 'func'
2186 2186 and getstring(revs[1], _('not a symbol')) == 'ancestors'
2187 2187 and bases is not None
2188 2188 and bases[0] == 'not'
2189 2189 and bases[1][0] == 'func'
2190 2190 and getstring(bases[1][1], _('not a symbol')) == 'ancestors')
2191 2191
2192 2192 w = min(wa, wb)
2193 2193 if isonly(ta, tb):
2194 2194 return w, ('func', ('symbol', 'only'), ('list', ta[2], tb[1][2]))
2195 2195 if isonly(tb, ta):
2196 2196 return w, ('func', ('symbol', 'only'), ('list', tb[2], ta[1][2]))
2197 2197
2198 2198 if tb is not None and tb[0] == 'not':
2199 2199 return wa, ('difference', ta, tb[1])
2200 2200
2201 2201 if wa > wb:
2202 2202 return w, (op, tb, ta)
2203 2203 return w, (op, ta, tb)
2204 2204 elif op == 'or':
2205 2205 # fast path for machine-generated expressions, which are likely to have
2206 2206 # lots of trivial revisions: 'a + b + c()' to '_list(a b) + c()'
2207 2207 ws, ts, ss = [], [], []
2208 2208 def flushss():
2209 2209 if not ss:
2210 2210 return
2211 2211 if len(ss) == 1:
2212 2212 w, t = ss[0]
2213 2213 else:
2214 2214 s = '\0'.join(t[1] for w, t in ss)
2215 2215 y = ('func', ('symbol', '_list'), ('string', s))
2216 2216 w, t = optimize(y, False)
2217 2217 ws.append(w)
2218 2218 ts.append(t)
2219 2219 del ss[:]
2220 2220 for y in x[1:]:
2221 2221 w, t = optimize(y, False)
2222 2222 if t is not None and (t[0] == 'string' or t[0] == 'symbol'):
2223 2223 ss.append((w, t))
2224 2224 continue
2225 2225 flushss()
2226 2226 ws.append(w)
2227 2227 ts.append(t)
2228 2228 flushss()
2229 2229 if len(ts) == 1:
2230 2230 return ws[0], ts[0] # 'or' operation is fully optimized out
2231 2231 # we can't reorder trees by weight because it would change the order.
2232 2232 # ("sort(a + b)" == "sort(b + a)", but "a + b" != "b + a")
2233 2233 # ts = tuple(t for w, t in sorted(zip(ws, ts), key=lambda wt: wt[0]))
2234 2234 return max(ws), (op,) + tuple(ts)
2235 2235 elif op == 'not':
2236 2236 # Optimize not public() to _notpublic() because we have a fast version
2237 2237 if x[1] == ('func', ('symbol', 'public'), None):
2238 2238 newsym = ('func', ('symbol', '_notpublic'), None)
2239 2239 o = optimize(newsym, not small)
2240 2240 return o[0], o[1]
2241 2241 else:
2242 2242 o = optimize(x[1], not small)
2243 2243 return o[0], (op, o[1])
2244 2244 elif op == 'parentpost':
2245 2245 o = optimize(x[1], small)
2246 2246 return o[0], (op, o[1])
2247 2247 elif op == 'group':
2248 2248 return optimize(x[1], small)
2249 2249 elif op in 'dagrange range parent ancestorspec':
2250 2250 if op == 'parent':
2251 2251 # x^:y means (x^) : y, not x ^ (:y)
2252 2252 post = ('parentpost', x[1])
2253 2253 if x[2][0] == 'dagrangepre':
2254 2254 return optimize(('dagrange', post, x[2][1]), small)
2255 2255 elif x[2][0] == 'rangepre':
2256 2256 return optimize(('range', post, x[2][1]), small)
2257 2257
2258 2258 wa, ta = optimize(x[1], small)
2259 2259 wb, tb = optimize(x[2], small)
2260 2260 return wa + wb, (op, ta, tb)
2261 2261 elif op == 'list':
2262 2262 ws, ts = zip(*(optimize(y, small) for y in x[1:]))
2263 2263 return sum(ws), (op,) + ts
2264 2264 elif op == 'func':
2265 2265 f = getstring(x[1], _("not a symbol"))
2266 2266 wa, ta = optimize(x[2], small)
2267 2267 if f in ("author branch closed date desc file grep keyword "
2268 2268 "outgoing user"):
2269 2269 w = 10 # slow
2270 2270 elif f in "modifies adds removes":
2271 2271 w = 30 # slower
2272 2272 elif f == "contains":
2273 2273 w = 100 # very slow
2274 2274 elif f == "ancestor":
2275 2275 w = 1 * smallbonus
2276 2276 elif f in "reverse limit first _intlist":
2277 2277 w = 0
2278 2278 elif f in "sort":
2279 2279 w = 10 # assume most sorts look at changelog
2280 2280 else:
2281 2281 w = 1
2282 2282 return w + wa, (op, x[1], ta)
2283 2283 return 1, x
2284 2284
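# Minimal sketch (illustrative, not part of the original module) of the
# 'not public()' rewrite performed by optimize() above; the tree literal
# mimics the parser output for that expression.
def _optimizeexample():
    w, t = optimize(('not', ('func', ('symbol', 'public'), None)), False)
    assert t == ('func', ('symbol', '_notpublic'), None)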
2285 2285 _aliasarg = ('func', ('symbol', '_aliasarg'))
2286 2286 def _getaliasarg(tree):
2287 2287 """If tree matches ('func', ('symbol', '_aliasarg'), ('string', X)),
2288 2288 return X; otherwise return None.
2289 2289 """
2290 2290 if (len(tree) == 3 and tree[:2] == _aliasarg
2291 2291 and tree[2][0] == 'string'):
2292 2292 return tree[2][1]
2293 2293 return None
2294 2294
2295 2295 def _checkaliasarg(tree, known=None):
2296 2296 """Check that tree contains no _aliasarg construct, or only ones whose
2297 2297 value is in known. Used to avoid injection of alias placeholders.
2298 2298 """
2299 2299 if isinstance(tree, tuple):
2300 2300 arg = _getaliasarg(tree)
2301 2301 if arg is not None and (not known or arg not in known):
2302 2302 raise error.UnknownIdentifier('_aliasarg', [])
2303 2303 for t in tree:
2304 2304 _checkaliasarg(t, known)
2305 2305
2306 2306 # the set of valid characters for the initial letter of symbols in
2307 2307 # alias declarations and definitions
2308 2308 _aliassyminitletters = set(c for c in [chr(i) for i in xrange(256)]
2309 2309 if c.isalnum() or c in '._@$' or ord(c) > 127)
2310 2310
2311 2311 def _tokenizealias(program, lookup=None):
2312 2312 """Parse alias declaration/definition into a stream of tokens
2313 2313
2314 2314 This also allows symbol names to use ``$`` as an initial letter
2315 2315 (for backward compatibility); callers of this function should
2316 2316 examine whether ``$`` is also used for unexpected symbols.
2317 2317 """
2318 2318 return tokenize(program, lookup=lookup,
2319 2319 syminitletters=_aliassyminitletters)
2320 2320
2321 2321 def _parsealiasdecl(decl):
2322 2322 """Parse alias declaration ``decl``
2323 2323
2324 2324 This returns ``(name, tree, args, errorstr)`` tuple:
2325 2325
2326 2326 - ``name``: of declared alias (may be ``decl`` itself at error)
2327 2327 - ``tree``: parse result (or ``None`` at error)
2328 2328 - ``args``: list of alias argument names (or None for symbol declaration)
2329 2329 - ``errorstr``: detail about detected error (or None)
2330 2330
2331 2331 >>> _parsealiasdecl('foo')
2332 2332 ('foo', ('symbol', 'foo'), None, None)
2333 2333 >>> _parsealiasdecl('$foo')
2334 2334 ('$foo', None, None, "'$' not for alias arguments")
2335 2335 >>> _parsealiasdecl('foo::bar')
2336 2336 ('foo::bar', None, None, 'invalid format')
2337 2337 >>> _parsealiasdecl('foo bar')
2338 2338 ('foo bar', None, None, 'at 4: invalid token')
2339 2339 >>> _parsealiasdecl('foo()')
2340 2340 ('foo', ('func', ('symbol', 'foo')), [], None)
2341 2341 >>> _parsealiasdecl('$foo()')
2342 2342 ('$foo()', None, None, "'$' not for alias arguments")
2343 2343 >>> _parsealiasdecl('foo($1, $2)')
2344 2344 ('foo', ('func', ('symbol', 'foo')), ['$1', '$2'], None)
2345 2345 >>> _parsealiasdecl('foo(bar_bar, baz.baz)')
2346 2346 ('foo', ('func', ('symbol', 'foo')), ['bar_bar', 'baz.baz'], None)
2347 2347 >>> _parsealiasdecl('foo($1, $2, nested($1, $2))')
2348 2348 ('foo($1, $2, nested($1, $2))', None, None, 'invalid argument list')
2349 2349 >>> _parsealiasdecl('foo(bar($1, $2))')
2350 2350 ('foo(bar($1, $2))', None, None, 'invalid argument list')
2351 2351 >>> _parsealiasdecl('foo("string")')
2352 2352 ('foo("string")', None, None, 'invalid argument list')
2353 2353 >>> _parsealiasdecl('foo($1, $2')
2354 2354 ('foo($1, $2', None, None, 'at 10: unexpected token: end')
2355 2355 >>> _parsealiasdecl('foo("string')
2356 2356 ('foo("string', None, None, 'at 5: unterminated string')
2357 2357 >>> _parsealiasdecl('foo($1, $2, $1)')
2358 2358 ('foo', None, None, 'argument names collide with each other')
2359 2359 """
2360 2360 p = parser.parser(elements)
2361 2361 try:
2362 2362 tree, pos = p.parse(_tokenizealias(decl))
2363 2363 if (pos != len(decl)):
2364 2364 raise error.ParseError(_('invalid token'), pos)
2365 2365 tree = parser.simplifyinfixops(tree, ('list',))
2366 2366
2367 2367 if isvalidsymbol(tree):
2368 2368 # "name = ...." style
2369 2369 name = getsymbol(tree)
2370 2370 if name.startswith('$'):
2371 2371 return (decl, None, None, _("'$' not for alias arguments"))
2372 2372 return (name, ('symbol', name), None, None)
2373 2373
2374 2374 if isvalidfunc(tree):
2375 2375 # "name(arg, ....) = ...." style
2376 2376 name = getfuncname(tree)
2377 2377 if name.startswith('$'):
2378 2378 return (decl, None, None, _("'$' not for alias arguments"))
2379 2379 args = []
2380 2380 for arg in getfuncargs(tree):
2381 2381 if not isvalidsymbol(arg):
2382 2382 return (decl, None, None, _("invalid argument list"))
2383 2383 args.append(getsymbol(arg))
2384 2384 if len(args) != len(set(args)):
2385 2385 return (name, None, None,
2386 2386 _("argument names collide with each other"))
2387 2387 return (name, ('func', ('symbol', name)), args, None)
2388 2388
2389 2389 return (decl, None, None, _("invalid format"))
2390 2390 except error.ParseError as inst:
2391 2391 return (decl, None, None, parseerrordetail(inst))
2392 2392
2393 2393 def _parsealiasdefn(defn, args):
2394 2394 """Parse alias definition ``defn``
2395 2395
2396 2396 This function also replaces alias argument references in the
2397 2397 specified definition by ``_aliasarg(ARGNAME)``.
2398 2398
2399 2399 ``args`` is a list of alias argument names, or None if the alias
2400 2400 is declared as a symbol.
2401 2401
2402 2402 This returns "tree" as parsing result.
2403 2403
2404 2404 >>> args = ['$1', '$2', 'foo']
2405 2405 >>> print prettyformat(_parsealiasdefn('$1 or foo', args))
2406 2406 (or
2407 2407 (func
2408 2408 ('symbol', '_aliasarg')
2409 2409 ('string', '$1'))
2410 2410 (func
2411 2411 ('symbol', '_aliasarg')
2412 2412 ('string', 'foo')))
2413 2413 >>> try:
2414 2414 ... _parsealiasdefn('$1 or $bar', args)
2415 2415 ... except error.ParseError, inst:
2416 2416 ... print parseerrordetail(inst)
2417 2417 at 6: '$' not for alias arguments
2418 2418 >>> args = ['$1', '$10', 'foo']
2419 2419 >>> print prettyformat(_parsealiasdefn('$10 or foobar', args))
2420 2420 (or
2421 2421 (func
2422 2422 ('symbol', '_aliasarg')
2423 2423 ('string', '$10'))
2424 2424 ('symbol', 'foobar'))
2425 2425 >>> print prettyformat(_parsealiasdefn('"$1" or "foo"', args))
2426 2426 (or
2427 2427 ('string', '$1')
2428 2428 ('string', 'foo'))
2429 2429 """
2430 2430 def tokenizedefn(program, lookup=None):
2431 2431 if args:
2432 2432 argset = set(args)
2433 2433 else:
2434 2434 argset = set()
2435 2435
2436 2436 for t, value, pos in _tokenizealias(program, lookup=lookup):
2437 2437 if t == 'symbol':
2438 2438 if value in argset:
2439 2439 # emulate tokenization of "_aliasarg('ARGNAME')":
2440 2440 # "_aliasarg()" is an unknown symbol only used to separate
2441 2441 # alias argument placeholders from regular strings.
2442 2442 yield ('symbol', '_aliasarg', pos)
2443 2443 yield ('(', None, pos)
2444 2444 yield ('string', value, pos)
2445 2445 yield (')', None, pos)
2446 2446 continue
2447 2447 elif value.startswith('$'):
2448 2448 raise error.ParseError(_("'$' not for alias arguments"),
2449 2449 pos)
2450 2450 yield (t, value, pos)
2451 2451
2452 2452 p = parser.parser(elements)
2453 2453 tree, pos = p.parse(tokenizedefn(defn))
2454 2454 if pos != len(defn):
2455 2455 raise error.ParseError(_('invalid token'), pos)
2456 2456 return parser.simplifyinfixops(tree, ('list', 'or'))
2457 2457
2458 2458 class revsetalias(object):
2459 2459 # whether its own `error` information has already been shown.
2460 2460 # this avoids showing the same warning multiple times at each `findaliases`.
2461 2461 warned = False
2462 2462
2463 2463 def __init__(self, name, value):
2464 2464 '''Aliases like:
2465 2465
2466 2466 h = heads(default)
2467 2467 b($1) = ancestors($1) - ancestors(default)
2468 2468 '''
2469 2469 self.name, self.tree, self.args, self.error = _parsealiasdecl(name)
2470 2470 if self.error:
2471 2471 self.error = _('failed to parse the declaration of revset alias'
2472 2472 ' "%s": %s') % (self.name, self.error)
2473 2473 return
2474 2474
2475 2475 try:
2476 2476 self.replacement = _parsealiasdefn(value, self.args)
2477 2477 # Check for placeholder injection
2478 2478 _checkaliasarg(self.replacement, self.args)
2479 2479 except error.ParseError as inst:
2480 2480 self.error = _('failed to parse the definition of revset alias'
2481 2481 ' "%s": %s') % (self.name, parseerrordetail(inst))
2482 2482
2483 2483 def _getalias(aliases, tree):
2484 2484 """If tree looks like an unexpanded alias, return it. Return None
2485 2485 otherwise.
2486 2486 """
2487 2487 if isinstance(tree, tuple) and tree:
2488 2488 if tree[0] == 'symbol' and len(tree) == 2:
2489 2489 name = tree[1]
2490 2490 alias = aliases.get(name)
2491 2491 if alias and alias.args is None and alias.tree == tree:
2492 2492 return alias
2493 2493 if tree[0] == 'func' and len(tree) > 1:
2494 2494 if tree[1][0] == 'symbol' and len(tree[1]) == 2:
2495 2495 name = tree[1][1]
2496 2496 alias = aliases.get(name)
2497 2497 if alias and alias.args is not None and alias.tree == tree[:2]:
2498 2498 return alias
2499 2499 return None
2500 2500
2501 2501 def _expandargs(tree, args):
2502 2502 """Replace _aliasarg instances with the substitution value of the
2503 2503 same name in args, recursively.
2504 2504 """
2505 2505 if not tree or not isinstance(tree, tuple):
2506 2506 return tree
2507 2507 arg = _getaliasarg(tree)
2508 2508 if arg is not None:
2509 2509 return args[arg]
2510 2510 return tuple(_expandargs(t, args) for t in tree)
2511 2511
2512 2512 def _expandaliases(aliases, tree, expanding, cache):
2513 2513 """Expand aliases in tree, recursively.
2514 2514
2515 2515 'aliases' is a dictionary mapping user defined aliases to
2516 2516 revsetalias objects.
2517 2517 """
2518 2518 if not isinstance(tree, tuple):
2519 2519 # Do not expand raw strings
2520 2520 return tree
2521 2521 alias = _getalias(aliases, tree)
2522 2522 if alias is not None:
2523 2523 if alias.error:
2524 2524 raise error.Abort(alias.error)
2525 2525 if alias in expanding:
2526 2526 raise error.ParseError(_('infinite expansion of revset alias "%s" '
2527 2527 'detected') % alias.name)
2528 2528 expanding.append(alias)
2529 2529 if alias.name not in cache:
2530 2530 cache[alias.name] = _expandaliases(aliases, alias.replacement,
2531 2531 expanding, cache)
2532 2532 result = cache[alias.name]
2533 2533 expanding.pop()
2534 2534 if alias.args is not None:
2535 2535 l = getlist(tree[2])
2536 2536 if len(l) != len(alias.args):
2537 2537 raise error.ParseError(
2538 2538 _('invalid number of arguments: %d') % len(l))
2539 2539 l = [_expandaliases(aliases, a, [], cache) for a in l]
2540 2540 result = _expandargs(result, dict(zip(alias.args, l)))
2541 2541 else:
2542 2542 result = tuple(_expandaliases(aliases, t, expanding, cache)
2543 2543 for t in tree)
2544 2544 return result
2545 2545
2546 2546 def findaliases(ui, tree, showwarning=None):
2547 2547 _checkaliasarg(tree)
2548 2548 aliases = {}
2549 2549 for k, v in ui.configitems('revsetalias'):
2550 2550 alias = revsetalias(k, v)
2551 2551 aliases[alias.name] = alias
2552 2552 tree = _expandaliases(aliases, tree, [], {})
2553 2553 if showwarning:
2554 2554 # warn about problematic (but not referenced) aliases
2555 2555 for name, alias in sorted(aliases.iteritems()):
2556 2556 if alias.error and not alias.warned:
2557 2557 showwarning(_('warning: %s\n') % (alias.error))
2558 2558 alias.warned = True
2559 2559 return tree
2560 2560
2561 2561 def foldconcat(tree):
2562 2562 """Fold elements to be concatenated by `##`
2563 2563 """
2564 2564 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2565 2565 return tree
2566 2566 if tree[0] == '_concat':
2567 2567 pending = [tree]
2568 2568 l = []
2569 2569 while pending:
2570 2570 e = pending.pop()
2571 2571 if e[0] == '_concat':
2572 2572 pending.extend(reversed(e[1:]))
2573 2573 elif e[0] in ('string', 'symbol'):
2574 2574 l.append(e[1])
2575 2575 else:
2576 2576 msg = _("\"##\" can't concatenate \"%s\" element") % (e[0])
2577 2577 raise error.ParseError(msg)
2578 2578 return ('string', ''.join(l))
2579 2579 else:
2580 2580 return tuple(foldconcat(t) for t in tree)
2581 2581
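# Minimal sketch (illustrative, not part of the original module) of what
# foldconcat() does to a parsed "##" concatenation; the tree literal mimics
# the parser output.
def _foldconcatexample():
    tree = ('_concat',
            ('string', 'a'),
            ('_concat', ('symbol', 'b'), ('string', 'c')))
    assert foldconcat(tree) == ('string', 'abc')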
2582 2582 def parse(spec, lookup=None):
2583 2583 p = parser.parser(elements)
2584 2584 tree, pos = p.parse(tokenize(spec, lookup=lookup))
2585 2585 if pos != len(spec):
2586 2586 raise error.ParseError(_("invalid token"), pos)
2587 2587 return parser.simplifyinfixops(tree, ('list', 'or'))
2588 2588
2589 2589 def posttreebuilthook(tree, repo):
2590 2590 # hook for extensions to execute code on the optimized tree
2591 2591 pass
2592 2592
2593 2593 def match(ui, spec, repo=None):
2594 2594 if not spec:
2595 2595 raise error.ParseError(_("empty query"))
2596 2596 lookup = None
2597 2597 if repo:
2598 2598 lookup = repo.__contains__
2599 2599 tree = parse(spec, lookup)
2600 2600 return _makematcher(ui, tree, repo)
2601 2601
2602 2602 def matchany(ui, specs, repo=None):
2603 2603 """Create a matcher that will include any revisions matching one of the
2604 2604 given specs"""
2605 2605 if not specs:
2606 2606 def mfunc(repo, subset=None):
2607 2607 return baseset()
2608 2608 return mfunc
2609 2609 if not all(specs):
2610 2610 raise error.ParseError(_("empty query"))
2611 2611 lookup = None
2612 2612 if repo:
2613 2613 lookup = repo.__contains__
2614 2614 if len(specs) == 1:
2615 2615 tree = parse(specs[0], lookup)
2616 2616 else:
2617 2617 tree = ('or',) + tuple(parse(s, lookup) for s in specs)
2618 2618 return _makematcher(ui, tree, repo)
2619 2619
2620 2620 def _makematcher(ui, tree, repo):
2621 2621 if ui:
2622 2622 tree = findaliases(ui, tree, showwarning=ui.warn)
2623 2623 tree = foldconcat(tree)
2624 2624 weight, tree = optimize(tree, True)
2625 2625 posttreebuilthook(tree, repo)
2626 2626 def mfunc(repo, subset=None):
2627 2627 if subset is None:
2628 2628 subset = fullreposet(repo)
2629 2629 if util.safehasattr(subset, 'isascending'):
2630 2630 result = getset(repo, subset, tree)
2631 2631 else:
2632 2632 result = getset(repo, baseset(subset), tree)
2633 2633 return result
2634 2634 return mfunc
2635 2635
2636 2636 def formatspec(expr, *args):
2637 2637 '''
2638 2638 This is a convenience function for using revsets internally, and
2639 2639 escapes arguments appropriately. Aliases are intentionally ignored
2640 2640 so that intended expression behavior isn't accidentally subverted.
2641 2641
2642 2642 Supported arguments:
2643 2643
2644 2644 %r = revset expression, parenthesized
2645 2645 %d = int(arg), no quoting
2646 2646 %s = string(arg), escaped and single-quoted
2647 2647 %b = arg.branch(), escaped and single-quoted
2648 2648 %n = hex(arg), single-quoted
2649 2649 %% = a literal '%'
2650 2650
2651 2651 Prefixing the type with 'l' specifies a parenthesized list of that type.
2652 2652
2653 2653 >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
2654 2654 '(10 or 11):: and ((this()) or (that()))'
2655 2655 >>> formatspec('%d:: and not %d::', 10, 20)
2656 2656 '10:: and not 20::'
2657 2657 >>> formatspec('%ld or %ld', [], [1])
2658 2658 "_list('') or 1"
2659 2659 >>> formatspec('keyword(%s)', 'foo\\xe9')
2660 2660 "keyword('foo\\\\xe9')"
2661 2661 >>> b = lambda: 'default'
2662 2662 >>> b.branch = b
2663 2663 >>> formatspec('branch(%b)', b)
2664 2664 "branch('default')"
2665 2665 >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
2666 2666 "root(_list('a\\x00b\\x00c\\x00d'))"
2667 2667 '''
2668 2668
2669 2669 def quote(s):
2670 2670 return repr(str(s))
2671 2671
2672 2672 def argtype(c, arg):
2673 2673 if c == 'd':
2674 2674 return str(int(arg))
2675 2675 elif c == 's':
2676 2676 return quote(arg)
2677 2677 elif c == 'r':
2678 2678 parse(arg) # make sure syntax errors are confined
2679 2679 return '(%s)' % arg
2680 2680 elif c == 'n':
2681 2681 return quote(node.hex(arg))
2682 2682 elif c == 'b':
2683 2683 return quote(arg.branch())
2684 2684
2685 2685 def listexp(s, t):
2686 2686 l = len(s)
2687 2687 if l == 0:
2688 2688 return "_list('')"
2689 2689 elif l == 1:
2690 2690 return argtype(t, s[0])
2691 2691 elif t == 'd':
2692 2692 return "_intlist('%s')" % "\0".join(str(int(a)) for a in s)
2693 2693 elif t == 's':
2694 2694 return "_list('%s')" % "\0".join(s)
2695 2695 elif t == 'n':
2696 2696 return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
2697 2697 elif t == 'b':
2698 2698 return "_list('%s')" % "\0".join(a.branch() for a in s)
2699 2699
2700 2700 m = l // 2
2701 2701 return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))
2702 2702
2703 2703 ret = ''
2704 2704 pos = 0
2705 2705 arg = 0
2706 2706 while pos < len(expr):
2707 2707 c = expr[pos]
2708 2708 if c == '%':
2709 2709 pos += 1
2710 2710 d = expr[pos]
2711 2711 if d == '%':
2712 2712 ret += d
2713 2713 elif d in 'dsnbr':
2714 2714 ret += argtype(d, args[arg])
2715 2715 arg += 1
2716 2716 elif d == 'l':
2717 2717 # a list of some type
2718 2718 pos += 1
2719 2719 d = expr[pos]
2720 2720 ret += listexp(list(args[arg]), d)
2721 2721 arg += 1
2722 2722 else:
2723 2723 raise error.Abort('unexpected revspec format character %s' % d)
2724 2724 else:
2725 2725 ret += c
2726 2726 pos += 1
2727 2727
2728 2728 return ret
2729 2729
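# A couple of extra illustrations (not part of the original module) of the
# escaping rules implemented by formatspec() above:
def _formatspecexample():
    assert formatspec('%d::%ld', 10, [2, 3]) == "10::_intlist('2\x003')"
    assert formatspec('%ls', ['a', 'b']) == "_list('a\x00b')"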
2730 2730 def prettyformat(tree):
2731 2731 return parser.prettyformat(tree, ('string', 'symbol'))
2732 2732
2733 2733 def depth(tree):
2734 2734 if isinstance(tree, tuple):
2735 2735 return max(map(depth, tree)) + 1
2736 2736 else:
2737 2737 return 0
2738 2738
2739 2739 def funcsused(tree):
2740 2740 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2741 2741 return set()
2742 2742 else:
2743 2743 funcs = set()
2744 2744 for s in tree[1:]:
2745 2745 funcs |= funcsused(s)
2746 2746 if tree[0] == 'func':
2747 2747 funcs.add(tree[1][1])
2748 2748 return funcs
2749 2749
2750 2750 def _formatsetrepr(r):
2751 2751 """Format an optional printable representation of a set
2752 2752
2753 2753 ======== =================================
2754 2754 type(r) example
2755 2755 ======== =================================
2756 2756 tuple ('<not %r>', other)
2757 2757 str '<branch closed>'
2758 2758 callable lambda: '<branch %r>' % sorted(b)
2759 2759 object other
2760 2760 ======== =================================
2761 2761 """
2762 2762 if r is None:
2763 2763 return ''
2764 2764 elif isinstance(r, tuple):
2765 2765 return r[0] % r[1:]
2766 2766 elif isinstance(r, str):
2767 2767 return r
2768 2768 elif callable(r):
2769 2769 return r()
2770 2770 else:
2771 2771 return repr(r)
2772 2772
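# Minimal sketch (illustrative, not part of the original module) covering the
# accepted shapes documented in the table above:
def _formatsetreprexample():
    assert _formatsetrepr(None) == ''
    assert _formatsetrepr(('<not %r>', 2)) == '<not 2>'
    assert _formatsetrepr('<branch closed>') == '<branch closed>'
    assert _formatsetrepr(lambda: '<computed>') == '<computed>'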
2773 2773 class abstractsmartset(object):
2774 2774
2775 2775 def __nonzero__(self):
2776 2776 """True if the smartset is not empty"""
2777 2777 raise NotImplementedError()
2778 2778
2779 2779 def __contains__(self, rev):
2780 2780 """provide fast membership testing"""
2781 2781 raise NotImplementedError()
2782 2782
2783 2783 def __iter__(self):
2784 2784 """iterate the set in the order it is supposed to be iterated"""
2785 2785 raise NotImplementedError()
2786 2786
2787 2787 # Attributes containing a function to perform a fast iteration in a given
2788 2788 # direction. A smartset can have none, one, or both defined.
2789 2789 #
2790 2790 # Default value is None instead of a function returning None to avoid
2791 2791 # initializing an iterator just for testing if a fast method exists.
2792 2792 fastasc = None
2793 2793 fastdesc = None
2794 2794
2795 2795 def isascending(self):
2796 2796 """True if the set will iterate in ascending order"""
2797 2797 raise NotImplementedError()
2798 2798
2799 2799 def isdescending(self):
2800 2800 """True if the set will iterate in descending order"""
2801 2801 raise NotImplementedError()
2802 2802
2803 2803 @util.cachefunc
2804 2804 def min(self):
2805 2805 """return the minimum element in the set"""
2806 2806 if self.fastasc is not None:
2807 2807 for r in self.fastasc():
2808 2808 return r
2809 2809 raise ValueError('arg is an empty sequence')
2810 2810 return min(self)
2811 2811
2812 2812 @util.cachefunc
2813 2813 def max(self):
2814 2814 """return the maximum element in the set"""
2815 2815 if self.fastdesc is not None:
2816 2816 for r in self.fastdesc():
2817 2817 return r
2818 2818 raise ValueError('arg is an empty sequence')
2819 2819 return max(self)
2820 2820
2821 2821 def first(self):
2822 2822 """return the first element in the set (user iteration perspective)
2823 2823
2824 2824 Return None if the set is empty"""
2825 2825 raise NotImplementedError()
2826 2826
2827 2827 def last(self):
2828 2828 """return the last element in the set (user iteration perspective)
2829 2829
2830 2830 Return None if the set is empty"""
2831 2831 raise NotImplementedError()
2832 2832
2833 2833 def __len__(self):
2834 2834 """return the length of the smartset
2835 2835
2836 2836 This can be expensive on smartsets that could otherwise be lazy."""
2837 2837 raise NotImplementedError()
2838 2838
2839 2839 def reverse(self):
2840 2840 """reverse the expected iteration order"""
2841 2841 raise NotImplementedError()
2842 2842
2843 2843 def sort(self, reverse=True):
2844 2844 """get the set to iterate in an ascending or descending order"""
2845 2845 raise NotImplementedError()
2846 2846
2847 2847 def __and__(self, other):
2848 2848 """Returns a new object with the intersection of the two collections.
2849 2849
2850 2850 This is part of the mandatory API for smartset."""
2851 2851 if isinstance(other, fullreposet):
2852 2852 return self
2853 2853 return self.filter(other.__contains__, condrepr=other, cache=False)
2854 2854
2855 2855 def __add__(self, other):
2856 2856 """Returns a new object with the union of the two collections.
2857 2857
2858 2858 This is part of the mandatory API for smartset."""
2859 2859 return addset(self, other)
2860 2860
2861 2861 def __sub__(self, other):
2862 2862 """Returns a new object with the subtraction of the two collections.
2863 2863
2864 2864 This is part of the mandatory API for smartset."""
2865 2865 c = other.__contains__
2866 2866 return self.filter(lambda r: not c(r), condrepr=('<not %r>', other),
2867 2867 cache=False)
2868 2868
2869 2869 def filter(self, condition, condrepr=None, cache=True):
2870 2870 """Returns this smartset filtered by condition as a new smartset.
2871 2871
2872 2872 `condition` is a callable which takes a revision number and returns a
2873 2873 boolean. Optional `condrepr` provides a printable representation of
2874 2874 the given `condition`.
2875 2875
2876 2876 This is part of the mandatory API for smartset."""
2877 2877 # builtins cannot be cached. but they do not need to be
2878 2878 if cache and util.safehasattr(condition, 'func_code'):
2879 2879 condition = util.cachefunc(condition)
2880 2880 return filteredset(self, condition, condrepr)
2881 2881
2882 2882 class baseset(abstractsmartset):
2883 2883 """Basic data structure that represents a revset and contains the basic
2884 2884 operation that it should be able to perform.
2885 2885 operations that it should be able to perform.
2886 2886 Every method in this class should be implemented by any smartset class.
2887 2887 """
2888 def __init__(self, data=()):
2888 def __init__(self, data=(), datarepr=None):
2889 """
2890 datarepr: a tuple of (format, obj, ...), a function or an object that
2891 provides a printable representation of the given data.
2892 """
2889 2893 if not isinstance(data, list):
2890 2894 if isinstance(data, set):
2891 2895 self._set = data
2892 2896 data = list(data)
2893 2897 self._list = data
2898 self._datarepr = datarepr
2894 2899 self._ascending = None
2895 2900
2896 2901 @util.propertycache
2897 2902 def _set(self):
2898 2903 return set(self._list)
2899 2904
2900 2905 @util.propertycache
2901 2906 def _asclist(self):
2902 2907 asclist = self._list[:]
2903 2908 asclist.sort()
2904 2909 return asclist
2905 2910
2906 2911 def __iter__(self):
2907 2912 if self._ascending is None:
2908 2913 return iter(self._list)
2909 2914 elif self._ascending:
2910 2915 return iter(self._asclist)
2911 2916 else:
2912 2917 return reversed(self._asclist)
2913 2918
2914 2919 def fastasc(self):
2915 2920 return iter(self._asclist)
2916 2921
2917 2922 def fastdesc(self):
2918 2923 return reversed(self._asclist)
2919 2924
2920 2925 @util.propertycache
2921 2926 def __contains__(self):
2922 2927 return self._set.__contains__
2923 2928
2924 2929 def __nonzero__(self):
2925 2930 return bool(self._list)
2926 2931
2927 2932 def sort(self, reverse=False):
2928 2933 self._ascending = not bool(reverse)
2929 2934
2930 2935 def reverse(self):
2931 2936 if self._ascending is None:
2932 2937 self._list.reverse()
2933 2938 else:
2934 2939 self._ascending = not self._ascending
2935 2940
2936 2941 def __len__(self):
2937 2942 return len(self._list)
2938 2943
2939 2944 def isascending(self):
2940 2945 """Returns True if the collection is in ascending order, False if not.
2941 2946
2942 2947 This is part of the mandatory API for smartset."""
2943 2948 if len(self) <= 1:
2944 2949 return True
2945 2950 return self._ascending is not None and self._ascending
2946 2951
2947 2952 def isdescending(self):
2948 2953 """Returns True if the collection is in descending order, False if not.
2949 2954
2950 2955 This is part of the mandatory API for smartset."""
2951 2956 if len(self) <= 1:
2952 2957 return True
2953 2958 return self._ascending is not None and not self._ascending
2954 2959
2955 2960 def first(self):
2956 2961 if self:
2957 2962 if self._ascending is None:
2958 2963 return self._list[0]
2959 2964 elif self._ascending:
2960 2965 return self._asclist[0]
2961 2966 else:
2962 2967 return self._asclist[-1]
2963 2968 return None
2964 2969
2965 2970 def last(self):
2966 2971 if self:
2967 2972 if self._ascending is None:
2968 2973 return self._list[-1]
2969 2974 elif self._ascending:
2970 2975 return self._asclist[-1]
2971 2976 else:
2972 2977 return self._asclist[0]
2973 2978 return None
2974 2979
2975 2980 def __repr__(self):
2976 2981 d = {None: '', False: '-', True: '+'}[self._ascending]
2977 return '<%s%s %r>' % (type(self).__name__, d, self._list)
2982 s = _formatsetrepr(self._datarepr)
2983 if not s:
2984 s = repr(self._list)
2985 return '<%s%s %s>' % (type(self).__name__, d, s)
2978 2986
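# Minimal sketch (illustrative, not part of the original module) of the
# datarepr hook added above for better inspection of basesets:
def _basesetreprexample():
    assert repr(baseset([1, 0])) == '<baseset [1, 0]>'
    s = baseset([1, 0], datarepr=('<got %r>', 'x'))
    assert repr(s) == "<baseset <got 'x'>>"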
2979 2987 class filteredset(abstractsmartset):
2980 2988 """Duck type for baseset class which iterates lazily over the revisions in
2981 2989 the subset and contains a function which tests for membership in the
2982 2990 revset
2983 2991 """
2984 2992 def __init__(self, subset, condition=lambda x: True, condrepr=None):
2985 2993 """
2986 2994 condition: a function that decides whether a revision in the subset
2987 2995 belongs to the revset or not.
2988 2996 condrepr: a tuple of (format, obj, ...), a function or an object that
2989 2997 provides a printable representation of the given condition.
2990 2998 """
2991 2999 self._subset = subset
2992 3000 self._condition = condition
2993 3001 self._condrepr = condrepr
2994 3002
2995 3003 def __contains__(self, x):
2996 3004 return x in self._subset and self._condition(x)
2997 3005
2998 3006 def __iter__(self):
2999 3007 return self._iterfilter(self._subset)
3000 3008
3001 3009 def _iterfilter(self, it):
3002 3010 cond = self._condition
3003 3011 for x in it:
3004 3012 if cond(x):
3005 3013 yield x
3006 3014
3007 3015 @property
3008 3016 def fastasc(self):
3009 3017 it = self._subset.fastasc
3010 3018 if it is None:
3011 3019 return None
3012 3020 return lambda: self._iterfilter(it())
3013 3021
3014 3022 @property
3015 3023 def fastdesc(self):
3016 3024 it = self._subset.fastdesc
3017 3025 if it is None:
3018 3026 return None
3019 3027 return lambda: self._iterfilter(it())
3020 3028
3021 3029 def __nonzero__(self):
3022 3030 fast = self.fastasc
3023 3031 if fast is None:
3024 3032 fast = self.fastdesc
3025 3033 if fast is not None:
3026 3034 it = fast()
3027 3035 else:
3028 3036 it = self
3029 3037
3030 3038 for r in it:
3031 3039 return True
3032 3040 return False
3033 3041
3034 3042 def __len__(self):
3035 3043 # Basic implementation to be changed in future patches.
3036 3044 l = baseset([r for r in self])
3037 3045 return len(l)
3038 3046
3039 3047 def sort(self, reverse=False):
3040 3048 self._subset.sort(reverse=reverse)
3041 3049
3042 3050 def reverse(self):
3043 3051 self._subset.reverse()
3044 3052
3045 3053 def isascending(self):
3046 3054 return self._subset.isascending()
3047 3055
3048 3056 def isdescending(self):
3049 3057 return self._subset.isdescending()
3050 3058
3051 3059 def first(self):
3052 3060 for x in self:
3053 3061 return x
3054 3062 return None
3055 3063
3056 3064 def last(self):
3057 3065 it = None
3058 3066 if self.isascending():
3059 3067 it = self.fastdesc
3060 3068 elif self.isdescending():
3061 3069 it = self.fastasc
3062 3070 if it is not None:
3063 3071 for x in it():
3064 3072 return x
3065 3073 return None #empty case
3066 3074 else:
3067 3075 x = None
3068 3076 for x in self:
3069 3077 pass
3070 3078 return x
3071 3079
3072 3080 def __repr__(self):
3073 3081 xs = [repr(self._subset)]
3074 3082 s = _formatsetrepr(self._condrepr)
3075 3083 if s:
3076 3084 xs.append(s)
3077 3085 return '<%s %s>' % (type(self).__name__, ', '.join(xs))
3078 3086
3079 3087 def _iterordered(ascending, iter1, iter2):
3080 3088 """produce an ordered iteration from two iterators with the same order
3081 3089
3082 3090 The ascending parameter is used to indicate the iteration direction.
3083 3091 """
3084 3092 choice = max
3085 3093 if ascending:
3086 3094 choice = min
3087 3095
3088 3096 val1 = None
3089 3097 val2 = None
3090 3098 try:
3091 3099 # Consume both iterators in an ordered way until one is empty
3092 3100 while True:
3093 3101 if val1 is None:
3094 3102 val1 = iter1.next()
3095 3103 if val2 is None:
3096 3104 val2 = iter2.next()
3097 3105 next = choice(val1, val2)
3098 3106 yield next
3099 3107 if val1 == next:
3100 3108 val1 = None
3101 3109 if val2 == next:
3102 3110 val2 = None
3103 3111 except StopIteration:
3104 3112 # Flush any remaining values and consume the other one
3105 3113 it = iter2
3106 3114 if val1 is not None:
3107 3115 yield val1
3108 3116 it = iter1
3109 3117 elif val2 is not None:
3110 3118 # might have been equality and both are empty
3111 3119 yield val2
3112 3120 for val in it:
3113 3121 yield val
3114 3122
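# Illustration only, not part of the module: a small sketch of _iterordered
# merging two iterators that are already sorted in the same direction and
# collapsing values present in both. The input values are arbitrary.
def _iterordered_sketch():
    merged = _iterordered(True, iter([0, 2, 3]), iter([2, 4, 5]))
    assert list(merged) == [0, 2, 3, 4, 5]   # ascending merge, 2 yielded once
    merged = _iterordered(False, iter([5, 3]), iter([4, 3, 1]))
    assert list(merged) == [5, 4, 3, 1]      # descending merge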
3115 3123 class addset(abstractsmartset):
3116 3124 """Represent the addition of two sets
3117 3125
3118 3126 Wrapper structure for lazily adding two structures without losing much
3119 3127 performance on the __contains__ method
3120 3128
3121 3129 If the ascending attribute is set, that means the two structures are
3122 3130 ordered in either an ascending or descending way. Therefore, we can add
3123 3131 them maintaining the order by iterating over both at the same time
3124 3132
3125 3133 >>> xs = baseset([0, 3, 2])
3126 3134 >>> ys = baseset([5, 2, 4])
3127 3135
3128 3136 >>> rs = addset(xs, ys)
3129 3137 >>> bool(rs), 0 in rs, 1 in rs, 5 in rs, rs.first(), rs.last()
3130 3138 (True, True, False, True, 0, 4)
3131 3139 >>> rs = addset(xs, baseset([]))
3132 3140 >>> bool(rs), 0 in rs, 1 in rs, rs.first(), rs.last()
3133 3141 (True, True, False, 0, 2)
3134 3142 >>> rs = addset(baseset([]), baseset([]))
3135 3143 >>> bool(rs), 0 in rs, rs.first(), rs.last()
3136 3144 (False, False, None, None)
3137 3145
3138 3146 iterate unsorted:
3139 3147 >>> rs = addset(xs, ys)
3140 3148 >>> [x for x in rs] # without _genlist
3141 3149 [0, 3, 2, 5, 4]
3142 3150 >>> assert not rs._genlist
3143 3151 >>> len(rs)
3144 3152 5
3145 3153 >>> [x for x in rs] # with _genlist
3146 3154 [0, 3, 2, 5, 4]
3147 3155 >>> assert rs._genlist
3148 3156
3149 3157 iterate ascending:
3150 3158 >>> rs = addset(xs, ys, ascending=True)
3151 3159 >>> [x for x in rs], [x for x in rs.fastasc()] # without _asclist
3152 3160 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
3153 3161 >>> assert not rs._asclist
3154 3162 >>> len(rs)
3155 3163 5
3156 3164 >>> [x for x in rs], [x for x in rs.fastasc()]
3157 3165 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
3158 3166 >>> assert rs._asclist
3159 3167
3160 3168 iterate descending:
3161 3169 >>> rs = addset(xs, ys, ascending=False)
3162 3170 >>> [x for x in rs], [x for x in rs.fastdesc()] # without _asclist
3163 3171 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
3164 3172 >>> assert not rs._asclist
3165 3173 >>> len(rs)
3166 3174 5
3167 3175 >>> [x for x in rs], [x for x in rs.fastdesc()]
3168 3176 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
3169 3177 >>> assert rs._asclist
3170 3178
3171 3179 iterate ascending without fastasc:
3172 3180 >>> rs = addset(xs, generatorset(ys), ascending=True)
3173 3181 >>> assert rs.fastasc is None
3174 3182 >>> [x for x in rs]
3175 3183 [0, 2, 3, 4, 5]
3176 3184
3177 3185 iterate descending without fastdesc:
3178 3186 >>> rs = addset(generatorset(xs), ys, ascending=False)
3179 3187 >>> assert rs.fastdesc is None
3180 3188 >>> [x for x in rs]
3181 3189 [5, 4, 3, 2, 0]
3182 3190 """
3183 3191 def __init__(self, revs1, revs2, ascending=None):
3184 3192 self._r1 = revs1
3185 3193 self._r2 = revs2
3186 3194 self._iter = None
3187 3195 self._ascending = ascending
3188 3196 self._genlist = None
3189 3197 self._asclist = None
3190 3198
3191 3199 def __len__(self):
3192 3200 return len(self._list)
3193 3201
3194 3202 def __nonzero__(self):
3195 3203 return bool(self._r1) or bool(self._r2)
3196 3204
3197 3205 @util.propertycache
3198 3206 def _list(self):
3199 3207 if not self._genlist:
3200 3208 self._genlist = baseset(iter(self))
3201 3209 return self._genlist
3202 3210
3203 3211 def __iter__(self):
3204 3212 """Iterate over both collections without repeating elements
3205 3213
3206 3214 If the ascending attribute is not set, iterate over the first one and
3207 3215 then over the second one, checking for membership in the first one so we
3208 3216 don't yield any duplicates.
3209 3217
3210 3218 If the ascending attribute is set, iterate over both collections at the
3211 3219 same time, yielding only one value at a time in the given order.
3212 3220 """
3213 3221 if self._ascending is None:
3214 3222 if self._genlist:
3215 3223 return iter(self._genlist)
3216 3224 def arbitraryordergen():
3217 3225 for r in self._r1:
3218 3226 yield r
3219 3227 inr1 = self._r1.__contains__
3220 3228 for r in self._r2:
3221 3229 if not inr1(r):
3222 3230 yield r
3223 3231 return arbitraryordergen()
3224 3232 # try to use our own fast iterator if it exists
3225 3233 self._trysetasclist()
3226 3234 if self._ascending:
3227 3235 attr = 'fastasc'
3228 3236 else:
3229 3237 attr = 'fastdesc'
3230 3238 it = getattr(self, attr)
3231 3239 if it is not None:
3232 3240 return it()
3233 3241 # maybe half of the component supports fast
3234 3242 # get iterator for _r1
3235 3243 iter1 = getattr(self._r1, attr)
3236 3244 if iter1 is None:
3237 3245 # let's avoid side effects (not sure it matters)
3238 3246 iter1 = iter(sorted(self._r1, reverse=not self._ascending))
3239 3247 else:
3240 3248 iter1 = iter1()
3241 3249 # get iterator for _r2
3242 3250 iter2 = getattr(self._r2, attr)
3243 3251 if iter2 is None:
3244 3252 # let's avoid side effects (not sure it matters)
3245 3253 iter2 = iter(sorted(self._r2, reverse=not self._ascending))
3246 3254 else:
3247 3255 iter2 = iter2()
3248 3256 return _iterordered(self._ascending, iter1, iter2)
3249 3257
3250 3258 def _trysetasclist(self):
3251 3259 """populate the _asclist attribute if possible and necessary"""
3252 3260 if self._genlist is not None and self._asclist is None:
3253 3261 self._asclist = sorted(self._genlist)
3254 3262
3255 3263 @property
3256 3264 def fastasc(self):
3257 3265 self._trysetasclist()
3258 3266 if self._asclist is not None:
3259 3267 return self._asclist.__iter__
3260 3268 iter1 = self._r1.fastasc
3261 3269 iter2 = self._r2.fastasc
3262 3270 if None in (iter1, iter2):
3263 3271 return None
3264 3272 return lambda: _iterordered(True, iter1(), iter2())
3265 3273
3266 3274 @property
3267 3275 def fastdesc(self):
3268 3276 self._trysetasclist()
3269 3277 if self._asclist is not None:
3270 3278 return self._asclist.__reversed__
3271 3279 iter1 = self._r1.fastdesc
3272 3280 iter2 = self._r2.fastdesc
3273 3281 if None in (iter1, iter2):
3274 3282 return None
3275 3283 return lambda: _iterordered(False, iter1(), iter2())
3276 3284
3277 3285 def __contains__(self, x):
3278 3286 return x in self._r1 or x in self._r2
3279 3287
3280 3288 def sort(self, reverse=False):
3281 3289 """Sort the added set
3282 3290
3283 3291 For this we use the cached list with all the generated values and if we
3284 3292 know they are ascending or descending we can sort them in a smart way.
3285 3293 """
3286 3294 self._ascending = not reverse
3287 3295
3288 3296 def isascending(self):
3289 3297 return self._ascending is not None and self._ascending
3290 3298
3291 3299 def isdescending(self):
3292 3300 return self._ascending is not None and not self._ascending
3293 3301
3294 3302 def reverse(self):
3295 3303 if self._ascending is None:
3296 3304 self._list.reverse()
3297 3305 else:
3298 3306 self._ascending = not self._ascending
3299 3307
3300 3308 def first(self):
3301 3309 for x in self:
3302 3310 return x
3303 3311 return None
3304 3312
3305 3313 def last(self):
3306 3314 self.reverse()
3307 3315 val = self.first()
3308 3316 self.reverse()
3309 3317 return val
3310 3318
3311 3319 def __repr__(self):
3312 3320 d = {None: '', False: '-', True: '+'}[self._ascending]
3313 3321 return '<%s%s %r, %r>' % (type(self).__name__, d, self._r1, self._r2)
3314 3322
3315 3323 class generatorset(abstractsmartset):
3316 3324 """Wrap a generator for lazy iteration
3317 3325
3318 3326 Wrapper structure for generators that provides lazy membership and can
3319 3327 be iterated more than once.
3320 3328 When asked for membership it generates values until either it finds the
3321 3329 requested one or has gone through all the elements in the generator
3322 3330 """
3323 3331 def __init__(self, gen, iterasc=None):
3324 3332 """
3325 3333 gen: a generator producing the values for the generatorset.
3326 3334 """
3327 3335 self._gen = gen
3328 3336 self._asclist = None
3329 3337 self._cache = {}
3330 3338 self._genlist = []
3331 3339 self._finished = False
3332 3340 self._ascending = True
3333 3341 if iterasc is not None:
3334 3342 if iterasc:
3335 3343 self.fastasc = self._iterator
3336 3344 self.__contains__ = self._asccontains
3337 3345 else:
3338 3346 self.fastdesc = self._iterator
3339 3347 self.__contains__ = self._desccontains
3340 3348
3341 3349 def __nonzero__(self):
3342 3350 # Do not use 'for r in self' because it will enforce the iteration
3343 3351 # order (default ascending), possibly unrolling a whole descending
3344 3352 # iterator.
3345 3353 if self._genlist:
3346 3354 return True
3347 3355 for r in self._consumegen():
3348 3356 return True
3349 3357 return False
3350 3358
3351 3359 def __contains__(self, x):
3352 3360 if x in self._cache:
3353 3361 return self._cache[x]
3354 3362
3355 3363 # Use new values only, as existing values would be cached.
3356 3364 for l in self._consumegen():
3357 3365 if l == x:
3358 3366 return True
3359 3367
3360 3368 self._cache[x] = False
3361 3369 return False
3362 3370
3363 3371 def _asccontains(self, x):
3364 3372 """version of contains optimised for ascending generator"""
3365 3373 if x in self._cache:
3366 3374 return self._cache[x]
3367 3375
3368 3376 # Use new values only, as existing values would be cached.
3369 3377 for l in self._consumegen():
3370 3378 if l == x:
3371 3379 return True
3372 3380 if l > x:
3373 3381 break
3374 3382
3375 3383 self._cache[x] = False
3376 3384 return False
3377 3385
3378 3386 def _desccontains(self, x):
3379 3387 """version of contains optimised for descending generator"""
3380 3388 if x in self._cache:
3381 3389 return self._cache[x]
3382 3390
3383 3391 # Use new values only, as existing values would be cached.
3384 3392 for l in self._consumegen():
3385 3393 if l == x:
3386 3394 return True
3387 3395 if l < x:
3388 3396 break
3389 3397
3390 3398 self._cache[x] = False
3391 3399 return False
3392 3400
3393 3401 def __iter__(self):
3394 3402 if self._ascending:
3395 3403 it = self.fastasc
3396 3404 else:
3397 3405 it = self.fastdesc
3398 3406 if it is not None:
3399 3407 return it()
3400 3408 # we need to consume the iterator
3401 3409 for x in self._consumegen():
3402 3410 pass
3403 3411 # recall the same code
3404 3412 return iter(self)
3405 3413
3406 3414 def _iterator(self):
3407 3415 if self._finished:
3408 3416 return iter(self._genlist)
3409 3417
3410 3418 # We have to use this complex iteration strategy to allow multiple
3411 3419 # iterations at the same time. We need to be able to catch revisions
3412 3420 # removed from _consumegen and added to genlist in another instance.
3413 3421 #
3414 3422 # Getting rid of it would provide about a 15% speedup on this
3415 3423 # iteration.
3416 3424 genlist = self._genlist
3417 3425 nextrev = self._consumegen().next
3418 3426 _len = len # cache global lookup
3419 3427 def gen():
3420 3428 i = 0
3421 3429 while True:
3422 3430 if i < _len(genlist):
3423 3431 yield genlist[i]
3424 3432 else:
3425 3433 yield nextrev()
3426 3434 i += 1
3427 3435 return gen()
3428 3436
3429 3437 def _consumegen(self):
3430 3438 cache = self._cache
3431 3439 genlist = self._genlist.append
3432 3440 for item in self._gen:
3433 3441 cache[item] = True
3434 3442 genlist(item)
3435 3443 yield item
3436 3444 if not self._finished:
3437 3445 self._finished = True
3438 3446 asc = self._genlist[:]
3439 3447 asc.sort()
3440 3448 self._asclist = asc
3441 3449 self.fastasc = asc.__iter__
3442 3450 self.fastdesc = asc.__reversed__
3443 3451
3444 3452 def __len__(self):
3445 3453 for x in self._consumegen():
3446 3454 pass
3447 3455 return len(self._genlist)
3448 3456
3449 3457 def sort(self, reverse=False):
3450 3458 self._ascending = not reverse
3451 3459
3452 3460 def reverse(self):
3453 3461 self._ascending = not self._ascending
3454 3462
3455 3463 def isascending(self):
3456 3464 return self._ascending
3457 3465
3458 3466 def isdescending(self):
3459 3467 return not self._ascending
3460 3468
3461 3469 def first(self):
3462 3470 if self._ascending:
3463 3471 it = self.fastasc
3464 3472 else:
3465 3473 it = self.fastdesc
3466 3474 if it is None:
3467 3475 # we need to consume all and try again
3468 3476 for x in self._consumegen():
3469 3477 pass
3470 3478 return self.first()
3471 3479 return next(it(), None)
3472 3480
3473 3481 def last(self):
3474 3482 if self._ascending:
3475 3483 it = self.fastdesc
3476 3484 else:
3477 3485 it = self.fastasc
3478 3486 if it is None:
3479 3487 # we need to consume all and try again
3480 3488 for x in self._consumegen():
3481 3489 pass
3482 3490 return self.first()
3483 3491 return next(it(), None)
3484 3492
3485 3493 def __repr__(self):
3486 3494 d = {False: '-', True: '+'}[self._ascending]
3487 3495 return '<%s%s>' % (type(self).__name__, d)
3488 3496
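# Illustration only, not part of the module: a rough sketch of generatorset's
# lazy membership. The wrapped generator is consumed only as far as needed and
# every value seen is cached for later queries; the generator below is
# invented for the example.
def _generatorset_sketch():
    gs = generatorset(iter([0, 1, 4, 9, 16]), iterasc=True)
    assert 4 in gs                        # consumes the generator until 4 is found
    assert 5 not in gs                    # misses are cached as False
    assert list(gs) == [0, 1, 4, 9, 16]   # later iterations replay _genlist
    assert gs.last() == 16                # fast iterators exist once fully consumed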
3489 3497 class spanset(abstractsmartset):
3490 3498 """Duck type for baseset class which represents a range of revisions and
3491 3499 can work lazily and without having all the range in memory
3492 3500
3493 3501 Note that spanset(x, y) behaves almost like xrange(x, y) except for two
3494 3502 notable points:
3495 3503 - when x > y it will be automatically descending,
3496 3504 - revisions filtered by the current repoview will be skipped.
3497 3505
3498 3506 """
3499 3507 def __init__(self, repo, start=0, end=None):
3500 3508 """
3501 3509 start: first revision included in the set
3502 3510 (defaults to 0)
3503 3511 end: first revision excluded (last+1)
3504 3512 (defaults to len(repo))
3505 3513
3506 3514 Spanset will be descending if `end` < `start`.
3507 3515 """
3508 3516 if end is None:
3509 3517 end = len(repo)
3510 3518 self._ascending = start <= end
3511 3519 if not self._ascending:
3512 3520 start, end = end + 1, start + 1
3513 3521 self._start = start
3514 3522 self._end = end
3515 3523 self._hiddenrevs = repo.changelog.filteredrevs
3516 3524
3517 3525 def sort(self, reverse=False):
3518 3526 self._ascending = not reverse
3519 3527
3520 3528 def reverse(self):
3521 3529 self._ascending = not self._ascending
3522 3530
3523 3531 def _iterfilter(self, iterrange):
3524 3532 s = self._hiddenrevs
3525 3533 for r in iterrange:
3526 3534 if r not in s:
3527 3535 yield r
3528 3536
3529 3537 def __iter__(self):
3530 3538 if self._ascending:
3531 3539 return self.fastasc()
3532 3540 else:
3533 3541 return self.fastdesc()
3534 3542
3535 3543 def fastasc(self):
3536 3544 iterrange = xrange(self._start, self._end)
3537 3545 if self._hiddenrevs:
3538 3546 return self._iterfilter(iterrange)
3539 3547 return iter(iterrange)
3540 3548
3541 3549 def fastdesc(self):
3542 3550 iterrange = xrange(self._end - 1, self._start - 1, -1)
3543 3551 if self._hiddenrevs:
3544 3552 return self._iterfilter(iterrange)
3545 3553 return iter(iterrange)
3546 3554
3547 3555 def __contains__(self, rev):
3548 3556 hidden = self._hiddenrevs
3549 3557 return ((self._start <= rev < self._end)
3550 3558 and not (hidden and rev in hidden))
3551 3559
3552 3560 def __nonzero__(self):
3553 3561 for r in self:
3554 3562 return True
3555 3563 return False
3556 3564
3557 3565 def __len__(self):
3558 3566 if not self._hiddenrevs:
3559 3567 return abs(self._end - self._start)
3560 3568 else:
3561 3569 count = 0
3562 3570 start = self._start
3563 3571 end = self._end
3564 3572 for rev in self._hiddenrevs:
3565 3573 if (end < rev <= start) or (start <= rev < end):
3566 3574 count += 1
3567 3575 return abs(self._end - self._start) - count
3568 3576
3569 3577 def isascending(self):
3570 3578 return self._ascending
3571 3579
3572 3580 def isdescending(self):
3573 3581 return not self._ascending
3574 3582
3575 3583 def first(self):
3576 3584 if self._ascending:
3577 3585 it = self.fastasc
3578 3586 else:
3579 3587 it = self.fastdesc
3580 3588 for x in it():
3581 3589 return x
3582 3590 return None
3583 3591
3584 3592 def last(self):
3585 3593 if self._ascending:
3586 3594 it = self.fastdesc
3587 3595 else:
3588 3596 it = self.fastasc
3589 3597 for x in it():
3590 3598 return x
3591 3599 return None
3592 3600
3593 3601 def __repr__(self):
3594 3602 d = {False: '-', True: '+'}[self._ascending]
3595 3603 return '<%s%s %d:%d>' % (type(self).__name__, d,
3596 3604 self._start, self._end - 1)
3597 3605
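# Illustration only, not part of the module: a sketch of spanset, assuming
# `repo` is an already opened repository object (e.g. obtained via
# hg.repository) with at least four revisions and no hidden ones.
def _spanset_sketch(repo):
    s = spanset(repo, 1, 4)
    assert list(s) == [1, 2, 3]       # half-open range, like xrange(1, 4)
    s = spanset(repo, 3, 0)
    assert list(s) == [3, 2, 1]       # start > end iterates descending
    assert 3 in s and 0 not in s      # membership is a plain range check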
3598 3606 class fullreposet(spanset):
3599 3607 """a set containing all revisions in the repo
3600 3608
3601 3609 This class exists to host special optimization and magic to handle virtual
3602 3610 revisions such as "null".
3603 3611 """
3604 3612
3605 3613 def __init__(self, repo):
3606 3614 super(fullreposet, self).__init__(repo)
3607 3615
3608 3616 def __and__(self, other):
3609 3617 """As self contains the whole repo, all of the other set should also be
3610 3618 in self. Therefore `self & other = other`.
3611 3619
3612 3620 This boldly assumes the other contains valid revs only.
3613 3621 """
3614 3622 # other is not a smartset, make it so
3615 3623 if not util.safehasattr(other, 'isascending'):
3616 3624 # filter out hidden revisions
3617 3625 # (this boldly assumes all smartsets are pure)
3618 3626 #
3619 3627 # `other` was used with "&", let's assume this is a set-like
3620 3628 # object.
3621 3629 other = baseset(other - self._hiddenrevs)
3622 3630
3623 3631 # XXX As fullreposet is also used as bootstrap, this is wrong.
3624 3632 #
3625 3633 # With a giveme312() revset returning [3,1,2], this makes
3626 3634 # 'hg log -r "giveme312()"' -> 1, 2, 3 (wrong)
3627 3635 # We cannot just drop it because other usages still need to sort it:
3628 3636 # 'hg log -r "all() and giveme312()"' -> 1, 2, 3 (right)
3629 3637 #
3630 3638 # There are also some faulty revset implementations that rely on it
3631 3639 # (eg: children as of its state in e8075329c5fb)
3632 3640 #
3633 3641 # When we fix the two points above we can move this into the if clause
3634 3642 other.sort(reverse=self.isdescending())
3635 3643 return other
3636 3644
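# Illustration only, not part of the module: a brief sketch of the
# fullreposet.__and__ shortcut above. Because the left operand covers the
# whole repo, the intersection simply returns the other operand, re-sorted to
# match this set's direction; `repo` is assumed to be an open repository.
def _fullreposet_and_sketch(repo):
    rs = fullreposet(repo) & baseset([3, 1, 2])
    assert list(rs) == [1, 2, 3]      # the right operand, sorted ascending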
3637 3645 def prettyformatset(revs):
3638 3646 lines = []
3639 3647 rs = repr(revs)
3640 3648 p = 0
3641 3649 while p < len(rs):
3642 3650 q = rs.find('<', p + 1)
3643 3651 if q < 0:
3644 3652 q = len(rs)
3645 3653 l = rs.count('<', 0, p) - rs.count('>', 0, p)
3646 3654 assert l >= 0
3647 3655 lines.append((l, rs[p:q].rstrip()))
3648 3656 p = q
3649 3657 return '\n'.join(' ' * l + s for l, s in lines)
3650 3658
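# Illustration only, not part of the module: a quick sketch of
# prettyformatset, which splits the nested repr of a smartset tree at each
# '<' and indents it by nesting depth for debug output. The sets below are
# invented; the exact reprs may differ slightly.
def _prettyformatset_sketch():
    revs = addset(baseset([0, 2]), filteredset(baseset([1, 3]), lambda r: r > 1))
    text = prettyformatset(revs)
    # text looks roughly like (one space of indent per nesting level):
    # <addset
    #  <baseset [0, 2]>,
    #  <filteredset
    #   <baseset [1, 3]>>>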
3651 3659 def loadpredicate(ui, extname, registrarobj):
3652 3660 """Load revset predicates from specified registrarobj
3653 3661 """
3654 3662 for name, func in registrarobj._table.iteritems():
3655 3663 symbols[name] = func
3656 3664 if func._safe:
3657 3665 safesymbols.add(name)
3658 3666
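# Illustration only, not part of the module: a hedged sketch of how an
# extension typically feeds loadpredicate(). Predicates are collected in a
# registrar table that the extension loader later passes in; the extension
# name 'myext' and the predicate below are invented.
#
#     # in the hypothetical extension myext.py:
#     from mercurial import registrar
#     revsetpredicate = registrar.revsetpredicate()
#
#     @revsetpredicate('onlydrafts()', safe=True)
#     def onlydrafts(repo, subset, x):
#         # would compute and return a smartset of draft revisions in subset
#         ...
#
#     # the extension machinery then calls, roughly:
#     # loadpredicate(ui, 'myext', revsetpredicate)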
3659 3667 # load built-in predicates explicitly to setup safesymbols
3660 3668 loadpredicate(None, None, predicate)
3661 3669
3662 3670 # tell hggettext to extract docstrings from these functions:
3663 3671 i18nfunctions = symbols.values()