dagop: split module hosting DAG-related algorithms from revset...
Yuya Nishihara
r32903:27932a76 default
2008 lines changed
@@ -1,4 +1,4 @@
1 # revset.py - revision set queries for mercurial
1 # dagop.py - graph ancestry and topology algorithm for revset
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
@@ -8,48 +8,17 @@
8 8 from __future__ import absolute_import
9 9
10 10 import heapq
11 import re
12 11
13 from .i18n import _
14 12 from . import (
15 destutil,
16 encoding,
17 13 error,
18 hbisect,
19 match as matchmod,
20 14 node,
21 obsolete as obsmod,
22 pathutil,
23 phases,
24 registrar,
25 repoview,
26 revsetlang,
27 scmutil,
28 15 smartset,
29 util,
30 16 )
31 17
32 # helpers for processing parsed tree
33 getsymbol = revsetlang.getsymbol
34 getstring = revsetlang.getstring
35 getinteger = revsetlang.getinteger
36 getboolean = revsetlang.getboolean
37 getlist = revsetlang.getlist
38 getrange = revsetlang.getrange
39 getargs = revsetlang.getargs
40 getargsdict = revsetlang.getargsdict
41
42 # constants used as an argument of match() and matchany()
43 anyorder = revsetlang.anyorder
44 defineorder = revsetlang.defineorder
45 followorder = revsetlang.followorder
46
47 18 baseset = smartset.baseset
48 19 generatorset = smartset.generatorset
49 spanset = smartset.spanset
50 fullreposet = smartset.fullreposet
51 20
52 def _revancestors(repo, revs, followfirst):
21 def revancestors(repo, revs, followfirst):
53 22 """Like revlog.ancestors(), but supports followfirst."""
54 23 if followfirst:
55 24 cut = 1
@@ -87,7 +56,7 @@ def _revancestors(repo, revs, followfirst):
87 56
88 57 return generatorset(iterate(), iterasc=False)
89 58
90 def _revdescendants(repo, revs, followfirst):
59 def revdescendants(repo, revs, followfirst):
91 60 """Like revlog.descendants() but supports followfirst."""
92 61 if followfirst:
93 62 cut = 1
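Not part of the changeset: a minimal sketch of how code such as revset.py would reach these helpers once they are hosted in dagop, assuming `repo` is an open localrepository instance.

    from mercurial import dagop, smartset

    heads = smartset.baseset([repo['tip'].rev()])
    # lazy generatorset, iterated in descending order (iterasc=False above)
    ancs = dagop.revancestors(repo, heads, followfirst=False)
    # same idea for descendants, restricted to first parents
    descs = dagop.revdescendants(repo, heads, followfirst=True)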
@@ -171,1705 +140,7 @@ def reachableroots(repo, roots, heads, includepath=False):
171 140 revs.sort()
172 141 return revs
173 142
174 # helpers
175
176 def getset(repo, subset, x):
177 if not x:
178 raise error.ParseError(_("missing argument"))
179 return methods[x[0]](repo, subset, *x[1:])
180
181 def _getrevsource(repo, r):
182 extra = repo[r].extra()
183 for label in ('source', 'transplant_source', 'rebase_source'):
184 if label in extra:
185 try:
186 return repo[extra[label]].rev()
187 except error.RepoLookupError:
188 pass
189 return None
190
191 # operator methods
192
193 def stringset(repo, subset, x):
194 x = scmutil.intrev(repo[x])
195 if (x in subset
196 or x == node.nullrev and isinstance(subset, fullreposet)):
197 return baseset([x])
198 return baseset()
199
200 def rangeset(repo, subset, x, y, order):
201 m = getset(repo, fullreposet(repo), x)
202 n = getset(repo, fullreposet(repo), y)
203
204 if not m or not n:
205 return baseset()
206 return _makerangeset(repo, subset, m.first(), n.last(), order)
207
208 def rangeall(repo, subset, x, order):
209 assert x is None
210 return _makerangeset(repo, subset, 0, len(repo) - 1, order)
211
212 def rangepre(repo, subset, y, order):
213 # ':y' can't be rewritten to '0:y' since '0' may be hidden
214 n = getset(repo, fullreposet(repo), y)
215 if not n:
216 return baseset()
217 return _makerangeset(repo, subset, 0, n.last(), order)
218
219 def rangepost(repo, subset, x, order):
220 m = getset(repo, fullreposet(repo), x)
221 if not m:
222 return baseset()
223 return _makerangeset(repo, subset, m.first(), len(repo) - 1, order)
224
225 def _makerangeset(repo, subset, m, n, order):
226 if m == n:
227 r = baseset([m])
228 elif n == node.wdirrev:
229 r = spanset(repo, m, len(repo)) + baseset([n])
230 elif m == node.wdirrev:
231 r = baseset([m]) + spanset(repo, len(repo) - 1, n - 1)
232 elif m < n:
233 r = spanset(repo, m, n + 1)
234 else:
235 r = spanset(repo, m, n - 1)
236
237 if order == defineorder:
238 return r & subset
239 else:
240 # carrying the sorting over when possible would be more efficient
241 return subset & r
242
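The arithmetic above picks the span bounds and direction; two worked examples with hypothetical revision numbers, evaluated through repo.revs() on an assumed open `repo`:

    revs = repo.revs('2:5')   # m=2 < n=5 -> spanset(repo, 2, 6): 2, 3, 4, 5
    revs = repo.revs('5:2')   # m=5 > n=2 -> spanset(repo, 5, 1): 5, 4, 3, 2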
243 def dagrange(repo, subset, x, y, order):
244 r = fullreposet(repo)
245 xs = reachableroots(repo, getset(repo, r, x), getset(repo, r, y),
246 includepath=True)
247 return subset & xs
248
249 def andset(repo, subset, x, y, order):
250 return getset(repo, getset(repo, subset, x), y)
251
252 def differenceset(repo, subset, x, y, order):
253 return getset(repo, subset, x) - getset(repo, subset, y)
254
255 def _orsetlist(repo, subset, xs):
256 assert xs
257 if len(xs) == 1:
258 return getset(repo, subset, xs[0])
259 p = len(xs) // 2
260 a = _orsetlist(repo, subset, xs[:p])
261 b = _orsetlist(repo, subset, xs[p:])
262 return a + b
263
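The halving above keeps the chain of `+` unions balanced rather than strictly left-nested; a self-contained toy version of the same recursion on plain lists (illustration only, not Mercurial code):

    def orlist(xs):
        """Concatenate a non-empty list of lists by recursive halving."""
        assert xs
        if len(xs) == 1:
            return list(xs[0])
        p = len(xs) // 2
        return orlist(xs[:p]) + orlist(xs[p:])

    print(orlist([[1, 2], [3], [4, 5]]))  # [1, 2, 3, 4, 5]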
264 def orset(repo, subset, x, order):
265 xs = getlist(x)
266 if order == followorder:
267 # slow path to take the subset order
268 return subset & _orsetlist(repo, fullreposet(repo), xs)
269 else:
270 return _orsetlist(repo, subset, xs)
271
272 def notset(repo, subset, x, order):
273 return subset - getset(repo, subset, x)
274
275 def listset(repo, subset, *xs):
276 raise error.ParseError(_("can't use a list in this context"),
277 hint=_('see hg help "revsets.x or y"'))
278
279 def keyvaluepair(repo, subset, k, v):
280 raise error.ParseError(_("can't use a key-value pair in this context"))
281
282 def func(repo, subset, a, b, order):
283 f = getsymbol(a)
284 if f in symbols:
285 func = symbols[f]
286 if getattr(func, '_takeorder', False):
287 return func(repo, subset, b, order)
288 return func(repo, subset, b)
289
290 keep = lambda fn: getattr(fn, '__doc__', None) is not None
291
292 syms = [s for (s, fn) in symbols.items() if keep(fn)]
293 raise error.UnknownIdentifier(f, syms)
294
295 # functions
296
297 # symbols are callables like:
298 # fn(repo, subset, x)
299 # with:
300 # repo - current repository instance
301 # subset - of revisions to be examined
302 # x - argument in tree form
303 symbols = {}
304
305 # symbols which can't be used for a DoS attack for any given input
306 # (e.g. those which accept regexes as plain strings shouldn't be included)
307 # functions that just return a lot of changesets (like all) don't count here
308 safesymbols = set()
309
310 predicate = registrar.revsetpredicate()
311
312 @predicate('_destupdate')
313 def _destupdate(repo, subset, x):
314 # experimental revset for update destination
315 args = getargsdict(x, 'limit', 'clean')
316 return subset & baseset([destutil.destupdate(repo, **args)[0]])
317
318 @predicate('_destmerge')
319 def _destmerge(repo, subset, x):
320 # experimental revset for merge destination
321 sourceset = None
322 if x is not None:
323 sourceset = getset(repo, fullreposet(repo), x)
324 return subset & baseset([destutil.destmerge(repo, sourceset=sourceset)])
325
326 @predicate('adds(pattern)', safe=True)
327 def adds(repo, subset, x):
328 """Changesets that add a file matching pattern.
329
330 The pattern without explicit kind like ``glob:`` is expected to be
331 relative to the current directory and match against a file or a
332 directory.
333 """
334 # i18n: "adds" is a keyword
335 pat = getstring(x, _("adds requires a pattern"))
336 return checkstatus(repo, subset, pat, 1)
337
338 @predicate('ancestor(*changeset)', safe=True)
339 def ancestor(repo, subset, x):
340 """A greatest common ancestor of the changesets.
341
342 Accepts 0 or more changesets.
343 Will return empty list when passed no args.
344 Greatest common ancestor of a single changeset is that changeset.
345 """
346 # i18n: "ancestor" is a keyword
347 l = getlist(x)
348 rl = fullreposet(repo)
349 anc = None
350
351 # (getset(repo, rl, i) for i in l) generates a list of lists
352 for revs in (getset(repo, rl, i) for i in l):
353 for r in revs:
354 if anc is None:
355 anc = repo[r]
356 else:
357 anc = anc.ancestor(repo[r])
358
359 if anc is not None and anc.rev() in subset:
360 return baseset([anc.rev()])
361 return baseset()
362
363 def _ancestors(repo, subset, x, followfirst=False):
364 heads = getset(repo, fullreposet(repo), x)
365 if not heads:
366 return baseset()
367 s = _revancestors(repo, heads, followfirst)
368 return subset & s
369
370 @predicate('ancestors(set)', safe=True)
371 def ancestors(repo, subset, x):
372 """Changesets that are ancestors of a changeset in set.
373 """
374 return _ancestors(repo, subset, x)
375
376 @predicate('_firstancestors', safe=True)
377 def _firstancestors(repo, subset, x):
378 # ``_firstancestors(set)``
379 # Like ``ancestors(set)`` but follows only the first parents.
380 return _ancestors(repo, subset, x, followfirst=True)
381
382 def _childrenspec(repo, subset, x, n, order):
383 """Changesets that are the Nth child of a changeset
384 in set.
385 """
386 cs = set()
387 for r in getset(repo, fullreposet(repo), x):
388 for i in range(n):
389 c = repo[r].children()
390 if len(c) == 0:
391 break
392 if len(c) > 1:
393 raise error.RepoLookupError(
394 _("revision in set has more than one child"))
395 r = c[0].rev()
396 else:
397 cs.add(r)
398 return subset & cs
399
400 def ancestorspec(repo, subset, x, n, order):
401 """``set~n``
402 Changesets that are the Nth ancestor (first parents only) of a changeset
403 in set.
404 """
405 n = getinteger(n, _("~ expects a number"))
406 if n < 0:
407 # children lookup
408 return _childrenspec(repo, subset, x, -n, order)
409 ps = set()
410 cl = repo.changelog
411 for r in getset(repo, fullreposet(repo), x):
412 for i in range(n):
413 try:
414 r = cl.parentrevs(r)[0]
415 except error.WdirUnsupported:
416 r = repo[r].parents()[0].rev()
417 ps.add(r)
418 return subset & ps
419
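Hedged usage examples of the `~` operator handled above, assuming an open `repo`:

    revs = repo.revs('tip~2')   # grandparent of tip, following first parents only
    # a negative count is routed to _childrenspec above, i.e. it walks
    # n steps towards (single) children instead of ancestors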
420 @predicate('author(string)', safe=True)
421 def author(repo, subset, x):
422 """Alias for ``user(string)``.
423 """
424 # i18n: "author" is a keyword
425 n = getstring(x, _("author requires a string"))
426 kind, pattern, matcher = _substringmatcher(n, casesensitive=False)
427 return subset.filter(lambda x: matcher(repo[x].user()),
428 condrepr=('<user %r>', n))
429
430 @predicate('bisect(string)', safe=True)
431 def bisect(repo, subset, x):
432 """Changesets marked in the specified bisect status:
433
434 - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
435 - ``goods``, ``bads`` : csets topologically good/bad
436 - ``range`` : csets taking part in the bisection
437 - ``pruned`` : csets that are goods, bads or skipped
438 - ``untested`` : csets whose fate is yet unknown
439 - ``ignored`` : csets ignored due to DAG topology
440 - ``current`` : the cset currently being bisected
441 """
442 # i18n: "bisect" is a keyword
443 status = getstring(x, _("bisect requires a string")).lower()
444 state = set(hbisect.get(repo, status))
445 return subset & state
446
447 # Backward-compatibility
448 # - no help entry so that we do not advertise it any more
449 @predicate('bisected', safe=True)
450 def bisected(repo, subset, x):
451 return bisect(repo, subset, x)
452
453 @predicate('bookmark([name])', safe=True)
454 def bookmark(repo, subset, x):
455 """The named bookmark or all bookmarks.
456
457 Pattern matching is supported for `name`. See :hg:`help revisions.patterns`.
458 """
459 # i18n: "bookmark" is a keyword
460 args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
461 if args:
462 bm = getstring(args[0],
463 # i18n: "bookmark" is a keyword
464 _('the argument to bookmark must be a string'))
465 kind, pattern, matcher = util.stringmatcher(bm)
466 bms = set()
467 if kind == 'literal':
468 bmrev = repo._bookmarks.get(pattern, None)
469 if not bmrev:
470 raise error.RepoLookupError(_("bookmark '%s' does not exist")
471 % pattern)
472 bms.add(repo[bmrev].rev())
473 else:
474 matchrevs = set()
475 for name, bmrev in repo._bookmarks.iteritems():
476 if matcher(name):
477 matchrevs.add(bmrev)
478 if not matchrevs:
479 raise error.RepoLookupError(_("no bookmarks exist"
480 " that match '%s'") % pattern)
481 for bmrev in matchrevs:
482 bms.add(repo[bmrev].rev())
483 else:
484 bms = {repo[r].rev() for r in repo._bookmarks.values()}
485 bms -= {node.nullrev}
486 return subset & bms
487
488 @predicate('branch(string or set)', safe=True)
489 def branch(repo, subset, x):
490 """
491 All changesets belonging to the given branch or the branches of the given
492 changesets.
493
494 Pattern matching is supported for `string`. See
495 :hg:`help revisions.patterns`.
496 """
497 getbi = repo.revbranchcache().branchinfo
498 def getbranch(r):
499 try:
500 return getbi(r)[0]
501 except error.WdirUnsupported:
502 return repo[r].branch()
503
504 try:
505 b = getstring(x, '')
506 except error.ParseError:
507 # not a string, but another revspec, e.g. tip()
508 pass
509 else:
510 kind, pattern, matcher = util.stringmatcher(b)
511 if kind == 'literal':
512 # note: falls through to the revspec case if no branch with
513 # this name exists and pattern kind is not specified explicitly
514 if pattern in repo.branchmap():
515 return subset.filter(lambda r: matcher(getbranch(r)),
516 condrepr=('<branch %r>', b))
517 if b.startswith('literal:'):
518 raise error.RepoLookupError(_("branch '%s' does not exist")
519 % pattern)
520 else:
521 return subset.filter(lambda r: matcher(getbranch(r)),
522 condrepr=('<branch %r>', b))
523
524 s = getset(repo, fullreposet(repo), x)
525 b = set()
526 for r in s:
527 b.add(getbranch(r))
528 c = s.__contains__
529 return subset.filter(lambda r: c(r) or getbranch(r) in b,
530 condrepr=lambda: '<branch %r>' % sorted(b))
531
532 @predicate('bumped()', safe=True)
533 def bumped(repo, subset, x):
534 """Mutable changesets marked as successors of public changesets.
535
536 Only non-public and non-obsolete changesets can be `bumped`.
537 """
538 # i18n: "bumped" is a keyword
539 getargs(x, 0, 0, _("bumped takes no arguments"))
540 bumped = obsmod.getrevs(repo, 'bumped')
541 return subset & bumped
542
543 @predicate('bundle()', safe=True)
544 def bundle(repo, subset, x):
545 """Changesets in the bundle.
546
547 Bundle must be specified by the -R option."""
548
549 try:
550 bundlerevs = repo.changelog.bundlerevs
551 except AttributeError:
552 raise error.Abort(_("no bundle provided - specify with -R"))
553 return subset & bundlerevs
554
555 def checkstatus(repo, subset, pat, field):
556 hasset = matchmod.patkind(pat) == 'set'
557
558 mcache = [None]
559 def matches(x):
560 c = repo[x]
561 if not mcache[0] or hasset:
562 mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
563 m = mcache[0]
564 fname = None
565 if not m.anypats() and len(m.files()) == 1:
566 fname = m.files()[0]
567 if fname is not None:
568 if fname not in c.files():
569 return False
570 else:
571 for f in c.files():
572 if m(f):
573 break
574 else:
575 return False
576 files = repo.status(c.p1().node(), c.node())[field]
577 if fname is not None:
578 if fname in files:
579 return True
580 else:
581 for f in files:
582 if m(f):
583 return True
584
585 return subset.filter(matches, condrepr=('<status[%r] %r>', field, pat))
586
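For orientation (not in the original text): the `field` argument indexes the tuple returned by repo.status(), and the predicates elsewhere in this file reuse this helper with 0 = modified, 1 = added, 2 = removed. Hypothetical queries, assuming an open `repo`:

    revs = repo.revs('modifies("glob:**.py")')  # checkstatus(..., 0)
    revs = repo.revs('adds("glob:**.py")')      # checkstatus(..., 1)
    revs = repo.revs('removes("README")')       # checkstatus(..., 2)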
587 def _children(repo, subset, parentset):
588 if not parentset:
589 return baseset()
590 cs = set()
591 pr = repo.changelog.parentrevs
592 minrev = parentset.min()
593 nullrev = node.nullrev
594 for r in subset:
595 if r <= minrev:
596 continue
597 p1, p2 = pr(r)
598 if p1 in parentset:
599 cs.add(r)
600 if p2 != nullrev and p2 in parentset:
601 cs.add(r)
602 return baseset(cs)
603
604 @predicate('children(set)', safe=True)
605 def children(repo, subset, x):
606 """Child changesets of changesets in set.
607 """
608 s = getset(repo, fullreposet(repo), x)
609 cs = _children(repo, subset, s)
610 return subset & cs
611
612 @predicate('closed()', safe=True)
613 def closed(repo, subset, x):
614 """Changeset is closed.
615 """
616 # i18n: "closed" is a keyword
617 getargs(x, 0, 0, _("closed takes no arguments"))
618 return subset.filter(lambda r: repo[r].closesbranch(),
619 condrepr='<branch closed>')
620
621 @predicate('contains(pattern)')
622 def contains(repo, subset, x):
623 """The revision's manifest contains a file matching pattern (but might not
624 modify it). See :hg:`help patterns` for information about file patterns.
625
626 The pattern without explicit kind like ``glob:`` is expected to be
627 relative to the current directory and match against a file exactly
628 for efficiency.
629 """
630 # i18n: "contains" is a keyword
631 pat = getstring(x, _("contains requires a pattern"))
632
633 def matches(x):
634 if not matchmod.patkind(pat):
635 pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
636 if pats in repo[x]:
637 return True
638 else:
639 c = repo[x]
640 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
641 for f in c.manifest():
642 if m(f):
643 return True
644 return False
645
646 return subset.filter(matches, condrepr=('<contains %r>', pat))
647
648 @predicate('converted([id])', safe=True)
649 def converted(repo, subset, x):
650 """Changesets converted from the given identifier in the old repository if
651 present, or all converted changesets if no identifier is specified.
652 """
653
654 # There is exactly no chance of resolving the revision, so do a simple
655 # string compare and hope for the best
656
657 rev = None
658 # i18n: "converted" is a keyword
659 l = getargs(x, 0, 1, _('converted takes one or no arguments'))
660 if l:
661 # i18n: "converted" is a keyword
662 rev = getstring(l[0], _('converted requires a revision'))
663
664 def _matchvalue(r):
665 source = repo[r].extra().get('convert_revision', None)
666 return source is not None and (rev is None or source.startswith(rev))
667
668 return subset.filter(lambda r: _matchvalue(r),
669 condrepr=('<converted %r>', rev))
670
671 @predicate('date(interval)', safe=True)
672 def date(repo, subset, x):
673 """Changesets within the interval, see :hg:`help dates`.
674 """
675 # i18n: "date" is a keyword
676 ds = getstring(x, _("date requires a string"))
677 dm = util.matchdate(ds)
678 return subset.filter(lambda x: dm(repo[x].date()[0]),
679 condrepr=('<date %r>', ds))
680
681 @predicate('desc(string)', safe=True)
682 def desc(repo, subset, x):
683 """Search commit message for string. The match is case-insensitive.
684
685 Pattern matching is supported for `string`. See
686 :hg:`help revisions.patterns`.
687 """
688 # i18n: "desc" is a keyword
689 ds = getstring(x, _("desc requires a string"))
690
691 kind, pattern, matcher = _substringmatcher(ds, casesensitive=False)
692
693 return subset.filter(lambda r: matcher(repo[r].description()),
694 condrepr=('<desc %r>', ds))
695
696 def _descendants(repo, subset, x, followfirst=False):
697 roots = getset(repo, fullreposet(repo), x)
698 if not roots:
699 return baseset()
700 s = _revdescendants(repo, roots, followfirst)
701
702 # Both sets need to be ascending in order to lazily return the union
703 # in the correct order.
704 base = subset & roots
705 desc = subset & s
706 result = base + desc
707 if subset.isascending():
708 result.sort()
709 elif subset.isdescending():
710 result.sort(reverse=True)
711 else:
712 result = subset & result
713 return result
714
715 @predicate('descendants(set)', safe=True)
716 def descendants(repo, subset, x):
717 """Changesets which are descendants of changesets in set.
718 """
719 return _descendants(repo, subset, x)
720
721 @predicate('_firstdescendants', safe=True)
722 def _firstdescendants(repo, subset, x):
723 # ``_firstdescendants(set)``
724 # Like ``descendants(set)`` but follows only the first parents.
725 return _descendants(repo, subset, x, followfirst=True)
726
727 @predicate('destination([set])', safe=True)
728 def destination(repo, subset, x):
729 """Changesets that were created by a graft, transplant or rebase operation,
730 with the given revisions specified as the source. Omitting the optional set
731 is the same as passing all().
732 """
733 if x is not None:
734 sources = getset(repo, fullreposet(repo), x)
735 else:
736 sources = fullreposet(repo)
737
738 dests = set()
739
740 # subset contains all of the possible destinations that can be returned, so
741 # iterate over them and see if their source(s) were provided in the arg set.
742 # Even if the immediate src of r is not in the arg set, src's source (or
743 # further back) may be. Scanning back further than the immediate src allows
744 # transitive transplants and rebases to yield the same results as transitive
745 # grafts.
746 for r in subset:
747 src = _getrevsource(repo, r)
748 lineage = None
749
750 while src is not None:
751 if lineage is None:
752 lineage = list()
753
754 lineage.append(r)
755
756 # The visited lineage is a match if the current source is in the arg
757 # set. Since every candidate dest is visited by way of iterating
758 # subset, any dests further back in the lineage will be tested by a
759 # different iteration over subset. Likewise, if the src was already
760 # selected, the current lineage can be selected without going back
761 # further.
762 if src in sources or src in dests:
763 dests.update(lineage)
764 break
765
766 r = src
767 src = _getrevsource(repo, r)
768
769 return subset.filter(dests.__contains__,
770 condrepr=lambda: '<destination %r>' % sorted(dests))
771
772 @predicate('divergent()', safe=True)
773 def divergent(repo, subset, x):
774 """
775 Final successors of changesets with an alternative set of final successors.
776 """
777 # i18n: "divergent" is a keyword
778 getargs(x, 0, 0, _("divergent takes no arguments"))
779 divergent = obsmod.getrevs(repo, 'divergent')
780 return subset & divergent
781
782 @predicate('extinct()', safe=True)
783 def extinct(repo, subset, x):
784 """Obsolete changesets with obsolete descendants only.
785 """
786 # i18n: "extinct" is a keyword
787 getargs(x, 0, 0, _("extinct takes no arguments"))
788 extincts = obsmod.getrevs(repo, 'extinct')
789 return subset & extincts
790
791 @predicate('extra(label, [value])', safe=True)
792 def extra(repo, subset, x):
793 """Changesets with the given label in the extra metadata, with the given
794 optional value.
795
796 Pattern matching is supported for `value`. See
797 :hg:`help revisions.patterns`.
798 """
799 args = getargsdict(x, 'extra', 'label value')
800 if 'label' not in args:
801 # i18n: "extra" is a keyword
802 raise error.ParseError(_('extra takes at least 1 argument'))
803 # i18n: "extra" is a keyword
804 label = getstring(args['label'], _('first argument to extra must be '
805 'a string'))
806 value = None
807
808 if 'value' in args:
809 # i18n: "extra" is a keyword
810 value = getstring(args['value'], _('second argument to extra must be '
811 'a string'))
812 kind, value, matcher = util.stringmatcher(value)
813
814 def _matchvalue(r):
815 extra = repo[r].extra()
816 return label in extra and (value is None or matcher(extra[label]))
817
818 return subset.filter(lambda r: _matchvalue(r),
819 condrepr=('<extra[%r] %r>', label, value))
820
821 @predicate('filelog(pattern)', safe=True)
822 def filelog(repo, subset, x):
823 """Changesets connected to the specified filelog.
824
825 For performance reasons, visits only revisions mentioned in the file-level
826 filelog, rather than filtering through all changesets (much faster, but
827 doesn't include deletes or duplicate changes). For a slower, more accurate
828 result, use ``file()``.
829
830 The pattern without explicit kind like ``glob:`` is expected to be
831 relative to the current directory and match against a file exactly
832 for efficiency.
833
834 If some linkrev points to revisions filtered by the current repoview, we'll
835 work around it to return a non-filtered value.
836 """
837
838 # i18n: "filelog" is a keyword
839 pat = getstring(x, _("filelog requires a pattern"))
840 s = set()
841 cl = repo.changelog
842
843 if not matchmod.patkind(pat):
844 f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
845 files = [f]
846 else:
847 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
848 files = (f for f in repo[None] if m(f))
849
850 for f in files:
851 fl = repo.file(f)
852 known = {}
853 scanpos = 0
854 for fr in list(fl):
855 fn = fl.node(fr)
856 if fn in known:
857 s.add(known[fn])
858 continue
859
860 lr = fl.linkrev(fr)
861 if lr in cl:
862 s.add(lr)
863 elif scanpos is not None:
864 # lowest matching changeset is filtered, scan further
865 # ahead in changelog
866 start = max(lr, scanpos) + 1
867 scanpos = None
868 for r in cl.revs(start):
869 # minimize parsing of non-matching entries
870 if f in cl.revision(r) and f in cl.readfiles(r):
871 try:
872 # try to use manifest delta fastpath
873 n = repo[r].filenode(f)
874 if n not in known:
875 if n == fn:
876 s.add(r)
877 scanpos = r
878 break
879 else:
880 known[n] = r
881 except error.ManifestLookupError:
882 # deletion in changelog
883 continue
884
885 return subset & s
886
887 @predicate('first(set, [n])', safe=True, takeorder=True)
888 def first(repo, subset, x, order):
889 """An alias for limit().
890 """
891 return limit(repo, subset, x, order)
892
893 def _follow(repo, subset, x, name, followfirst=False):
894 l = getargs(x, 0, 2, _("%s takes no arguments or a pattern "
895 "and an optional revset") % name)
896 c = repo['.']
897 if l:
898 x = getstring(l[0], _("%s expected a pattern") % name)
899 rev = None
900 if len(l) >= 2:
901 revs = getset(repo, fullreposet(repo), l[1])
902 if len(revs) != 1:
903 raise error.RepoLookupError(
904 _("%s expected one starting revision") % name)
905 rev = revs.last()
906 c = repo[rev]
907 matcher = matchmod.match(repo.root, repo.getcwd(), [x],
908 ctx=repo[rev], default='path')
909
910 files = c.manifest().walk(matcher)
911
912 s = set()
913 for fname in files:
914 fctx = c[fname]
915 s = s.union(set(c.rev() for c in fctx.ancestors(followfirst)))
916 # include the revision responsible for the most recent version
917 s.add(fctx.introrev())
918 else:
919 s = _revancestors(repo, baseset([c.rev()]), followfirst)
920
921 return subset & s
922
923 @predicate('follow([pattern[, startrev]])', safe=True)
924 def follow(repo, subset, x):
925 """
926 An alias for ``::.`` (ancestors of the working directory's first parent).
927 If pattern is specified, the histories of files matching given
928 pattern in the revision given by startrev are followed, including copies.
929 """
930 return _follow(repo, subset, x, 'follow')
931
932 @predicate('_followfirst', safe=True)
933 def _followfirst(repo, subset, x):
934 # ``followfirst([pattern[, startrev]])``
935 # Like ``follow([pattern[, startrev]])`` but follows only the first parent
936 # of every revision or file revision.
937 return _follow(repo, subset, x, '_followfirst', followfirst=True)
938
939 @predicate('followlines(file, fromline:toline[, startrev=., descend=False])',
940 safe=True)
941 def followlines(repo, subset, x):
942 """Changesets modifying `file` in line range ('fromline', 'toline').
943
944 Line range corresponds to 'file' content at 'startrev' and should hence be
945 consistent with file size. If startrev is not specified, working directory's
946 parent is used.
947
948 By default, ancestors of 'startrev' are returned. If 'descend' is True,
949 descendants of 'startrev' are returned though renames are (currently) not
950 followed in this direction.
951 """
952 from . import context # avoid circular import issues
953
954 args = getargsdict(x, 'followlines', 'file *lines startrev descend')
955 if len(args['lines']) != 1:
956 raise error.ParseError(_("followlines requires a line range"))
957
958 rev = '.'
959 if 'startrev' in args:
960 revs = getset(repo, fullreposet(repo), args['startrev'])
961 if len(revs) != 1:
962 raise error.ParseError(
963 # i18n: "followlines" is a keyword
964 _("followlines expects exactly one revision"))
965 rev = revs.last()
966
967 pat = getstring(args['file'], _("followlines requires a pattern"))
968 if not matchmod.patkind(pat):
969 fname = pathutil.canonpath(repo.root, repo.getcwd(), pat)
970 else:
971 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[rev])
972 files = [f for f in repo[rev] if m(f)]
973 if len(files) != 1:
974 # i18n: "followlines" is a keyword
975 raise error.ParseError(_("followlines expects exactly one file"))
976 fname = files[0]
977
978 # i18n: "followlines" is a keyword
979 lr = getrange(args['lines'][0], _("followlines expects a line range"))
980 fromline, toline = [getinteger(a, _("line range bounds must be integers"))
981 for a in lr]
982 fromline, toline = util.processlinerange(fromline, toline)
983
984 fctx = repo[rev].filectx(fname)
985 descend = False
986 if 'descend' in args:
987 descend = getboolean(args['descend'],
988 # i18n: "descend" is a keyword
989 _("descend argument must be a boolean"))
990 if descend:
991 rs = generatorset(
992 (c.rev() for c, _linerange
993 in context.blockdescendants(fctx, fromline, toline)),
994 iterasc=True)
995 else:
996 rs = generatorset(
997 (c.rev() for c, _linerange
998 in context.blockancestors(fctx, fromline, toline)),
999 iterasc=False)
1000 return subset & rs
1001
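Hedged examples of followlines(), assuming an open `repo`; the file name and line range are made up for illustration:

    # ancestors (the default direction) touching lines 1-40 of the file
    revs = repo.revs('followlines("mercurial/dagop.py", 1:40)')
    # walk towards descendants of the working directory parent instead
    revs = repo.revs('followlines("mercurial/dagop.py", 1:40, descend=True)')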
1002 @predicate('all()', safe=True)
1003 def getall(repo, subset, x):
1004 """All changesets, the same as ``0:tip``.
1005 """
1006 # i18n: "all" is a keyword
1007 getargs(x, 0, 0, _("all takes no arguments"))
1008 return subset & spanset(repo) # drop "null" if any
1009
1010 @predicate('grep(regex)')
1011 def grep(repo, subset, x):
1012 """Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
1013 to ensure special escape characters are handled correctly. Unlike
1014 ``keyword(string)``, the match is case-sensitive.
1015 """
1016 try:
1017 # i18n: "grep" is a keyword
1018 gr = re.compile(getstring(x, _("grep requires a string")))
1019 except re.error as e:
1020 raise error.ParseError(_('invalid match pattern: %s') % e)
1021
1022 def matches(x):
1023 c = repo[x]
1024 for e in c.files() + [c.user(), c.description()]:
1025 if gr.search(e):
1026 return True
1027 return False
1028
1029 return subset.filter(matches, condrepr=('<grep %r>', gr.pattern))
1030
1031 @predicate('_matchfiles', safe=True)
1032 def _matchfiles(repo, subset, x):
1033 # _matchfiles takes a revset list of prefixed arguments:
1034 #
1035 # [p:foo, i:bar, x:baz]
1036 #
1037 # builds a match object from them and filters subset. Allowed
1038 # prefixes are 'p:' for regular patterns, 'i:' for include
1039 # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
1040 # a revision identifier, or the empty string to reference the
1041 # working directory, from which the match object is
1042 # initialized. Use 'd:' to set the default matching mode, default
1043 # to 'glob'. At most one 'r:' and 'd:' argument can be passed.
1044
1045 l = getargs(x, 1, -1, "_matchfiles requires at least one argument")
1046 pats, inc, exc = [], [], []
1047 rev, default = None, None
1048 for arg in l:
1049 s = getstring(arg, "_matchfiles requires string arguments")
1050 prefix, value = s[:2], s[2:]
1051 if prefix == 'p:':
1052 pats.append(value)
1053 elif prefix == 'i:':
1054 inc.append(value)
1055 elif prefix == 'x:':
1056 exc.append(value)
1057 elif prefix == 'r:':
1058 if rev is not None:
1059 raise error.ParseError('_matchfiles expected at most one '
1060 'revision')
1061 if value != '': # empty means working directory; leave rev as None
1062 rev = value
1063 elif prefix == 'd:':
1064 if default is not None:
1065 raise error.ParseError('_matchfiles expected at most one '
1066 'default mode')
1067 default = value
1068 else:
1069 raise error.ParseError('invalid _matchfiles prefix: %s' % prefix)
1070 if not default:
1071 default = 'glob'
1072
1073 m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
1074 exclude=exc, ctx=repo[rev], default=default)
1075
1076 # This directly reads the changelog data as creating changectx for all
1077 # revisions is quite expensive.
1078 getfiles = repo.changelog.readfiles
1079 wdirrev = node.wdirrev
1080 def matches(x):
1081 if x == wdirrev:
1082 files = repo[x].files()
1083 else:
1084 files = getfiles(x)
1085 for f in files:
1086 if m(f):
1087 return True
1088 return False
1089
1090 return subset.filter(matches,
1091 condrepr=('<matchfiles patterns=%r, include=%r '
1092 'exclude=%r, default=%r, rev=%r>',
1093 pats, inc, exc, default, rev))
1094
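As the `file()` predicate just below shows, user-facing patterns are lowered to this internal form; roughly equivalent queries, with a hypothetical pattern and an open `repo` assumed:

    revs = repo.revs('file("glob:**.py")')
    revs = repo.revs('_matchfiles("p:glob:**.py")')  # what file() expands to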
1095 @predicate('file(pattern)', safe=True)
1096 def hasfile(repo, subset, x):
1097 """Changesets affecting files matched by pattern.
1098
1099 For a faster but less accurate result, consider using ``filelog()``
1100 instead.
1101
1102 This predicate uses ``glob:`` as the default kind of pattern.
1103 """
1104 # i18n: "file" is a keyword
1105 pat = getstring(x, _("file requires a pattern"))
1106 return _matchfiles(repo, subset, ('string', 'p:' + pat))
1107
1108 @predicate('head()', safe=True)
1109 def head(repo, subset, x):
1110 """Changeset is a named branch head.
1111 """
1112 # i18n: "head" is a keyword
1113 getargs(x, 0, 0, _("head takes no arguments"))
1114 hs = set()
1115 cl = repo.changelog
1116 for ls in repo.branchmap().itervalues():
1117 hs.update(cl.rev(h) for h in ls)
1118 return subset & baseset(hs)
1119
1120 @predicate('heads(set)', safe=True)
1121 def heads(repo, subset, x):
1122 """Members of set with no children in set.
1123 """
1124 s = getset(repo, subset, x)
1125 ps = parents(repo, subset, x)
1126 return s - ps
1127
1128 @predicate('hidden()', safe=True)
1129 def hidden(repo, subset, x):
1130 """Hidden changesets.
1131 """
1132 # i18n: "hidden" is a keyword
1133 getargs(x, 0, 0, _("hidden takes no arguments"))
1134 hiddenrevs = repoview.filterrevs(repo, 'visible')
1135 return subset & hiddenrevs
1136
1137 @predicate('keyword(string)', safe=True)
1138 def keyword(repo, subset, x):
1139 """Search commit message, user name, and names of changed files for
1140 string. The match is case-insensitive.
1141
1142 For a regular expression or case sensitive search of these fields, use
1143 ``grep(regex)``.
1144 """
1145 # i18n: "keyword" is a keyword
1146 kw = encoding.lower(getstring(x, _("keyword requires a string")))
1147
1148 def matches(r):
1149 c = repo[r]
1150 return any(kw in encoding.lower(t)
1151 for t in c.files() + [c.user(), c.description()])
1152
1153 return subset.filter(matches, condrepr=('<keyword %r>', kw))
1154
1155 @predicate('limit(set[, n[, offset]])', safe=True, takeorder=True)
1156 def limit(repo, subset, x, order):
1157 """First n members of set, defaulting to 1, starting from offset.
1158 """
1159 args = getargsdict(x, 'limit', 'set n offset')
1160 if 'set' not in args:
1161 # i18n: "limit" is a keyword
1162 raise error.ParseError(_("limit requires one to three arguments"))
1163 # i18n: "limit" is a keyword
1164 lim = getinteger(args.get('n'), _("limit expects a number"), default=1)
1165 if lim < 0:
1166 raise error.ParseError(_("negative number to select"))
1167 # i18n: "limit" is a keyword
1168 ofs = getinteger(args.get('offset'), _("limit expects a number"), default=0)
1169 if ofs < 0:
1170 raise error.ParseError(_("negative offset"))
1171 os = getset(repo, fullreposet(repo), args['set'])
1172 ls = os.slice(ofs, ofs + lim)
1173 if order == followorder and lim > 1:
1174 return subset & ls
1175 return ls & subset
1176
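A worked example of the slicing above, assuming an open `repo` whose visible revisions are numbered 0, 1, 2, ...:

    revs = repo.revs('limit(all(), 3, 2)')
    # skips offset=2 revisions, then keeps n=3 of them: 2, 3, 4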
1177 @predicate('last(set, [n])', safe=True, takeorder=True)
1178 def last(repo, subset, x, order):
1179 """Last n members of set, defaulting to 1.
1180 """
1181 # i18n: "last" is a keyword
1182 l = getargs(x, 1, 2, _("last requires one or two arguments"))
1183 lim = 1
1184 if len(l) == 2:
1185 # i18n: "last" is a keyword
1186 lim = getinteger(l[1], _("last expects a number"))
1187 if lim < 0:
1188 raise error.ParseError(_("negative number to select"))
1189 os = getset(repo, fullreposet(repo), l[0])
1190 os.reverse()
1191 ls = os.slice(0, lim)
1192 if order == followorder and lim > 1:
1193 return subset & ls
1194 ls.reverse()
1195 return ls & subset
1196
1197 @predicate('max(set)', safe=True)
1198 def maxrev(repo, subset, x):
1199 """Changeset with highest revision number in set.
1200 """
1201 os = getset(repo, fullreposet(repo), x)
1202 try:
1203 m = os.max()
1204 if m in subset:
1205 return baseset([m], datarepr=('<max %r, %r>', subset, os))
1206 except ValueError:
1207 # os.max() throws a ValueError when the collection is empty.
1208 # Same as python's max().
1209 pass
1210 return baseset(datarepr=('<max %r, %r>', subset, os))
1211
1212 @predicate('merge()', safe=True)
1213 def merge(repo, subset, x):
1214 """Changeset is a merge changeset.
1215 """
1216 # i18n: "merge" is a keyword
1217 getargs(x, 0, 0, _("merge takes no arguments"))
1218 cl = repo.changelog
1219 return subset.filter(lambda r: cl.parentrevs(r)[1] != -1,
1220 condrepr='<merge>')
1221
1222 @predicate('branchpoint()', safe=True)
1223 def branchpoint(repo, subset, x):
1224 """Changesets with more than one child.
1225 """
1226 # i18n: "branchpoint" is a keyword
1227 getargs(x, 0, 0, _("branchpoint takes no arguments"))
1228 cl = repo.changelog
1229 if not subset:
1230 return baseset()
1231 # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
1232 # (and if it is not, it should.)
1233 baserev = min(subset)
1234 parentscount = [0]*(len(repo) - baserev)
1235 for r in cl.revs(start=baserev + 1):
1236 for p in cl.parentrevs(r):
1237 if p >= baserev:
1238 parentscount[p - baserev] += 1
1239 return subset.filter(lambda r: parentscount[r - baserev] > 1,
1240 condrepr='<branchpoint>')
1241
1242 @predicate('min(set)', safe=True)
1243 def minrev(repo, subset, x):
1244 """Changeset with lowest revision number in set.
1245 """
1246 os = getset(repo, fullreposet(repo), x)
1247 try:
1248 m = os.min()
1249 if m in subset:
1250 return baseset([m], datarepr=('<min %r, %r>', subset, os))
1251 except ValueError:
1252 # os.min() throws a ValueError when the collection is empty.
1253 # Same as python's min().
1254 pass
1255 return baseset(datarepr=('<min %r, %r>', subset, os))
1256
1257 @predicate('modifies(pattern)', safe=True)
1258 def modifies(repo, subset, x):
1259 """Changesets modifying files matched by pattern.
1260
1261 The pattern without explicit kind like ``glob:`` is expected to be
1262 relative to the current directory and match against a file or a
1263 directory.
1264 """
1265 # i18n: "modifies" is a keyword
1266 pat = getstring(x, _("modifies requires a pattern"))
1267 return checkstatus(repo, subset, pat, 0)
1268
1269 @predicate('named(namespace)')
1270 def named(repo, subset, x):
1271 """The changesets in a given namespace.
1272
1273 Pattern matching is supported for `namespace`. See
1274 :hg:`help revisions.patterns`.
1275 """
1276 # i18n: "named" is a keyword
1277 args = getargs(x, 1, 1, _('named requires a namespace argument'))
1278
1279 ns = getstring(args[0],
1280 # i18n: "named" is a keyword
1281 _('the argument to named must be a string'))
1282 kind, pattern, matcher = util.stringmatcher(ns)
1283 namespaces = set()
1284 if kind == 'literal':
1285 if pattern not in repo.names:
1286 raise error.RepoLookupError(_("namespace '%s' does not exist")
1287 % ns)
1288 namespaces.add(repo.names[pattern])
1289 else:
1290 for name, ns in repo.names.iteritems():
1291 if matcher(name):
1292 namespaces.add(ns)
1293 if not namespaces:
1294 raise error.RepoLookupError(_("no namespace exists"
1295 " that match '%s'") % pattern)
1296
1297 names = set()
1298 for ns in namespaces:
1299 for name in ns.listnames(repo):
1300 if name not in ns.deprecated:
1301 names.update(repo[n].rev() for n in ns.nodes(repo, name))
1302
1303 names -= {node.nullrev}
1304 return subset & names
1305
1306 @predicate('id(string)', safe=True)
1307 def node_(repo, subset, x):
1308 """Revision non-ambiguously specified by the given hex string prefix.
1309 """
1310 # i18n: "id" is a keyword
1311 l = getargs(x, 1, 1, _("id requires one argument"))
1312 # i18n: "id" is a keyword
1313 n = getstring(l[0], _("id requires a string"))
1314 if len(n) == 40:
1315 try:
1316 rn = repo.changelog.rev(node.bin(n))
1317 except error.WdirUnsupported:
1318 rn = node.wdirrev
1319 except (LookupError, TypeError):
1320 rn = None
1321 else:
1322 rn = None
1323 try:
1324 pm = repo.changelog._partialmatch(n)
1325 if pm is not None:
1326 rn = repo.changelog.rev(pm)
1327 except error.WdirUnsupported:
1328 rn = node.wdirrev
1329
1330 if rn is None:
1331 return baseset()
1332 result = baseset([rn])
1333 return result & subset
1334
1335 @predicate('obsolete()', safe=True)
1336 def obsolete(repo, subset, x):
1337 """Mutable changeset with a newer version."""
1338 # i18n: "obsolete" is a keyword
1339 getargs(x, 0, 0, _("obsolete takes no arguments"))
1340 obsoletes = obsmod.getrevs(repo, 'obsolete')
1341 return subset & obsoletes
1342
1343 @predicate('only(set, [set])', safe=True)
1344 def only(repo, subset, x):
1345 """Changesets that are ancestors of the first set that are not ancestors
1346 of any other head in the repo. If a second set is specified, the result
1347 is ancestors of the first set that are not ancestors of the second set
1348 (i.e. ::<set1> - ::<set2>).
1349 """
1350 cl = repo.changelog
1351 # i18n: "only" is a keyword
1352 args = getargs(x, 1, 2, _('only takes one or two arguments'))
1353 include = getset(repo, fullreposet(repo), args[0])
1354 if len(args) == 1:
1355 if not include:
1356 return baseset()
1357
1358 descendants = set(_revdescendants(repo, include, False))
1359 exclude = [rev for rev in cl.headrevs()
1360 if not rev in descendants and not rev in include]
1361 else:
1362 exclude = getset(repo, fullreposet(repo), args[1])
1363
1364 results = set(cl.findmissingrevs(common=exclude, heads=include))
1365 # XXX we should turn this into a baseset instead of a set, smartset may do
1366 # some optimizations from the fact this is a baseset.
1367 return subset & results
1368
1369 @predicate('origin([set])', safe=True)
1370 def origin(repo, subset, x):
1371 """
1372 Changesets that were specified as a source for the grafts, transplants or
1373 rebases that created the given revisions. Omitting the optional set is the
1374 same as passing all(). If a changeset created by these operations is itself
1375 specified as a source for one of these operations, only the source changeset
1376 for the first operation is selected.
1377 """
1378 if x is not None:
1379 dests = getset(repo, fullreposet(repo), x)
1380 else:
1381 dests = fullreposet(repo)
1382
1383 def _firstsrc(rev):
1384 src = _getrevsource(repo, rev)
1385 if src is None:
1386 return None
1387
1388 while True:
1389 prev = _getrevsource(repo, src)
1390
1391 if prev is None:
1392 return src
1393 src = prev
1394
1395 o = {_firstsrc(r) for r in dests}
1396 o -= {None}
1397 # XXX we should turn this into a baseset instead of a set, smartset may do
1398 # some optimizations from the fact this is a baseset.
1399 return subset & o
1400
1401 @predicate('outgoing([path])', safe=False)
1402 def outgoing(repo, subset, x):
1403 """Changesets not found in the specified destination repository, or the
1404 default push location.
1405 """
1406 # Avoid cycles.
1407 from . import (
1408 discovery,
1409 hg,
1410 )
1411 # i18n: "outgoing" is a keyword
1412 l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
1413 # i18n: "outgoing" is a keyword
1414 dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
1415 dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
1416 dest, branches = hg.parseurl(dest)
1417 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1418 if revs:
1419 revs = [repo.lookup(rev) for rev in revs]
1420 other = hg.peer(repo, {}, dest)
1421 repo.ui.pushbuffer()
1422 outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
1423 repo.ui.popbuffer()
1424 cl = repo.changelog
1425 o = {cl.rev(r) for r in outgoing.missing}
1426 return subset & o
1427
1428 @predicate('p1([set])', safe=True)
1429 def p1(repo, subset, x):
1430 """First parent of changesets in set, or the working directory.
1431 """
1432 if x is None:
1433 p = repo[x].p1().rev()
1434 if p >= 0:
1435 return subset & baseset([p])
1436 return baseset()
1437
1438 ps = set()
1439 cl = repo.changelog
1440 for r in getset(repo, fullreposet(repo), x):
1441 try:
1442 ps.add(cl.parentrevs(r)[0])
1443 except error.WdirUnsupported:
1444 ps.add(repo[r].parents()[0].rev())
1445 ps -= {node.nullrev}
1446 # XXX we should turn this into a baseset instead of a set, smartset may do
1447 # some optimizations from the fact this is a baseset.
1448 return subset & ps
1449
1450 @predicate('p2([set])', safe=True)
1451 def p2(repo, subset, x):
1452 """Second parent of changesets in set, or the working directory.
1453 """
1454 if x is None:
1455 ps = repo[x].parents()
1456 try:
1457 p = ps[1].rev()
1458 if p >= 0:
1459 return subset & baseset([p])
1460 return baseset()
1461 except IndexError:
1462 return baseset()
1463
1464 ps = set()
1465 cl = repo.changelog
1466 for r in getset(repo, fullreposet(repo), x):
1467 try:
1468 ps.add(cl.parentrevs(r)[1])
1469 except error.WdirUnsupported:
1470 parents = repo[r].parents()
1471 if len(parents) == 2:
1472 ps.add(parents[1])
1473 ps -= {node.nullrev}
1474 # XXX we should turn this into a baseset instead of a set, smartset may do
1475 # some optimizations from the fact this is a baseset.
1476 return subset & ps
1477
1478 def parentpost(repo, subset, x, order):
1479 return p1(repo, subset, x)
1480
1481 @predicate('parents([set])', safe=True)
1482 def parents(repo, subset, x):
1483 """
1484 The set of all parents for all changesets in set, or the working directory.
1485 """
1486 if x is None:
1487 ps = set(p.rev() for p in repo[x].parents())
1488 else:
1489 ps = set()
1490 cl = repo.changelog
1491 up = ps.update
1492 parentrevs = cl.parentrevs
1493 for r in getset(repo, fullreposet(repo), x):
1494 try:
1495 up(parentrevs(r))
1496 except error.WdirUnsupported:
1497 up(p.rev() for p in repo[r].parents())
1498 ps -= {node.nullrev}
1499 return subset & ps
1500
1501 def _phase(repo, subset, *targets):
1502 """helper to select all rev in <targets> phases"""
1503 s = repo._phasecache.getrevset(repo, targets)
1504 return subset & s
1505
1506 @predicate('draft()', safe=True)
1507 def draft(repo, subset, x):
1508 """Changeset in draft phase."""
1509 # i18n: "draft" is a keyword
1510 getargs(x, 0, 0, _("draft takes no arguments"))
1511 target = phases.draft
1512 return _phase(repo, subset, target)
1513
1514 @predicate('secret()', safe=True)
1515 def secret(repo, subset, x):
1516 """Changeset in secret phase."""
1517 # i18n: "secret" is a keyword
1518 getargs(x, 0, 0, _("secret takes no arguments"))
1519 target = phases.secret
1520 return _phase(repo, subset, target)
1521
1522 def parentspec(repo, subset, x, n, order):
1523 """``set^0``
1524 The set.
1525 ``set^1`` (or ``set^``), ``set^2``
1526 First or second parent, respectively, of all changesets in set.
1527 """
1528 try:
1529 n = int(n[1])
1530 if n not in (0, 1, 2):
1531 raise ValueError
1532 except (TypeError, ValueError):
1533 raise error.ParseError(_("^ expects a number 0, 1, or 2"))
1534 ps = set()
1535 cl = repo.changelog
1536 for r in getset(repo, fullreposet(repo), x):
1537 if n == 0:
1538 ps.add(r)
1539 elif n == 1:
1540 try:
1541 ps.add(cl.parentrevs(r)[0])
1542 except error.WdirUnsupported:
1543 ps.add(repo[r].parents()[0].rev())
1544 else:
1545 try:
1546 parents = cl.parentrevs(r)
1547 if parents[1] != node.nullrev:
1548 ps.add(parents[1])
1549 except error.WdirUnsupported:
1550 parents = repo[r].parents()
1551 if len(parents) == 2:
1552 ps.add(parents[1].rev())
1553 return subset & ps
1554
1555 @predicate('present(set)', safe=True)
1556 def present(repo, subset, x):
1557 """An empty set, if any revision in set isn't found; otherwise,
1558 all revisions in set.
1559
1560 If any of specified revisions is not present in the local repository,
1561 the query is normally aborted. But this predicate allows the query
1562 to continue even in such cases.
1563 """
1564 try:
1565 return getset(repo, subset, x)
1566 except error.RepoLookupError:
1567 return baseset()
1568
1569 # for internal use
1570 @predicate('_notpublic', safe=True)
1571 def _notpublic(repo, subset, x):
1572 getargs(x, 0, 0, "_notpublic takes no arguments")
1573 return _phase(repo, subset, phases.draft, phases.secret)
1574
1575 @predicate('public()', safe=True)
1576 def public(repo, subset, x):
1577 """Changeset in public phase."""
1578 # i18n: "public" is a keyword
1579 getargs(x, 0, 0, _("public takes no arguments"))
1580 phase = repo._phasecache.phase
1581 target = phases.public
1582 condition = lambda r: phase(repo, r) == target
1583 return subset.filter(condition, condrepr=('<phase %r>', target),
1584 cache=False)
1585
1586 @predicate('remote([id [,path]])', safe=False)
1587 def remote(repo, subset, x):
1588 """Local revision that corresponds to the given identifier in a
1589 remote repository, if present. Here, the '.' identifier is a
1590 synonym for the current local branch.
1591 """
1592
1593 from . import hg # avoid start-up nasties
1594 # i18n: "remote" is a keyword
1595 l = getargs(x, 0, 2, _("remote takes zero, one, or two arguments"))
1596
1597 q = '.'
1598 if len(l) > 0:
1599 # i18n: "remote" is a keyword
1600 q = getstring(l[0], _("remote requires a string id"))
1601 if q == '.':
1602 q = repo['.'].branch()
1603
1604 dest = ''
1605 if len(l) > 1:
1606 # i18n: "remote" is a keyword
1607 dest = getstring(l[1], _("remote requires a repository path"))
1608 dest = repo.ui.expandpath(dest or 'default')
1609 dest, branches = hg.parseurl(dest)
1610 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1611 if revs:
1612 revs = [repo.lookup(rev) for rev in revs]
1613 other = hg.peer(repo, {}, dest)
1614 n = other.lookup(q)
1615 if n in repo:
1616 r = repo[n].rev()
1617 if r in subset:
1618 return baseset([r])
1619 return baseset()
1620
1621 @predicate('removes(pattern)', safe=True)
1622 def removes(repo, subset, x):
1623 """Changesets which remove files matching pattern.
1624
1625 The pattern without explicit kind like ``glob:`` is expected to be
1626 relative to the current directory and match against a file or a
1627 directory.
1628 """
1629 # i18n: "removes" is a keyword
1630 pat = getstring(x, _("removes requires a pattern"))
1631 return checkstatus(repo, subset, pat, 2)
1632
1633 @predicate('rev(number)', safe=True)
1634 def rev(repo, subset, x):
1635 """Revision with the given numeric identifier.
1636 """
1637 # i18n: "rev" is a keyword
1638 l = getargs(x, 1, 1, _("rev requires one argument"))
1639 try:
1640 # i18n: "rev" is a keyword
1641 l = int(getstring(l[0], _("rev requires a number")))
1642 except (TypeError, ValueError):
1643 # i18n: "rev" is a keyword
1644 raise error.ParseError(_("rev expects a number"))
1645 if l not in repo.changelog and l not in (node.nullrev, node.wdirrev):
1646 return baseset()
1647 return subset & baseset([l])
1648
1649 @predicate('matching(revision [, field])', safe=True)
1650 def matching(repo, subset, x):
1651 """Changesets in which a given set of fields match the set of fields in the
1652 selected revision or set.
1653
1654 To match more than one field pass the list of fields to match separated
1655 by spaces (e.g. ``author description``).
1656
1657 Valid fields are most regular revision fields and some special fields.
1658
1659 Regular revision fields are ``description``, ``author``, ``branch``,
1660 ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
1661 and ``diff``.
1662 Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
1663 contents of the revision. Two revisions matching their ``diff`` will
1664 also match their ``files``.
1665
1666 Special fields are ``summary`` and ``metadata``:
1667 ``summary`` matches the first line of the description.
1668 ``metadata`` is equivalent to matching ``description user date``
1669 (i.e. it matches the main metadata fields).
1670
1671 ``metadata`` is the default field which is used when no fields are
1672 specified. You can match more than one field at a time.
1673 """
1674 # i18n: "matching" is a keyword
1675 l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))
1676
1677 revs = getset(repo, fullreposet(repo), l[0])
1678
1679 fieldlist = ['metadata']
1680 if len(l) > 1:
1681 fieldlist = getstring(l[1],
1682 # i18n: "matching" is a keyword
1683 _("matching requires a string "
1684 "as its second argument")).split()
1685
1686 # Make sure that there are no repeated fields,
1687 # expand the 'special' 'metadata' field type
1688 # and check the 'files' whenever we check the 'diff'
1689 fields = []
1690 for field in fieldlist:
1691 if field == 'metadata':
1692 fields += ['user', 'description', 'date']
1693 elif field == 'diff':
1694 # a revision matching the diff must also match the files
1695 # since matching the diff is very costly, make sure to
1696 # also match the files first
1697 fields += ['files', 'diff']
1698 else:
1699 if field == 'author':
1700 field = 'user'
1701 fields.append(field)
1702 fields = set(fields)
1703 if 'summary' in fields and 'description' in fields:
1704 # If a revision matches its description it also matches its summary
1705 fields.discard('summary')
1706
1707 # We may want to match more than one field
1708 # Not all fields take the same amount of time to be matched
1709 # Sort the selected fields in order of increasing matching cost
1710 fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
1711 'files', 'description', 'substate', 'diff']
1712 def fieldkeyfunc(f):
1713 try:
1714 return fieldorder.index(f)
1715 except ValueError:
1716 # assume an unknown field is very costly
1717 return len(fieldorder)
1718 fields = list(fields)
1719 fields.sort(key=fieldkeyfunc)
1720
1721 # Each field will be matched with its own "getfield" function
1722 # which will be added to the getfieldfuncs array of functions
1723 getfieldfuncs = []
1724 _funcs = {
1725 'user': lambda r: repo[r].user(),
1726 'branch': lambda r: repo[r].branch(),
1727 'date': lambda r: repo[r].date(),
1728 'description': lambda r: repo[r].description(),
1729 'files': lambda r: repo[r].files(),
1730 'parents': lambda r: repo[r].parents(),
1731 'phase': lambda r: repo[r].phase(),
1732 'substate': lambda r: repo[r].substate,
1733 'summary': lambda r: repo[r].description().splitlines()[0],
1734 'diff': lambda r: list(repo[r].diff(git=True),)
1735 }
1736 for info in fields:
1737 getfield = _funcs.get(info, None)
1738 if getfield is None:
1739 raise error.ParseError(
1740 # i18n: "matching" is a keyword
1741 _("unexpected field name passed to matching: %s") % info)
1742 getfieldfuncs.append(getfield)
1743 # convert the getfield array of functions into a "getinfo" function
1744 # which returns an array of field values (or a single value if there
1745 # is only one field to match)
1746 getinfo = lambda r: [f(r) for f in getfieldfuncs]
1747
1748 def matches(x):
1749 for rev in revs:
1750 target = getinfo(rev)
1751 match = True
1752 for n, f in enumerate(getfieldfuncs):
1753 if target[n] != f(x):
1754 match = False
1755 if match:
1756 return True
1757 return False
1758
1759 return subset.filter(matches, condrepr=('<matching%r %r>', fields, revs))
1760
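Hedged examples of matching(), assuming an open `repo`:

    # default field list is 'metadata', i.e. user, description and date
    revs = repo.revs('matching(tip)')
    # restrict the comparison to a cheaper, explicit set of fields
    revs = repo.revs('matching(parents(), "author date")')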
1761 @predicate('reverse(set)', safe=True, takeorder=True)
1762 def reverse(repo, subset, x, order):
1763 """Reverse order of set.
1764 """
1765 l = getset(repo, subset, x)
1766 if order == defineorder:
1767 l.reverse()
1768 return l
1769
1770 @predicate('roots(set)', safe=True)
1771 def roots(repo, subset, x):
1772 """Changesets in set with no parent changeset in set.
1773 """
1774 s = getset(repo, fullreposet(repo), x)
1775 parents = repo.changelog.parentrevs
1776 def filter(r):
1777 for p in parents(r):
1778 if 0 <= p and p in s:
1779 return False
1780 return True
1781 return subset & s.filter(filter, condrepr='<roots>')
1782
1783 _sortkeyfuncs = {
1784 'rev': lambda c: c.rev(),
1785 'branch': lambda c: c.branch(),
1786 'desc': lambda c: c.description(),
1787 'user': lambda c: c.user(),
1788 'author': lambda c: c.user(),
1789 'date': lambda c: c.date()[0],
1790 }
1791
1792 def _getsortargs(x):
1793 """Parse sort options into (set, [(key, reverse)], opts)"""
1794 args = getargsdict(x, 'sort', 'set keys topo.firstbranch')
1795 if 'set' not in args:
1796 # i18n: "sort" is a keyword
1797 raise error.ParseError(_('sort requires one or two arguments'))
1798 keys = "rev"
1799 if 'keys' in args:
1800 # i18n: "sort" is a keyword
1801 keys = getstring(args['keys'], _("sort spec must be a string"))
1802
1803 keyflags = []
1804 for k in keys.split():
1805 fk = k
1806 reverse = (k[0] == '-')
1807 if reverse:
1808 k = k[1:]
1809 if k not in _sortkeyfuncs and k != 'topo':
1810 raise error.ParseError(_("unknown sort key %r") % fk)
1811 keyflags.append((k, reverse))
1812
1813 if len(keyflags) > 1 and any(k == 'topo' for k, reverse in keyflags):
1814 # i18n: "topo" is a keyword
1815 raise error.ParseError(_('topo sort order cannot be combined '
1816 'with other sort keys'))
1817
1818 opts = {}
1819 if 'topo.firstbranch' in args:
1820 if any(k == 'topo' for k, reverse in keyflags):
1821 opts['topo.firstbranch'] = args['topo.firstbranch']
1822 else:
1823 # i18n: "topo" and "topo.firstbranch" are keywords
1824 raise error.ParseError(_('topo.firstbranch can only be used '
1825 'when using the topo sort key'))
1826
1827 return args['set'], keyflags, opts
1828
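For example, a spec such as sort(all(), '-date user') parses into keyflags = [('date', True), ('user', False)] with empty opts. The key-splitting step on its own, using nothing beyond the string format handled above:

    keys = '-date user'
    keyflags = []
    for k in keys.split():
        reverse = k.startswith('-')   # leading '-' requests descending order
        if reverse:
            k = k[1:]
        keyflags.append((k, reverse))
    print(keyflags)   # [('date', True), ('user', False)]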
1829 @predicate('sort(set[, [-]key... [, ...]])', safe=True, takeorder=True)
1830 def sort(repo, subset, x, order):
1831 """Sort set by keys. The default sort order is ascending, specify a key
1832 as ``-key`` to sort in descending order.
1833
1834 The keys can be:
1835
1836 - ``rev`` for the revision number,
1837 - ``branch`` for the branch name,
1838 - ``desc`` for the commit message (description),
1839 - ``user`` for user name (``author`` can be used as an alias),
1840 - ``date`` for the commit date,
1841 - ``topo`` for a reverse topological sort
1842
1843 The ``topo`` sort order cannot be combined with other sort keys. This sort
1844 takes one optional argument, ``topo.firstbranch``, which takes a revset that
1845 specifies which topological branches to prioritize in the sort.
1846
1847 """
1848 s, keyflags, opts = _getsortargs(x)
1849 revs = getset(repo, subset, s)
1850
1851 if not keyflags or order != defineorder:
1852 return revs
1853 if len(keyflags) == 1 and keyflags[0][0] == "rev":
1854 revs.sort(reverse=keyflags[0][1])
1855 return revs
1856 elif keyflags[0][0] == "topo":
1857 firstbranch = ()
1858 if 'topo.firstbranch' in opts:
1859 firstbranch = getset(repo, subset, opts['topo.firstbranch'])
1860 revs = baseset(_toposort(revs, repo.changelog.parentrevs, firstbranch),
1861 istopo=True)
1862 if keyflags[0][1]:
1863 revs.reverse()
1864 return revs
1865
1866 # sort() is guaranteed to be stable
1867 ctxs = [repo[r] for r in revs]
1868 for k, reverse in reversed(keyflags):
1869 ctxs.sort(key=_sortkeyfuncs[k], reverse=reverse)
1870 return baseset([c.rev() for c in ctxs])
1871
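The multi-key branch above relies on Python's sort being stable: sorting once per key, from the least significant key to the most significant (hence reversed(keyflags)), is equivalent to a single lexicographic sort. A self-contained sketch with plain tuples standing in for changectx objects:

    revs = [(3, 'bob', 20), (1, 'alice', 30), (2, 'alice', 10)]   # (rev, user, date)
    keyflags = [('user', False), ('date', True)]                  # e.g. sort(..., 'user -date')
    keyfuncs = {'user': lambda c: c[1], 'date': lambda c: c[2]}
    for k, reverse in reversed(keyflags):
        revs.sort(key=keyfuncs[k], reverse=reverse)
    print(revs)   # [(1, 'alice', 30), (2, 'alice', 10), (3, 'bob', 20)]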
1872 def _toposort(revs, parentsfunc, firstbranch=()):
143 def toposort(revs, parentsfunc, firstbranch=()):
1873 144 """Yield revisions from heads to roots one (topo) branch at a time.
1874 145
1875 146 This function aims to be used by a graph generator that wishes to minimize
@@ -2066,274 +337,3 def _toposort(revs, parentsfunc, firstbr
2066 337 for g in groups:
2067 338 for r in g[0]:
2068 339 yield r
2069
2070 @predicate('subrepo([pattern])')
2071 def subrepo(repo, subset, x):
2072 """Changesets that add, modify or remove the given subrepo. If no subrepo
2073 pattern is named, any subrepo changes are returned.
2074 """
2075 # i18n: "subrepo" is a keyword
2076 args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
2077 pat = None
2078 if len(args) != 0:
2079 pat = getstring(args[0], _("subrepo requires a pattern"))
2080
2081 m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])
2082
2083 def submatches(names):
2084 k, p, m = util.stringmatcher(pat)
2085 for name in names:
2086 if m(name):
2087 yield name
2088
2089 def matches(x):
2090 c = repo[x]
2091 s = repo.status(c.p1().node(), c.node(), match=m)
2092
2093 if pat is None:
2094 return s.added or s.modified or s.removed
2095
2096 if s.added:
2097 return any(submatches(c.substate.keys()))
2098
2099 if s.modified:
2100 subs = set(c.p1().substate.keys())
2101 subs.update(c.substate.keys())
2102
2103 for path in submatches(subs):
2104 if c.p1().substate.get(path) != c.substate.get(path):
2105 return True
2106
2107 if s.removed:
2108 return any(submatches(c.p1().substate.keys()))
2109
2110 return False
2111
2112 return subset.filter(matches, condrepr=('<subrepo %r>', pat))
2113
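Typical queries against this predicate (the subrepo path is purely illustrative):

    hg log -r "subrepo()"              # changesets touching .hgsubstate at all
    hg log -r "subrepo('vendor/lib')"  # changesets adding, modifying or removing that subrepo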
2114 def _substringmatcher(pattern, casesensitive=True):
2115 kind, pattern, matcher = util.stringmatcher(pattern,
2116 casesensitive=casesensitive)
2117 if kind == 'literal':
2118 if not casesensitive:
2119 pattern = encoding.lower(pattern)
2120 matcher = lambda s: pattern in encoding.lower(s)
2121 else:
2122 matcher = lambda s: pattern in s
2123 return kind, pattern, matcher
2124
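A rough illustration of the literal branch, the only case this helper rewrites (regex patterns are returned from util.stringmatcher unchanged):

    kind, pat, m = _substringmatcher('Alice', casesensitive=False)
    # kind == 'literal'; m performs a case-insensitive substring test
    print(m('Committed by alice@example.com'))   # True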
2125 @predicate('tag([name])', safe=True)
2126 def tag(repo, subset, x):
2127 """The specified tag by name, or all tagged revisions if no name is given.
2128
2129 Pattern matching is supported for `name`. See
2130 :hg:`help revisions.patterns`.
2131 """
2132 # i18n: "tag" is a keyword
2133 args = getargs(x, 0, 1, _("tag takes one or no arguments"))
2134 cl = repo.changelog
2135 if args:
2136 pattern = getstring(args[0],
2137 # i18n: "tag" is a keyword
2138 _('the argument to tag must be a string'))
2139 kind, pattern, matcher = util.stringmatcher(pattern)
2140 if kind == 'literal':
2141 # avoid resolving all tags
2142 tn = repo._tagscache.tags.get(pattern, None)
2143 if tn is None:
2144 raise error.RepoLookupError(_("tag '%s' does not exist")
2145 % pattern)
2146 s = {repo[tn].rev()}
2147 else:
2148 s = {cl.rev(n) for t, n in repo.tagslist() if matcher(t)}
2149 else:
2150 s = {cl.rev(n) for t, n in repo.tagslist() if t != 'tip'}
2151 return subset & s
2152
2153 @predicate('tagged', safe=True)
2154 def tagged(repo, subset, x):
2155 return tag(repo, subset, x)
2156
2157 @predicate('unstable()', safe=True)
2158 def unstable(repo, subset, x):
2159 """Non-obsolete changesets with obsolete ancestors.
2160 """
2161 # i18n: "unstable" is a keyword
2162 getargs(x, 0, 0, _("unstable takes no arguments"))
2163 unstables = obsmod.getrevs(repo, 'unstable')
2164 return subset & unstables
2165
2166
2167 @predicate('user(string)', safe=True)
2168 def user(repo, subset, x):
2169 """User name contains string. The match is case-insensitive.
2170
2171 Pattern matching is supported for `string`. See
2172 :hg:`help revisions.patterns`.
2173 """
2174 return author(repo, subset, x)
2175
2176 @predicate('wdir()', safe=True)
2177 def wdir(repo, subset, x):
2178 """Working directory. (EXPERIMENTAL)"""
2179 # i18n: "wdir" is a keyword
2180 getargs(x, 0, 0, _("wdir takes no arguments"))
2181 if node.wdirrev in subset or isinstance(subset, fullreposet):
2182 return baseset([node.wdirrev])
2183 return baseset()
2184
2185 def _orderedlist(repo, subset, x):
2186 s = getstring(x, "internal error")
2187 if not s:
2188 return baseset()
2189 # remove duplicates here. it's difficult for caller to deduplicate sets
2190 # because different symbols can point to the same rev.
2191 cl = repo.changelog
2192 ls = []
2193 seen = set()
2194 for t in s.split('\0'):
2195 try:
2196 # fast path for integer revision
2197 r = int(t)
2198 if str(r) != t or r not in cl:
2199 raise ValueError
2200 revs = [r]
2201 except ValueError:
2202 revs = stringset(repo, subset, t)
2203
2204 for r in revs:
2205 if r in seen:
2206 continue
2207 if (r in subset
2208 or r == node.nullrev and isinstance(subset, fullreposet)):
2209 ls.append(r)
2210 seen.add(r)
2211 return baseset(ls)
2212
2213 # for internal use
2214 @predicate('_list', safe=True, takeorder=True)
2215 def _list(repo, subset, x, order):
2216 if order == followorder:
2217 # slow path to take the subset order
2218 return subset & _orderedlist(repo, fullreposet(repo), x)
2219 else:
2220 return _orderedlist(repo, subset, x)
2221
2222 def _orderedintlist(repo, subset, x):
2223 s = getstring(x, "internal error")
2224 if not s:
2225 return baseset()
2226 ls = [int(r) for r in s.split('\0')]
2227 s = subset
2228 return baseset([r for r in ls if r in s])
2229
2230 # for internal use
2231 @predicate('_intlist', safe=True, takeorder=True)
2232 def _intlist(repo, subset, x, order):
2233 if order == followorder:
2234 # slow path to take the subset order
2235 return subset & _orderedintlist(repo, fullreposet(repo), x)
2236 else:
2237 return _orderedintlist(repo, subset, x)
2238
2239 def _orderedhexlist(repo, subset, x):
2240 s = getstring(x, "internal error")
2241 if not s:
2242 return baseset()
2243 cl = repo.changelog
2244 ls = [cl.rev(node.bin(r)) for r in s.split('\0')]
2245 s = subset
2246 return baseset([r for r in ls if r in s])
2247
2248 # for internal use
2249 @predicate('_hexlist', safe=True, takeorder=True)
2250 def _hexlist(repo, subset, x, order):
2251 if order == followorder:
2252 # slow path to take the subset order
2253 return subset & _orderedhexlist(repo, fullreposet(repo), x)
2254 else:
2255 return _orderedhexlist(repo, subset, x)
2256
2257 methods = {
2258 "range": rangeset,
2259 "rangeall": rangeall,
2260 "rangepre": rangepre,
2261 "rangepost": rangepost,
2262 "dagrange": dagrange,
2263 "string": stringset,
2264 "symbol": stringset,
2265 "and": andset,
2266 "or": orset,
2267 "not": notset,
2268 "difference": differenceset,
2269 "list": listset,
2270 "keyvalue": keyvaluepair,
2271 "func": func,
2272 "ancestor": ancestorspec,
2273 "parent": parentspec,
2274 "parentpost": parentpost,
2275 }
2276
2277 def posttreebuilthook(tree, repo):
2278 # hook for extensions to execute code on the optimized tree
2279 pass
2280
2281 def match(ui, spec, repo=None, order=defineorder):
2282 """Create a matcher for a single revision spec
2283
2284 If order=followorder, a matcher takes the ordering specified by the input
2285 set.
2286 """
2287 return matchany(ui, [spec], repo=repo, order=order)
2288
2289 def matchany(ui, specs, repo=None, order=defineorder):
2290 """Create a matcher that will include any revisions matching one of the
2291 given specs
2292
2293 If order=followorder, a matcher takes the ordering specified by the input
2294 set.
2295 """
2296 if not specs:
2297 def mfunc(repo, subset=None):
2298 return baseset()
2299 return mfunc
2300 if not all(specs):
2301 raise error.ParseError(_("empty query"))
2302 lookup = None
2303 if repo:
2304 lookup = repo.__contains__
2305 if len(specs) == 1:
2306 tree = revsetlang.parse(specs[0], lookup)
2307 else:
2308 tree = ('or',
2309 ('list',) + tuple(revsetlang.parse(s, lookup) for s in specs))
2310
2311 if ui:
2312 tree = revsetlang.expandaliases(ui, tree)
2313 tree = revsetlang.foldconcat(tree)
2314 tree = revsetlang.analyze(tree, order)
2315 tree = revsetlang.optimize(tree)
2316 posttreebuilthook(tree, repo)
2317 return makematcher(tree)
2318
2319 def makematcher(tree):
2320 """Create a matcher from an evaluatable tree"""
2321 def mfunc(repo, subset=None):
2322 if subset is None:
2323 subset = fullreposet(repo)
2324 return getset(repo, subset, tree)
2325 return mfunc
2326
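End to end, a caller builds a matcher from a spec and applies it to a repository. A usage sketch, where ui and repo are the usual Mercurial objects and the spec is arbitrary:

    m = match(ui, 'head() and not closed()', repo=repo)
    revs = m(repo)                              # evaluated against fullreposet(repo)
    some = m(repo, subset=baseset([1, 2, 3]))   # or restricted to an explicit subset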
2327 def loadpredicate(ui, extname, registrarobj):
2328 """Load revset predicates from specified registrarobj
2329 """
2330 for name, func in registrarobj._table.iteritems():
2331 symbols[name] = func
2332 if func._safe:
2333 safesymbols.add(name)
2334
2335 # load built-in predicates explicitly to setup safesymbols
2336 loadpredicate(None, None, predicate)
2337
2338 # tell hggettext to extract docstrings from these functions:
2339 i18nfunctions = symbols.values()
@@ -21,7 +21,7 from __future__ import absolute_import
21 21
22 22 from .node import nullrev
23 23 from . import (
24 revset,
24 dagop,
25 25 smartset,
26 26 util,
27 27 )
@@ -70,7 +70,7 def dagwalker(repo, revs):
70 70 # through all revs (issue4782)
71 71 if not isinstance(revs, smartset.baseset):
72 72 revs = smartset.baseset(revs)
73 gp = gpcache[mpar] = sorted(set(revset.reachableroots(
73 gp = gpcache[mpar] = sorted(set(dagop.reachableroots(
74 74 repo, revs, [mpar])))
75 75 if not gp:
76 76 parents.append((MISSINGPARENT, mpar))
@@ -7,11 +7,11
7 7
8 8 from __future__ import absolute_import
9 9
10 import heapq
11 10 import re
12 11
13 12 from .i18n import _
14 13 from . import (
14 dagop,
15 15 destutil,
16 16 encoding,
17 17 error,
@@ -49,128 +49,6 generatorset = smartset.generatorset
49 49 spanset = smartset.spanset
50 50 fullreposet = smartset.fullreposet
51 51
52 def _revancestors(repo, revs, followfirst):
53 """Like revlog.ancestors(), but supports followfirst."""
54 if followfirst:
55 cut = 1
56 else:
57 cut = None
58 cl = repo.changelog
59
60 def iterate():
61 revs.sort(reverse=True)
62 irevs = iter(revs)
63 h = []
64
65 inputrev = next(irevs, None)
66 if inputrev is not None:
67 heapq.heappush(h, -inputrev)
68
69 seen = set()
70 while h:
71 current = -heapq.heappop(h)
72 if current == inputrev:
73 inputrev = next(irevs, None)
74 if inputrev is not None:
75 heapq.heappush(h, -inputrev)
76 if current not in seen:
77 seen.add(current)
78 yield current
79 try:
80 for parent in cl.parentrevs(current)[:cut]:
81 if parent != node.nullrev:
82 heapq.heappush(h, -parent)
83 except error.WdirUnsupported:
84 for parent in repo[current].parents()[:cut]:
85 if parent.rev() != node.nullrev:
86 heapq.heappush(h, -parent.rev())
87
88 return generatorset(iterate(), iterasc=False)
89
90 def _revdescendants(repo, revs, followfirst):
91 """Like revlog.descendants() but supports followfirst."""
92 if followfirst:
93 cut = 1
94 else:
95 cut = None
96
97 def iterate():
98 cl = repo.changelog
99 # XXX this should be 'parentset.min()' assuming 'parentset' is a
100 # smartset (and if it is not, it should.)
101 first = min(revs)
102 nullrev = node.nullrev
103 if first == nullrev:
104 # Are there nodes with a null first parent and a non-null
105 # second one? Maybe. Do we care? Probably not.
106 for i in cl:
107 yield i
108 else:
109 seen = set(revs)
110 for i in cl.revs(first + 1):
111 for x in cl.parentrevs(i)[:cut]:
112 if x != nullrev and x in seen:
113 seen.add(i)
114 yield i
115 break
116
117 return generatorset(iterate(), iterasc=True)
118
119 def _reachablerootspure(repo, minroot, roots, heads, includepath):
120 """return (heads(::<roots> and ::<heads>))
121
122 If includepath is True, return (<roots>::<heads>)."""
123 if not roots:
124 return []
125 parentrevs = repo.changelog.parentrevs
126 roots = set(roots)
127 visit = list(heads)
128 reachable = set()
129 seen = {}
130 # prefetch all the things! (because python is slow)
131 reached = reachable.add
132 dovisit = visit.append
133 nextvisit = visit.pop
134 # open-code the post-order traversal due to the tiny size of
135 # sys.getrecursionlimit()
136 while visit:
137 rev = nextvisit()
138 if rev in roots:
139 reached(rev)
140 if not includepath:
141 continue
142 parents = parentrevs(rev)
143 seen[rev] = parents
144 for parent in parents:
145 if parent >= minroot and parent not in seen:
146 dovisit(parent)
147 if not reachable:
148 return baseset()
149 if not includepath:
150 return reachable
151 for rev in sorted(seen):
152 for parent in seen[rev]:
153 if parent in reachable:
154 reached(rev)
155 return reachable
156
157 def reachableroots(repo, roots, heads, includepath=False):
158 """return (heads(::<roots> and ::<heads>))
159
160 If includepath is True, return (<roots>::<heads>)."""
161 if not roots:
162 return baseset()
163 minroot = roots.min()
164 roots = list(roots)
165 heads = list(heads)
166 try:
167 revs = repo.changelog.reachableroots(minroot, heads, roots, includepath)
168 except AttributeError:
169 revs = _reachablerootspure(repo, minroot, roots, heads, includepath)
170 revs = baseset(revs)
171 revs.sort()
172 return revs
173
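As a concrete illustration of the includepath flag (toy parent map, revision numbers purely illustrative):

    # child -> parents; 4 is a side branch off 1
    parents = {0: (), 1: (0,), 2: (1,), 3: (2,), 4: (1,)}
    # with roots = {1} and heads = {3}:
    #   includepath=False  ->  {1}         only the roots reachable from the heads
    #   includepath=True   ->  {1, 2, 3}   <roots>::<heads>, every rev on the connecting paths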
174 52 # helpers
175 53
176 54 def getset(repo, subset, x):
@@ -242,8 +120,8 def _makerangeset(repo, subset, m, n, or
242 120
243 121 def dagrange(repo, subset, x, y, order):
244 122 r = fullreposet(repo)
245 xs = reachableroots(repo, getset(repo, r, x), getset(repo, r, y),
246 includepath=True)
123 xs = dagop.reachableroots(repo, getset(repo, r, x), getset(repo, r, y),
124 includepath=True)
247 125 return subset & xs
248 126
249 127 def andset(repo, subset, x, y, order):
@@ -364,7 +242,7 def _ancestors(repo, subset, x, followfi
364 242 heads = getset(repo, fullreposet(repo), x)
365 243 if not heads:
366 244 return baseset()
367 s = _revancestors(repo, heads, followfirst)
245 s = dagop.revancestors(repo, heads, followfirst)
368 246 return subset & s
369 247
370 248 @predicate('ancestors(set)', safe=True)
@@ -697,7 +575,7 def _descendants(repo, subset, x, follow
697 575 roots = getset(repo, fullreposet(repo), x)
698 576 if not roots:
699 577 return baseset()
700 s = _revdescendants(repo, roots, followfirst)
578 s = dagop.revdescendants(repo, roots, followfirst)
701 579
702 580 # Both sets need to be ascending in order to lazily return the union
703 581 # in the correct order.
@@ -916,7 +794,7 def _follow(repo, subset, x, name, follo
916 794 # include the revision responsible for the most recent version
917 795 s.add(fctx.introrev())
918 796 else:
919 s = _revancestors(repo, baseset([c.rev()]), followfirst)
797 s = dagop.revancestors(repo, baseset([c.rev()]), followfirst)
920 798
921 799 return subset & s
922 800
@@ -1355,7 +1233,7 def only(repo, subset, x):
1355 1233 if not include:
1356 1234 return baseset()
1357 1235
1358 descendants = set(_revdescendants(repo, include, False))
1236 descendants = set(dagop.revdescendants(repo, include, False))
1359 1237 exclude = [rev for rev in cl.headrevs()
1360 1238 if not rev in descendants and not rev in include]
1361 1239 else:
@@ -1857,7 +1735,8 def sort(repo, subset, x, order):
1857 1735 firstbranch = ()
1858 1736 if 'topo.firstbranch' in opts:
1859 1737 firstbranch = getset(repo, subset, opts['topo.firstbranch'])
1860 revs = baseset(_toposort(revs, repo.changelog.parentrevs, firstbranch),
1738 revs = baseset(dagop.toposort(revs, repo.changelog.parentrevs,
1739 firstbranch),
1861 1740 istopo=True)
1862 1741 if keyflags[0][1]:
1863 1742 revs.reverse()
@@ -1869,204 +1748,6 def sort(repo, subset, x, order):
1869 1748 ctxs.sort(key=_sortkeyfuncs[k], reverse=reverse)
1870 1749 return baseset([c.rev() for c in ctxs])
1871 1750
1872 def _toposort(revs, parentsfunc, firstbranch=()):
1873 """Yield revisions from heads to roots one (topo) branch at a time.
1874
1875 This function aims to be used by a graph generator that wishes to minimize
1876 the number of parallel branches and their interleaving.
1877
1878 Example iteration order (numbers show the "true" order in a changelog):
1879
1880 o 4
1881 |
1882 o 1
1883 |
1884 | o 3
1885 | |
1886 | o 2
1887 |/
1888 o 0
1889
1890 Note that the ancestors of merges are understood by the current
1891 algorithm to be on the same branch. This means no reordering will
1892 occur behind a merge.
1893 """
1894
1895 ### Quick summary of the algorithm
1896 #
1897 # This function is based around a "retention" principle. We keep revisions
1898 # in memory until we are ready to emit a whole branch that immediately
1899 # "merges" into an existing one. This reduces the number of parallel
1900 # branches with interleaved revisions.
1901 #
1902 # During iteration revs are split into two groups:
1903 # A) revisions already emitted
1904 # B) revisions in "retention". They are stored as different subgroups.
1905 #
1906 # for each REV, we do the following logic:
1907 #
1908 # 1) if REV is a parent of (A), we will emit it. If there is a
1909 # retention group ((B) above) that is blocked on REV being
1910 # available, we emit all the revisions out of that retention
1911 # group first.
1912 #
1913 # 2) else, we search for a subgroup in (B) awaiting REV to become
1914 # available; if such a subgroup exists, we add REV to it and the subgroup
1915 # is now awaiting REV.parents() to be available.
1916 #
1917 # 3) finally, if no such group exists in (B), we create a new subgroup.
1918 #
1919 #
1920 # To bootstrap the algorithm, we emit the tipmost revision (which
1921 # puts it in group (A) from above).
1922
1923 revs.sort(reverse=True)
1924
1925 # Set of parents of revisions that have been emitted. They can be considered
1926 # unblocked as the graph generator is already aware of them so there is no
1927 # need to delay the revisions that reference them.
1928 #
1929 # If someone wants to prioritize a branch over the others, pre-filling this
1930 # set will force all other branches to wait until this branch is ready to be
1931 # emitted.
1932 unblocked = set(firstbranch)
1933
1934 # list of groups waiting to be displayed, each group is defined by:
1935 #
1936 # (revs: list of revs waiting to be displayed,
1937 # blocked: set of revs that cannot be displayed before those in 'revs')
1938 #
1939 # The second value ('blocked') corresponds to parents of any revision in the
1940 # group ('revs') that is not itself contained in the group. The main idea
1941 # of this algorithm is to delay as much as possible the emission of any
1942 # revision. This means waiting for the moment we are about to display
1943 # these parents to display the revs in a group.
1944 #
1945 # This first implementation is smart until it encounters a merge: it will
1946 # emit revs as soon as any parent is about to be emitted and can grow an
1947 # arbitrary number of revs in 'blocked'. In practice this means we properly
1948 # retain new branches but give up on any special ordering for ancestors
1949 # of merges. The implementation can be improved to handle this better.
1950 #
1951 # The first subgroup is special. It corresponds to all the revisions that
1952 # were already emitted. The 'revs' list is expected to be empty and the
1953 # 'blocked' set contains the parent revisions of already emitted revisions.
1954 #
1955 # You could pre-seed the <parents> set of groups[0] with specific
1956 # changesets to select what the first emitted branch should be.
1957 groups = [([], unblocked)]
1958 pendingheap = []
1959 pendingset = set()
1960
1961 heapq.heapify(pendingheap)
1962 heappop = heapq.heappop
1963 heappush = heapq.heappush
1964 for currentrev in revs:
1965 # Heap works with the smallest element; we want the highest, so we invert
1966 if currentrev not in pendingset:
1967 heappush(pendingheap, -currentrev)
1968 pendingset.add(currentrev)
1969 # iterate on pending revs until the current rev itself has been
1970 # processed.
1971 rev = None
1972 while rev != currentrev:
1973 rev = -heappop(pendingheap)
1974 pendingset.remove(rev)
1975
1976 # Look for a subgroup that is blocked, waiting for the current revision.
1977 matching = [i for i, g in enumerate(groups) if rev in g[1]]
1978
1979 if matching:
1980 # The main idea is to gather together all sets that are blocked
1981 # on the same revision.
1982 #
1983 # Groups are merged when a common blocking ancestor is
1984 # observed. For example, given two groups:
1985 #
1986 # revs [5, 4] waiting for 1
1987 # revs [3, 2] waiting for 1
1988 #
1989 # These two groups will be merged when we process
1990 # 1. In theory, we could have merged the groups when
1991 # we added 2 to the group it is now in (we could have
1992 # noticed the groups were both blocked on 1 then), but
1993 # the way it works now makes the algorithm simpler.
1994 #
1995 # We also always keep the oldest subgroup first. We can
1996 # probably improve the behavior by having the longest set
1997 # first. That way, graph algorithms could minimise the length
1998 # of parallel lines in their drawing. This is currently not done.
1999 targetidx = matching.pop(0)
2000 trevs, tparents = groups[targetidx]
2001 for i in matching:
2002 gr = groups[i]
2003 trevs.extend(gr[0])
2004 tparents |= gr[1]
2005 # delete all merged subgroups (except the one we kept)
2006 # (starting from the last subgroup for performance and
2007 # sanity reasons)
2008 for i in reversed(matching):
2009 del groups[i]
2010 else:
2011 # This is a new head. We create a new subgroup for it.
2012 targetidx = len(groups)
2013 groups.append(([], {rev}))
2014
2015 gr = groups[targetidx]
2016
2017 # We now add the current node to this subgroup. This is done
2018 # after the subgroup merging because all elements from a subgroup
2019 # that relied on this rev must precede it.
2020 #
2021 # we also update the <parents> set to include the parents of the
2022 # new nodes.
2023 if rev == currentrev: # only display stuff in rev
2024 gr[0].append(rev)
2025 gr[1].remove(rev)
2026 parents = [p for p in parentsfunc(rev) if p > node.nullrev]
2027 gr[1].update(parents)
2028 for p in parents:
2029 if p not in pendingset:
2030 pendingset.add(p)
2031 heappush(pendingheap, -p)
2032
2033 # Look for a subgroup to display
2034 #
2035 # When unblocked is empty (if clause), we were not waiting for any
2036 # revisions during the first iteration (if no priority was given) or
2037 # if we emitted a whole disconnected set of the graph (reached a
2038 # root). In that case we arbitrarily take the oldest known
2039 # subgroup. The heuristic could probably be better.
2040 #
2041 # Otherwise (elif clause) if the subgroup is blocked on
2042 # a revision we just emitted, we can safely emit it as
2043 # well.
2044 if not unblocked:
2045 if len(groups) > 1: # display other subset
2046 targetidx = 1
2047 gr = groups[1]
2048 elif not gr[1] & unblocked:
2049 gr = None
2050
2051 if gr is not None:
2052 # update the set of awaited revisions with the one from the
2053 # subgroup
2054 unblocked |= gr[1]
2055 # output all revisions in the subgroup
2056 for r in gr[0]:
2057 yield r
2058 # delete the subgroup that you just output
2059 # unless it is groups[0] in which case you just empty it.
2060 if targetidx:
2061 del groups[targetidx]
2062 else:
2063 gr[0][:] = []
2064 # Check if we have some subgroup waiting for revisions we are not going to
2065 # iterate over
2066 for g in groups:
2067 for r in g[0]:
2068 yield r
2069
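For the docstring graph above (0 at the root, 4 on top of 1, with 2-3 as a side branch), the generator emits one whole branch at a time. A quick sketch, assuming the new dagop module introduced by this change and a plain dict in place of changelog.parentrevs:

    from mercurial import dagop

    parentmap = {0: (-1, -1), 1: (0, -1), 2: (0, -1), 3: (2, -1), 4: (1, -1)}
    print(list(dagop.toposort([0, 1, 2, 3, 4], parentmap.__getitem__)))
    # -> [4, 1, 3, 2, 0]: the 4-1 branch, then the 3-2 branch, then the shared root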
2070 1751 @predicate('subrepo([pattern])')
2071 1752 def subrepo(repo, subset, x):
2072 1753 """Changesets that add, modify or remove the given subrepo. If no subrepo