# dagop.py - graph ancestry and topology algorithm for revset
#
# Copyright 2010 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import heapq

from .thirdparty import (
    attr,
)
from . import (
    error,
    mdiff,
    node,
    patch,
    pycompat,
    smartset,
)

baseset = smartset.baseset
generatorset = smartset.generatorset

# possible maximum depth between null and wdir()
_maxlogdepth = 0x80000000

def _walkrevtree(pfunc, revs, startdepth, stopdepth, reverse):
    """Walk DAG using 'pfunc' from the given 'revs' nodes

    'pfunc(rev)' should return the parent/child revisions of the given 'rev'
    if 'reverse' is True/False respectively.

    Scan ends at the stopdepth (exclusive) if specified. Revisions found
    earlier than the startdepth are omitted.
    """
    if startdepth is None:
        startdepth = 0
    if stopdepth is None:
        stopdepth = _maxlogdepth
    if stopdepth == 0:
        return
    if stopdepth < 0:
        raise error.ProgrammingError('negative stopdepth')
    if reverse:
        heapsign = -1  # max heap
    else:
        heapsign = +1  # min heap

    # load input revs lazily to heap so earlier revisions can be yielded
    # without fully computing the input revs
    revs.sort(reverse)
    irevs = iter(revs)
    pendingheap = []  # [(heapsign * rev, depth), ...] (i.e. lower depth first)

    inputrev = next(irevs, None)
    if inputrev is not None:
        heapq.heappush(pendingheap, (heapsign * inputrev, 0))

    lastrev = None
    while pendingheap:
        currev, curdepth = heapq.heappop(pendingheap)
        currev = heapsign * currev
        if currev == inputrev:
            inputrev = next(irevs, None)
            if inputrev is not None:
                heapq.heappush(pendingheap, (heapsign * inputrev, 0))
        # rescan parents until curdepth >= startdepth because queued entries
        # of the same revision are iterated from the lowest depth
        foundnew = (currev != lastrev)
        if foundnew and curdepth >= startdepth:
            lastrev = currev
            yield currev
        pdepth = curdepth + 1
        if foundnew and pdepth < stopdepth:
            for prev in pfunc(currev):
                if prev != node.nullrev:
                    heapq.heappush(pendingheap, (heapsign * prev, pdepth))
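
# Illustrative sketch (not part of the original module): _walkrevtree() over a
# hand-built parent table. 'parents' here is a hypothetical stand-in for
# changelog.parentrevs(); nullrev parents (-1) are filtered out by the walk.
#
#   parents = {5: (3, 4), 4: (2, -1), 3: (2, -1), 2: (0, -1), 0: (-1, -1)}
#   pfunc = lambda rev: parents[rev]
#   list(_walkrevtree(pfunc, smartset.baseset([5]), startdepth=None,
#                     stopdepth=None, reverse=True))
#   # -> [5, 4, 3, 2, 0]; rev 2 is reached twice (via 3 and 4), but the
#   # max-heap pops its duplicate entries back to back, so the
#   # currev != lastrev check deduplicates it.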

def filectxancestors(fctxs, followfirst=False):
    """Like filectx.ancestors(), but can walk from multiple files/revisions,
    and includes the given fctxs themselves

    Yields (rev, {fctx, ...}) pairs in descending order.
    """
    visit = {}
    visitheap = []
    def addvisit(fctx):
        rev = fctx.rev()
        if rev not in visit:
            visit[rev] = set()
            heapq.heappush(visitheap, -rev)  # max heap
        visit[rev].add(fctx)

    if followfirst:
        cut = 1
    else:
        cut = None

    for c in fctxs:
        addvisit(c)
    while visit:
        currev = -heapq.heappop(visitheap)
        curfctxs = visit.pop(currev)
        yield currev, curfctxs
        for c in curfctxs:
            for parent in c.parents()[:cut]:
                addvisit(parent)
    assert not visitheap

def filerevancestors(fctxs, followfirst=False):
    """Like filectx.ancestors(), but can walk from multiple files/revisions,
    and includes the given fctxs themselves

    Returns a smartset.
    """
    gen = (rev for rev, _cs in filectxancestors(fctxs, followfirst))
    return generatorset(gen, iterasc=False)

def _genrevancestors(repo, revs, followfirst, startdepth, stopdepth, cutfunc):
    if followfirst:
        cut = 1
    else:
        cut = None
    cl = repo.changelog
    def plainpfunc(rev):
        try:
            return cl.parentrevs(rev)[:cut]
        except error.WdirUnsupported:
            return (pctx.rev() for pctx in repo[rev].parents()[:cut])
    if cutfunc is None:
        pfunc = plainpfunc
    else:
        pfunc = lambda rev: [r for r in plainpfunc(rev) if not cutfunc(r)]
        revs = revs.filter(lambda rev: not cutfunc(rev))
    return _walkrevtree(pfunc, revs, startdepth, stopdepth, reverse=True)

def revancestors(repo, revs, followfirst=False, startdepth=None,
                 stopdepth=None, cutfunc=None):
    """Like revlog.ancestors(), but supports additional options, includes
    the given revs themselves, and returns a smartset

    Scan ends at the stopdepth (exclusive) if specified. Revisions found
    earlier than the startdepth are omitted.

    If cutfunc is provided, it will be used to cut the traversal of the DAG.
    When cutfunc(X) returns True, the DAG traversal stops - revision X and
    X's ancestors in the traversal path will be skipped. This could be an
    optimization sometimes.

    Note: if Y is an ancestor of X, cutfunc(X) returning True does not
    necessarily mean Y will also be cut. Usually cutfunc(Y) also wants to
    return True in this case. For example,

        D     # revancestors(repo, D, cutfunc=lambda rev: rev == B)
        |\    # will include "A", because the path D -> C -> A was not cut.
        B C   # If "B" gets cut, "A" might want to be cut too.
        |/
        A
    """
    gen = _genrevancestors(repo, revs, followfirst, startdepth, stopdepth,
                           cutfunc)
    return generatorset(gen, iterasc=False)
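
# Hedged sketch of the cutfunc behavior described above, using hypothetical
# rev numbers A=0, B=1, C=2, D=3 for the diamond in the docstring:
#
#   revancestors(repo, baseset([3]), cutfunc=lambda rev: rev == 1)
#   # -> 3, 2, 0 in descending order: B (rev 1) is cut, but A (rev 0) is
#   #    still reached through the uncut path D -> C -> A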

def _genrevdescendants(repo, revs, followfirst):
    if followfirst:
        cut = 1
    else:
        cut = None

    cl = repo.changelog
    first = revs.min()
    nullrev = node.nullrev
    if first == nullrev:
        # Are there nodes with a null first parent and a non-null
        # second one? Maybe. Do we care? Probably not.
        yield first
        for i in cl:
            yield i
    else:
        seen = set(revs)
        for i in cl.revs(first):
            if i in seen:
                yield i
                continue
            for x in cl.parentrevs(i)[:cut]:
                if x != nullrev and x in seen:
                    seen.add(i)
                    yield i
                    break
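
# Illustrative sketch (assumes a repo whose changelog is the linear history
# 0 -> 1 -> 2 -> 3 -> 4):
#
#   list(_genrevdescendants(repo, baseset([2]), followfirst=False))
#   # -> [2, 3, 4]: 2 is yielded as a member of the input revs, then 3 and 4
#   #    follow because their first parent is already in 'seen'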

def _builddescendantsmap(repo, startrev, followfirst):
    """Build map of 'rev -> child revs', offset from startrev"""
    cl = repo.changelog
    nullrev = node.nullrev
    descmap = [[] for _rev in pycompat.xrange(startrev, len(cl))]
    for currev in cl.revs(startrev + 1):
        p1rev, p2rev = cl.parentrevs(currev)
        if p1rev >= startrev:
            descmap[p1rev - startrev].append(currev)
        if not followfirst and p2rev != nullrev and p2rev >= startrev:
            descmap[p2rev - startrev].append(currev)
    return descmap
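
# Illustrative sketch (hypothetical revs): if rev 6 has parents (5, 3) and
# startrev is 3, the map records 6 under both parents, offset by startrev:
#
#   descmap[5 - 3]  # children of rev 5 -> [6]
#   descmap[3 - 3]  # children of rev 3 -> [6] (skipped when followfirst)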

def _genrevdescendantsofdepth(repo, revs, followfirst, startdepth, stopdepth):
    startrev = revs.min()
    descmap = _builddescendantsmap(repo, startrev, followfirst)
    def pfunc(rev):
        return descmap[rev - startrev]
    return _walkrevtree(pfunc, revs, startdepth, stopdepth, reverse=False)

def revdescendants(repo, revs, followfirst, startdepth=None, stopdepth=None):
    """Like revlog.descendants() but supports additional options, includes
    the given revs themselves, and returns a smartset

    Scan ends at the stopdepth (exclusive) if specified. Revisions found
    earlier than the startdepth are omitted.
    """
    if startdepth is None and stopdepth is None:
        gen = _genrevdescendants(repo, revs, followfirst)
    else:
        gen = _genrevdescendantsofdepth(repo, revs, followfirst,
                                        startdepth, stopdepth)
    return generatorset(gen, iterasc=True)

def _reachablerootspure(repo, minroot, roots, heads, includepath):
    """return (heads(::<roots> and ::<heads>))

    If includepath is True, return (<roots>::<heads>)."""
    if not roots:
        return []
    parentrevs = repo.changelog.parentrevs
    roots = set(roots)
    visit = list(heads)
    reachable = set()
    seen = {}
    # prefetch all the things! (because python is slow)
    reached = reachable.add
    dovisit = visit.append
    nextvisit = visit.pop
    # open-code the post-order traversal due to the tiny size of
    # sys.getrecursionlimit()
    while visit:
        rev = nextvisit()
        if rev in roots:
            reached(rev)
            if not includepath:
                continue
        parents = parentrevs(rev)
        seen[rev] = parents
        for parent in parents:
            if parent >= minroot and parent not in seen:
                dovisit(parent)
    if not reachable:
        return baseset()
    if not includepath:
        return reachable
    for rev in sorted(seen):
        for parent in seen[rev]:
            if parent in reachable:
                reached(rev)
    return reachable

def reachableroots(repo, roots, heads, includepath=False):
    """return (heads(::<roots> and ::<heads>))

    If includepath is True, return (<roots>::<heads>)."""
    if not roots:
        return baseset()
    minroot = roots.min()
    roots = list(roots)
    heads = list(heads)
    try:
        revs = repo.changelog.reachableroots(minroot, heads, roots,
                                             includepath)
    except AttributeError:
        revs = _reachablerootspure(repo, minroot, roots, heads, includepath)
    revs = baseset(revs)
    revs.sort()
    return revs
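
# Hedged usage sketch (hypothetical rev numbers): with includepath=True the
# result covers every rev on a path from a root to a head.
#
#   reachableroots(repo, baseset([2]), baseset([7]), includepath=True)
#   # -> a sorted baseset equivalent to the revset '2::7' when 2 is an
#   #    ancestor of 7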

def _changesrange(fctx1, fctx2, linerange2, diffopts):
    """Return `(diffinrange, linerange1)` where `diffinrange` is True
    if diff from fctx2 to fctx1 has changes in linerange2 and
    `linerange1` is the new line range for fctx1.
    """
    blocks = mdiff.allblocks(fctx1.data(), fctx2.data(), diffopts)
    filteredblocks, linerange1 = mdiff.blocksinrange(blocks, linerange2)
    diffinrange = any(stype == '!' for _, stype in filteredblocks)
    return diffinrange, linerange1

def blockancestors(fctx, fromline, toline, followfirst=False):
    """Yield ancestors of `fctx` with respect to the block of lines within
    `fromline`-`toline` range.
    """
    diffopts = patch.diffopts(fctx._repo.ui)
    fctx = fctx.introfilectx()
    visit = {(fctx.linkrev(), fctx.filenode()): (fctx, (fromline, toline))}
    while visit:
        c, linerange2 = visit.pop(max(visit))
        pl = c.parents()
        if followfirst:
            pl = pl[:1]
        if not pl:
            # The block originates from the initial revision.
            yield c, linerange2
            continue
        inrange = False
        for p in pl:
            inrangep, linerange1 = _changesrange(p, c, linerange2, diffopts)
            inrange = inrange or inrangep
            if linerange1[0] == linerange1[1]:
                # Parent's linerange is empty, meaning that the block got
                # introduced in this revision; no need to go further in this
                # branch.
                continue
            # Set _descendantrev with 'c' (a known descendant) so that, when
            # _adjustlinkrev is called for 'p', it receives this descendant
            # (as srcrev) instead of the possibly-topmost introrev.
            p._descendantrev = c.rev()
            visit[p.linkrev(), p.filenode()] = p, linerange1
        if inrange:
            yield c, linerange2
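
# Hedged usage sketch (assumes an existing repo; the file name and line
# numbers are hypothetical). This is the machinery behind followlines(),
# e.g. 'hg log -L a.txt,10:20':
#
#   fctx = repo['.']['a.txt']
#   for c, (fromline, toline) in blockancestors(fctx, 10, 20):
#       ...  # each ancestor fctx is paired with the line range the block
#            # occupied in that revision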

def blockdescendants(fctx, fromline, toline):
    """Yield descendants of `fctx` with respect to the block of lines within
    `fromline`-`toline` range.
    """
    # First possibly yield 'fctx' if it has changes in range with respect to
    # its parents.
    try:
        c, linerange1 = next(blockancestors(fctx, fromline, toline))
    except StopIteration:
        pass
    else:
        if c == fctx:
            yield c, linerange1

    diffopts = patch.diffopts(fctx._repo.ui)
    fl = fctx.filelog()
    seen = {fctx.filerev(): (fctx, (fromline, toline))}
    for i in fl.descendants([fctx.filerev()]):
        c = fctx.filectx(i)
        inrange = False
        for x in fl.parentrevs(i):
            try:
                p, linerange2 = seen[x]
            except KeyError:
                # nullrev or other branch
                continue
            inrangep, linerange1 = _changesrange(c, p, linerange2, diffopts)
            inrange = inrange or inrangep
            # If revision 'i' has been seen (it's a merge) and the line range
            # previously computed differs from the one we just got, we take the
            # surrounding interval. This is conservative but avoids losing
            # information.
            if i in seen and seen[i][1] != linerange1:
                lbs, ubs = zip(linerange1, seen[i][1])
                linerange1 = min(lbs), max(ubs)
            seen[i] = c, linerange1
        if inrange:
            yield c, linerange1

@attr.s(slots=True, frozen=True)
class annotateline(object):
    fctx = attr.ib()
    lineno = attr.ib()
    # Whether this annotation was the result of a skip-annotate.
    skip = attr.ib(default=False)
    text = attr.ib(default=None)

@attr.s(slots=True, frozen=True)
class _annotatedfile(object):
    # list indexed by lineno - 1
    fctxs = attr.ib()
    linenos = attr.ib()
    skips = attr.ib()
    # full file content
    text = attr.ib()

def _countlines(text):
    if text.endswith("\n"):
        return text.count("\n")
    return text.count("\n") + int(bool(text))
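
# _countlines() counts a trailing partial line as a full line:
#
#   _countlines("a\nb\n")  # -> 2
#   _countlines("a\nb")    # -> 2 (unterminated last line still counts)
#   _countlines("")        # -> 0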

def _decoratelines(text, fctx):
    n = _countlines(text)
    linenos = pycompat.rangelist(1, n + 1)
    return _annotatedfile([fctx] * n, linenos, [False] * n, text)

def _annotatepair(parents, childfctx, child, skipchild, diffopts):
    r'''
    Given parent and child fctxes and annotate data for parents, for all lines
    in either parent that match the child, annotate the child with the parent's
    data.

    Additionally, if `skipchild` is True, replace all other lines with parent
    annotate data as well such that child is never blamed for any lines.

    See test-annotate.py for unit tests.
    '''
    pblocks = [(parent, mdiff.allblocks(parent.text, child.text, opts=diffopts))
               for parent in parents]

    if skipchild:
        # Need to iterate over the blocks twice -- make it a list
        pblocks = [(p, list(blocks)) for (p, blocks) in pblocks]
    # Mercurial currently prefers p2 over p1 for annotate.
    # TODO: change this?
    for parent, blocks in pblocks:
        for (a1, a2, b1, b2), t in blocks:
            # Changed blocks ('!') or blocks made only of blank lines ('~')
            # belong to the child.
            if t == '=':
                child.fctxs[b1:b2] = parent.fctxs[a1:a2]
                child.linenos[b1:b2] = parent.linenos[a1:a2]
                child.skips[b1:b2] = parent.skips[a1:a2]

    if skipchild:
        # Now try and match up anything that couldn't be matched,
        # Reversing pblocks maintains bias towards p2, matching above
        # behavior.
        pblocks.reverse()

        # The heuristics are:
        # * Work on blocks of changed lines (effectively diff hunks with -U0).
        #   This could potentially be smarter but works well enough.
        # * For a non-matching section, do a best-effort fit. Match lines in
        #   diff hunks 1:1, dropping lines as necessary.
        # * Repeat the last line as a last resort.

        # First, replace as much as possible without repeating the last line.
        remaining = [(parent, []) for parent, _blocks in pblocks]
        for idx, (parent, blocks) in enumerate(pblocks):
            for (a1, a2, b1, b2), _t in blocks:
                if a2 - a1 >= b2 - b1:
                    for bk in pycompat.xrange(b1, b2):
                        if child.fctxs[bk] == childfctx:
                            ak = min(a1 + (bk - b1), a2 - 1)
                            child.fctxs[bk] = parent.fctxs[ak]
                            child.linenos[bk] = parent.linenos[ak]
                            child.skips[bk] = True
                else:
                    remaining[idx][1].append((a1, a2, b1, b2))

        # Then, look at anything left, which might involve repeating the last
        # line.
        for parent, blocks in remaining:
            for a1, a2, b1, b2 in blocks:
                for bk in pycompat.xrange(b1, b2):
                    if child.fctxs[bk] == childfctx:
                        ak = min(a1 + (bk - b1), a2 - 1)
                        child.fctxs[bk] = parent.fctxs[ak]
                        child.linenos[bk] = parent.linenos[ak]
                        child.skips[bk] = True
    return child
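
# Illustrative sketch (hypothetical one-parent case): if the parent text is
# "a\nb\nc\n" and the child text is "a\nx\nc\n", the '=' blocks cover lines
# 1 and 3, so the child keeps the parent's fctx/lineno for those lines and
# stays blamed for line 2. With skipchild=True, line 2 is also remapped to
# the parent (best-effort 1:1 within the changed hunk) and marked skip=True.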

def annotate(base, parents, skiprevs=None, diffopts=None):
    """Core algorithm for filectx.annotate()

    `parents(fctx)` is a function returning a list of parent filectxs.
    """

    # This algorithm would prefer to be recursive, but Python is a
    # bit recursion-hostile. Instead we do an iterative
    # depth-first search.

    # 1st DFS pre-calculates pcache and needed
    visit = [base]
    pcache = {}
    needed = {base: 1}
    while visit:
        f = visit.pop()
        if f in pcache:
            continue
        pl = parents(f)
        pcache[f] = pl
        for p in pl:
            needed[p] = needed.get(p, 0) + 1
            if p not in pcache:
                visit.append(p)

    # 2nd DFS does the actual annotate
    visit[:] = [base]
    hist = {}
    while visit:
        f = visit[-1]
        if f in hist:
            visit.pop()
            continue

        ready = True
        pl = pcache[f]
        for p in pl:
            if p not in hist:
                ready = False
                visit.append(p)
        if ready:
            visit.pop()
            curr = _decoratelines(f.data(), f)
            skipchild = False
            if skiprevs is not None:
                skipchild = f._changeid in skiprevs
            curr = _annotatepair([hist[p] for p in pl], f, curr, skipchild,
                                 diffopts)
            for p in pl:
                if needed[p] == 1:
                    del hist[p]
                    del needed[p]
                else:
                    needed[p] -= 1

            hist[f] = curr
            del pcache[f]

    a = hist[base]
    return [annotateline(*r) for r in zip(a.fctxs, a.linenos, a.skips,
                                          mdiff.splitnewlines(a.text))]
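
# Illustrative sketch of the 'needed' refcounting (hypothetical filectxs): if
# base M is a merge of P1 and P2 and both descend from G, the first pass sets
# needed[G] == 2, so G's entry in 'hist' survives the annotation of whichever
# of P1/P2 is processed first and is only freed after the second one.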

def toposort(revs, parentsfunc, firstbranch=()):
    """Yield revisions from heads to roots one (topo) branch at a time.

    This function aims to be used by a graph generator that wishes to minimize
    the number of parallel branches and their interleaving.

    Example iteration order (numbers show the "true" order in a changelog):

      o  4
      |
      o  1
      |
      | o  3
      | |
      | o  2
      |/
      o  0

    Note that the ancestors of merges are understood by the current
    algorithm to be on the same branch. This means no reordering will
    occur behind a merge.
    """
    ### Quick summary of the algorithm
    #
    # This function is based around a "retention" principle. We keep revisions
    # in memory until we are ready to emit a whole branch that immediately
    # "merges" into an existing one. This reduces the number of parallel
    # branches with interleaved revisions.
    #
    # During iteration revs are split into two groups:
    # A) revisions already emitted
    # B) revisions in "retention". They are stored as different subgroups.
    #
    # for each REV, we do the following logic:
    #
    #   1) if REV is a parent of (A), we will emit it. If there is a
    #   retention group ((B) above) that is blocked on REV being
    #   available, we emit all the revisions out of that retention
    #   group first.
    #
    #   2) else, we'll search for a subgroup in (B) waiting for REV to be
    #   available; if such a subgroup exists, we add REV to it and the
    #   subgroup is now waiting for REV.parents() to be available.
    #
    #   3) finally if no such group existed in (B), we create a new subgroup.
    #
    #
    # To bootstrap the algorithm, we emit the tipmost revision (which
    # puts it in group (A) from above).

    revs.sort(reverse=True)

    # Set of parents of revisions that have been emitted. They can be
    # considered unblocked as the graph generator is already aware of them so
    # there is no need to delay the revisions that reference them.
    #
    # If someone wants to prioritize a branch over the others, pre-filling this
    # set will force all other branches to wait until this branch is ready to
    # be emitted.
    unblocked = set(firstbranch)

    # list of groups waiting to be displayed, each group is defined by:
    #
    #   (revs:    lists of revs waiting to be displayed,
    #    blocked: set of revs that cannot be displayed before those in 'revs')
    #
    # The second value ('blocked') corresponds to parents of any revision in
    # the group ('revs') that is not itself contained in the group. The main
    # idea of this algorithm is to delay as much as possible the emission of
    # any revision. This means waiting for the moment we are about to display
    # these parents to display the revs in a group.
    #
    # This first implementation is smart until it encounters a merge: it will
    # emit revs as soon as any parent is about to be emitted and can grow an
    # arbitrary number of revs in 'blocked'. In practice this means we properly
    # retain new branches but give up on any special ordering for ancestors
    # of merges. The implementation can be improved to handle this better.
    #
    # The first subgroup is special. It corresponds to all the revisions that
    # were already emitted. The 'revs' list is expected to be empty and the
    # 'blocked' set contains the parent revisions of already emitted revisions.
    #
    # You could pre-seed the <parents> set of groups[0] to specific
    # changesets to select what the first emitted branch should be.
    groups = [([], unblocked)]
    pendingheap = []
    pendingset = set()

    heapq.heapify(pendingheap)
    heappop = heapq.heappop
    heappush = heapq.heappush
    for currentrev in revs:
        # Heap works with smallest element, we want highest so we invert
        if currentrev not in pendingset:
            heappush(pendingheap, -currentrev)
            pendingset.add(currentrev)
        # iterate on pending revs until the current rev has been processed
        rev = None
        while rev != currentrev:
            rev = -heappop(pendingheap)
            pendingset.remove(rev)

            # Look for a subgroup blocked on the current revision.
            matching = [i for i, g in enumerate(groups) if rev in g[1]]

            if matching:
                # The main idea is to gather together all sets that are blocked
                # on the same revision.
                #
                # Groups are merged when a common blocking ancestor is
                # observed. For example, given two groups:
                #
                # revs [5, 4] waiting for 1
                # revs [3, 2] waiting for 1
                #
                # These two groups will be merged when we process
                # 1. In theory, we could have merged the groups when
                # we added 2 to the group it is now in (we could have
                # noticed the groups were both blocked on 1 then), but
                # the way it works now makes the algorithm simpler.
                #
                # We also always keep the oldest subgroup first. We can
                # probably improve the behavior by having the longest set
                # first. That way, graph algorithms could minimise the length
                # of parallel lines in their drawing. This is currently not
                # done.
                targetidx = matching.pop(0)
                trevs, tparents = groups[targetidx]
                for i in matching:
                    gr = groups[i]
                    trevs.extend(gr[0])
                    tparents |= gr[1]
                # delete all merged subgroups (except the one we kept)
                # (starting from the last subgroup for performance and
                # sanity reasons)
                for i in reversed(matching):
                    del groups[i]
            else:
                # This is a new head. We create a new subgroup for it.
                targetidx = len(groups)
                groups.append(([], {rev}))

            gr = groups[targetidx]

            # We now add the current rev to this subgroup. This is done
            # after the subgroup merging because all elements from a subgroup
            # that relied on this rev must precede it.
            #
            # we also update the <parents> set to include the parents of the
            # new nodes.
            if rev == currentrev:  # only display stuff in rev
                gr[0].append(rev)
            gr[1].remove(rev)
            parents = [p for p in parentsfunc(rev) if p > node.nullrev]
            gr[1].update(parents)
            for p in parents:
                if p not in pendingset:
                    pendingset.add(p)
                    heappush(pendingheap, -p)

            # Look for a subgroup to display
            #
            # When unblocked is empty (if clause), we were not waiting for any
            # revisions during the first iteration (if no priority was given)
            # or we emitted a whole disconnected set of the graph (reached a
            # root). In that case we arbitrarily take the oldest known
            # subgroup. The heuristic could probably be better.
            #
            # Otherwise (elif clause) if the subgroup is blocked on
            # a revision we just emitted, we can safely emit it as
            # well.
            if not unblocked:
                if len(groups) > 1:  # display other subset
                    targetidx = 1
                    gr = groups[1]
            elif not gr[1] & unblocked:
                gr = None

            if gr is not None:
                # update the set of awaited revisions with the one from the
                # subgroup
                unblocked |= gr[1]
                # output all revisions in the subgroup
                for r in gr[0]:
                    yield r
                # delete the subgroup that you just output
                # unless it is groups[0] in which case you just empty it.
                if targetidx:
                    del groups[targetidx]
                else:
                    gr[0][:] = []

    # Check if we have some subgroup waiting for revisions we are not going to
    # iterate over
    for g in groups:
        for r in g[0]:
            yield r
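
# Illustrative sketch (not part of the original module): reproducing the
# docstring's example order with a hand-built parent table standing in for
# changelog.parentrevs().
#
#   parents = {4: (1, -1), 3: (2, -1), 2: (0, -1), 1: (0, -1), 0: (-1, -1)}
#   list(toposort(smartset.baseset([0, 1, 2, 3, 4]),
#                 lambda rev: parents[rev]))
#   # -> [4, 1, 3, 2, 0]: the 4-1 branch is emitted first, then the 3-2
#   #    branch, then their common ancestor 0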