##// END OF EJS Templates
dagop: fix subsetparentswalker to set p1/p2 chains at merge revision...
Yuya Nishihara -
r45147:67f757ed default
parent child Browse files
Show More
@@ -1,1108 +1,1113 b''
1 # dagop.py - graph ancestry and topology algorithm for revset
1 # dagop.py - graph ancestry and topology algorithm for revset
2 #
2 #
3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import heapq
10 import heapq
11
11
12 from .node import nullrev
12 from .node import nullrev
13 from .thirdparty import attr
13 from .thirdparty import attr
14 from . import (
14 from . import (
15 error,
15 error,
16 mdiff,
16 mdiff,
17 node,
17 node,
18 patch,
18 patch,
19 pycompat,
19 pycompat,
20 smartset,
20 smartset,
21 )
21 )
22
22
23 baseset = smartset.baseset
23 baseset = smartset.baseset
24 generatorset = smartset.generatorset
24 generatorset = smartset.generatorset
25
25
26 # possible maximum depth between null and wdir()
26 # possible maximum depth between null and wdir()
27 maxlogdepth = 0x80000000
27 maxlogdepth = 0x80000000
28
28
29
29
30 def _walkrevtree(pfunc, revs, startdepth, stopdepth, reverse):
30 def _walkrevtree(pfunc, revs, startdepth, stopdepth, reverse):
31 """Walk DAG using 'pfunc' from the given 'revs' nodes
31 """Walk DAG using 'pfunc' from the given 'revs' nodes
32
32
33 'pfunc(rev)' should return the parent/child revisions of the given 'rev'
33 'pfunc(rev)' should return the parent/child revisions of the given 'rev'
34 if 'reverse' is True/False respectively.
34 if 'reverse' is True/False respectively.
35
35
36 Scan ends at the stopdepth (exclusive) if specified. Revisions found
36 Scan ends at the stopdepth (exclusive) if specified. Revisions found
37 earlier than the startdepth are omitted.
37 earlier than the startdepth are omitted.
38 """
38 """
39 if startdepth is None:
39 if startdepth is None:
40 startdepth = 0
40 startdepth = 0
41 if stopdepth is None:
41 if stopdepth is None:
42 stopdepth = maxlogdepth
42 stopdepth = maxlogdepth
43 if stopdepth == 0:
43 if stopdepth == 0:
44 return
44 return
45 if stopdepth < 0:
45 if stopdepth < 0:
46 raise error.ProgrammingError(b'negative stopdepth')
46 raise error.ProgrammingError(b'negative stopdepth')
47 if reverse:
47 if reverse:
48 heapsign = -1 # max heap
48 heapsign = -1 # max heap
49 else:
49 else:
50 heapsign = +1 # min heap
50 heapsign = +1 # min heap
51
51
52 # load input revs lazily to heap so earlier revisions can be yielded
52 # load input revs lazily to heap so earlier revisions can be yielded
53 # without fully computing the input revs
53 # without fully computing the input revs
54 revs.sort(reverse)
54 revs.sort(reverse)
55 irevs = iter(revs)
55 irevs = iter(revs)
56 pendingheap = [] # [(heapsign * rev, depth), ...] (i.e. lower depth first)
56 pendingheap = [] # [(heapsign * rev, depth), ...] (i.e. lower depth first)
57
57
58 inputrev = next(irevs, None)
58 inputrev = next(irevs, None)
59 if inputrev is not None:
59 if inputrev is not None:
60 heapq.heappush(pendingheap, (heapsign * inputrev, 0))
60 heapq.heappush(pendingheap, (heapsign * inputrev, 0))
61
61
62 lastrev = None
62 lastrev = None
63 while pendingheap:
63 while pendingheap:
64 currev, curdepth = heapq.heappop(pendingheap)
64 currev, curdepth = heapq.heappop(pendingheap)
65 currev = heapsign * currev
65 currev = heapsign * currev
66 if currev == inputrev:
66 if currev == inputrev:
67 inputrev = next(irevs, None)
67 inputrev = next(irevs, None)
68 if inputrev is not None:
68 if inputrev is not None:
69 heapq.heappush(pendingheap, (heapsign * inputrev, 0))
69 heapq.heappush(pendingheap, (heapsign * inputrev, 0))
70 # rescan parents until curdepth >= startdepth because queued entries
70 # rescan parents until curdepth >= startdepth because queued entries
71 # of the same revision are iterated from the lowest depth
71 # of the same revision are iterated from the lowest depth
72 foundnew = currev != lastrev
72 foundnew = currev != lastrev
73 if foundnew and curdepth >= startdepth:
73 if foundnew and curdepth >= startdepth:
74 lastrev = currev
74 lastrev = currev
75 yield currev
75 yield currev
76 pdepth = curdepth + 1
76 pdepth = curdepth + 1
77 if foundnew and pdepth < stopdepth:
77 if foundnew and pdepth < stopdepth:
78 for prev in pfunc(currev):
78 for prev in pfunc(currev):
79 if prev != node.nullrev:
79 if prev != node.nullrev:
80 heapq.heappush(pendingheap, (heapsign * prev, pdepth))
80 heapq.heappush(pendingheap, (heapsign * prev, pdepth))
81
81
82
82
83 def filectxancestors(fctxs, followfirst=False):
83 def filectxancestors(fctxs, followfirst=False):
84 """Like filectx.ancestors(), but can walk from multiple files/revisions,
84 """Like filectx.ancestors(), but can walk from multiple files/revisions,
85 and includes the given fctxs themselves
85 and includes the given fctxs themselves
86
86
87 Yields (rev, {fctx, ...}) pairs in descending order.
87 Yields (rev, {fctx, ...}) pairs in descending order.
88 """
88 """
89 visit = {}
89 visit = {}
90 visitheap = []
90 visitheap = []
91
91
92 def addvisit(fctx):
92 def addvisit(fctx):
93 rev = fctx.rev()
93 rev = fctx.rev()
94 if rev not in visit:
94 if rev not in visit:
95 visit[rev] = set()
95 visit[rev] = set()
96 heapq.heappush(visitheap, -rev) # max heap
96 heapq.heappush(visitheap, -rev) # max heap
97 visit[rev].add(fctx)
97 visit[rev].add(fctx)
98
98
99 if followfirst:
99 if followfirst:
100 cut = 1
100 cut = 1
101 else:
101 else:
102 cut = None
102 cut = None
103
103
104 for c in fctxs:
104 for c in fctxs:
105 addvisit(c)
105 addvisit(c)
106 while visit:
106 while visit:
107 currev = -(heapq.heappop(visitheap))
107 currev = -(heapq.heappop(visitheap))
108 curfctxs = visit.pop(currev)
108 curfctxs = visit.pop(currev)
109 yield currev, curfctxs
109 yield currev, curfctxs
110 for c in curfctxs:
110 for c in curfctxs:
111 for parent in c.parents()[:cut]:
111 for parent in c.parents()[:cut]:
112 addvisit(parent)
112 addvisit(parent)
113 assert not visitheap
113 assert not visitheap
114
114
115
115
116 def filerevancestors(fctxs, followfirst=False):
116 def filerevancestors(fctxs, followfirst=False):
117 """Like filectx.ancestors(), but can walk from multiple files/revisions,
117 """Like filectx.ancestors(), but can walk from multiple files/revisions,
118 and includes the given fctxs themselves
118 and includes the given fctxs themselves
119
119
120 Returns a smartset.
120 Returns a smartset.
121 """
121 """
122 gen = (rev for rev, _cs in filectxancestors(fctxs, followfirst))
122 gen = (rev for rev, _cs in filectxancestors(fctxs, followfirst))
123 return generatorset(gen, iterasc=False)
123 return generatorset(gen, iterasc=False)
124
124
125
125
126 def _genrevancestors(repo, revs, followfirst, startdepth, stopdepth, cutfunc):
126 def _genrevancestors(repo, revs, followfirst, startdepth, stopdepth, cutfunc):
127 if followfirst:
127 if followfirst:
128 cut = 1
128 cut = 1
129 else:
129 else:
130 cut = None
130 cut = None
131 cl = repo.changelog
131 cl = repo.changelog
132
132
133 def plainpfunc(rev):
133 def plainpfunc(rev):
134 try:
134 try:
135 return cl.parentrevs(rev)[:cut]
135 return cl.parentrevs(rev)[:cut]
136 except error.WdirUnsupported:
136 except error.WdirUnsupported:
137 return (pctx.rev() for pctx in repo[rev].parents()[:cut])
137 return (pctx.rev() for pctx in repo[rev].parents()[:cut])
138
138
139 if cutfunc is None:
139 if cutfunc is None:
140 pfunc = plainpfunc
140 pfunc = plainpfunc
141 else:
141 else:
142 pfunc = lambda rev: [r for r in plainpfunc(rev) if not cutfunc(r)]
142 pfunc = lambda rev: [r for r in plainpfunc(rev) if not cutfunc(r)]
143 revs = revs.filter(lambda rev: not cutfunc(rev))
143 revs = revs.filter(lambda rev: not cutfunc(rev))
144 return _walkrevtree(pfunc, revs, startdepth, stopdepth, reverse=True)
144 return _walkrevtree(pfunc, revs, startdepth, stopdepth, reverse=True)
145
145
146
146
147 def revancestors(
147 def revancestors(
148 repo, revs, followfirst=False, startdepth=None, stopdepth=None, cutfunc=None
148 repo, revs, followfirst=False, startdepth=None, stopdepth=None, cutfunc=None
149 ):
149 ):
150 r"""Like revlog.ancestors(), but supports additional options, includes
150 r"""Like revlog.ancestors(), but supports additional options, includes
151 the given revs themselves, and returns a smartset
151 the given revs themselves, and returns a smartset
152
152
153 Scan ends at the stopdepth (exclusive) if specified. Revisions found
153 Scan ends at the stopdepth (exclusive) if specified. Revisions found
154 earlier than the startdepth are omitted.
154 earlier than the startdepth are omitted.
155
155
156 If cutfunc is provided, it will be used to cut the traversal of the DAG.
156 If cutfunc is provided, it will be used to cut the traversal of the DAG.
157 When cutfunc(X) returns True, the DAG traversal stops - revision X and
157 When cutfunc(X) returns True, the DAG traversal stops - revision X and
158 X's ancestors in the traversal path will be skipped. This could be an
158 X's ancestors in the traversal path will be skipped. This could be an
159 optimization sometimes.
159 optimization sometimes.
160
160
161 Note: if Y is an ancestor of X, cutfunc(X) returning True does not
161 Note: if Y is an ancestor of X, cutfunc(X) returning True does not
162 necessarily mean Y will also be cut. Usually cutfunc(Y) also wants to
162 necessarily mean Y will also be cut. Usually cutfunc(Y) also wants to
163 return True in this case. For example,
163 return True in this case. For example,
164
164
165 D # revancestors(repo, D, cutfunc=lambda rev: rev == B)
165 D # revancestors(repo, D, cutfunc=lambda rev: rev == B)
166 |\ # will include "A", because the path D -> C -> A was not cut.
166 |\ # will include "A", because the path D -> C -> A was not cut.
167 B C # If "B" gets cut, "A" might want to be cut too.
167 B C # If "B" gets cut, "A" might want to be cut too.
168 |/
168 |/
169 A
169 A
170 """
170 """
171 gen = _genrevancestors(
171 gen = _genrevancestors(
172 repo, revs, followfirst, startdepth, stopdepth, cutfunc
172 repo, revs, followfirst, startdepth, stopdepth, cutfunc
173 )
173 )
174 return generatorset(gen, iterasc=False)
174 return generatorset(gen, iterasc=False)
175
175
176
176
177 def _genrevdescendants(repo, revs, followfirst):
177 def _genrevdescendants(repo, revs, followfirst):
178 if followfirst:
178 if followfirst:
179 cut = 1
179 cut = 1
180 else:
180 else:
181 cut = None
181 cut = None
182
182
183 cl = repo.changelog
183 cl = repo.changelog
184 first = revs.min()
184 first = revs.min()
185 nullrev = node.nullrev
185 nullrev = node.nullrev
186 if first == nullrev:
186 if first == nullrev:
187 # Are there nodes with a null first parent and a non-null
187 # Are there nodes with a null first parent and a non-null
188 # second one? Maybe. Do we care? Probably not.
188 # second one? Maybe. Do we care? Probably not.
189 yield first
189 yield first
190 for i in cl:
190 for i in cl:
191 yield i
191 yield i
192 else:
192 else:
193 seen = set(revs)
193 seen = set(revs)
194 for i in cl.revs(first):
194 for i in cl.revs(first):
195 if i in seen:
195 if i in seen:
196 yield i
196 yield i
197 continue
197 continue
198 for x in cl.parentrevs(i)[:cut]:
198 for x in cl.parentrevs(i)[:cut]:
199 if x != nullrev and x in seen:
199 if x != nullrev and x in seen:
200 seen.add(i)
200 seen.add(i)
201 yield i
201 yield i
202 break
202 break
203
203
204
204
205 def _builddescendantsmap(repo, startrev, followfirst):
205 def _builddescendantsmap(repo, startrev, followfirst):
206 """Build map of 'rev -> child revs', offset from startrev"""
206 """Build map of 'rev -> child revs', offset from startrev"""
207 cl = repo.changelog
207 cl = repo.changelog
208 nullrev = node.nullrev
208 nullrev = node.nullrev
209 descmap = [[] for _rev in pycompat.xrange(startrev, len(cl))]
209 descmap = [[] for _rev in pycompat.xrange(startrev, len(cl))]
210 for currev in cl.revs(startrev + 1):
210 for currev in cl.revs(startrev + 1):
211 p1rev, p2rev = cl.parentrevs(currev)
211 p1rev, p2rev = cl.parentrevs(currev)
212 if p1rev >= startrev:
212 if p1rev >= startrev:
213 descmap[p1rev - startrev].append(currev)
213 descmap[p1rev - startrev].append(currev)
214 if not followfirst and p2rev != nullrev and p2rev >= startrev:
214 if not followfirst and p2rev != nullrev and p2rev >= startrev:
215 descmap[p2rev - startrev].append(currev)
215 descmap[p2rev - startrev].append(currev)
216 return descmap
216 return descmap
217
217
218
218
219 def _genrevdescendantsofdepth(repo, revs, followfirst, startdepth, stopdepth):
219 def _genrevdescendantsofdepth(repo, revs, followfirst, startdepth, stopdepth):
220 startrev = revs.min()
220 startrev = revs.min()
221 descmap = _builddescendantsmap(repo, startrev, followfirst)
221 descmap = _builddescendantsmap(repo, startrev, followfirst)
222
222
223 def pfunc(rev):
223 def pfunc(rev):
224 return descmap[rev - startrev]
224 return descmap[rev - startrev]
225
225
226 return _walkrevtree(pfunc, revs, startdepth, stopdepth, reverse=False)
226 return _walkrevtree(pfunc, revs, startdepth, stopdepth, reverse=False)
227
227
228
228
229 def revdescendants(repo, revs, followfirst, startdepth=None, stopdepth=None):
229 def revdescendants(repo, revs, followfirst, startdepth=None, stopdepth=None):
230 """Like revlog.descendants() but supports additional options, includes
230 """Like revlog.descendants() but supports additional options, includes
231 the given revs themselves, and returns a smartset
231 the given revs themselves, and returns a smartset
232
232
233 Scan ends at the stopdepth (exclusive) if specified. Revisions found
233 Scan ends at the stopdepth (exclusive) if specified. Revisions found
234 earlier than the startdepth are omitted.
234 earlier than the startdepth are omitted.
235 """
235 """
236 if startdepth is None and (stopdepth is None or stopdepth >= maxlogdepth):
236 if startdepth is None and (stopdepth is None or stopdepth >= maxlogdepth):
237 gen = _genrevdescendants(repo, revs, followfirst)
237 gen = _genrevdescendants(repo, revs, followfirst)
238 else:
238 else:
239 gen = _genrevdescendantsofdepth(
239 gen = _genrevdescendantsofdepth(
240 repo, revs, followfirst, startdepth, stopdepth
240 repo, revs, followfirst, startdepth, stopdepth
241 )
241 )
242 return generatorset(gen, iterasc=True)
242 return generatorset(gen, iterasc=True)
243
243
244
244
245 def descendantrevs(revs, revsfn, parentrevsfn):
245 def descendantrevs(revs, revsfn, parentrevsfn):
246 """Generate revision number descendants in revision order.
246 """Generate revision number descendants in revision order.
247
247
248 Yields revision numbers starting with a child of some rev in
248 Yields revision numbers starting with a child of some rev in
249 ``revs``. Results are ordered by revision number and are
249 ``revs``. Results are ordered by revision number and are
250 therefore topological. Each revision is not considered a descendant
250 therefore topological. Each revision is not considered a descendant
251 of itself.
251 of itself.
252
252
253 ``revsfn`` is a callable that with no argument iterates over all
253 ``revsfn`` is a callable that with no argument iterates over all
254 revision numbers and with a ``start`` argument iterates over revision
254 revision numbers and with a ``start`` argument iterates over revision
255 numbers beginning with that value.
255 numbers beginning with that value.
256
256
257 ``parentrevsfn`` is a callable that receives a revision number and
257 ``parentrevsfn`` is a callable that receives a revision number and
258 returns an iterable of parent revision numbers, whose values may include
258 returns an iterable of parent revision numbers, whose values may include
259 nullrev.
259 nullrev.
260 """
260 """
261 first = min(revs)
261 first = min(revs)
262
262
263 if first == nullrev:
263 if first == nullrev:
264 for rev in revsfn():
264 for rev in revsfn():
265 yield rev
265 yield rev
266 return
266 return
267
267
268 seen = set(revs)
268 seen = set(revs)
269 for rev in revsfn(start=first + 1):
269 for rev in revsfn(start=first + 1):
270 for prev in parentrevsfn(rev):
270 for prev in parentrevsfn(rev):
271 if prev != nullrev and prev in seen:
271 if prev != nullrev and prev in seen:
272 seen.add(rev)
272 seen.add(rev)
273 yield rev
273 yield rev
274 break
274 break
275
275
276
276
277 class subsetparentswalker(object):
277 class subsetparentswalker(object):
278 r"""Scan adjacent ancestors in the graph given by the subset
278 r"""Scan adjacent ancestors in the graph given by the subset
279
279
280 This computes parent-child relations in the sub graph filtered by
280 This computes parent-child relations in the sub graph filtered by
281 a revset. Primary use case is to draw a revisions graph.
281 a revset. Primary use case is to draw a revisions graph.
282
282
283 In the following example, we consider that the node 'f' has edges to all
283 In the following example, we consider that the node 'f' has edges to all
284 ancestor nodes, but redundant paths are eliminated. The edge 'f'->'b'
284 ancestor nodes, but redundant paths are eliminated. The edge 'f'->'b'
285 is eliminated because there is a path 'f'->'c'->'b' for example.
285 is eliminated because there is a path 'f'->'c'->'b' for example.
286
286
287 - d - e -
287 - d - e -
288 / \
288 / \
289 a - b - c - f
289 a - b - c - f
290
290
291 If the node 'c' is filtered out, the edge 'f'->'b' is activated.
291 If the node 'c' is filtered out, the edge 'f'->'b' is activated.
292
292
293 - d - e -
293 - d - e -
294 / \
294 / \
295 a - b -(c)- f
295 a - b -(c)- f
296
296
297 Likewise, if 'd' and 'e' are filtered out, this edge is fully eliminated
297 Likewise, if 'd' and 'e' are filtered out, this edge is fully eliminated
298 since there is a path 'f'->'c'->'b'->'a' for 'f'->'a'.
298 since there is a path 'f'->'c'->'b'->'a' for 'f'->'a'.
299
299
300 (d) (e)
300 (d) (e)
301
301
302 a - b - c - f
302 a - b - c - f
303
303
304 Implementation-wise, 'f' is passed down to 'a' as unresolved through the
304 Implementation-wise, 'f' is passed down to 'a' as unresolved through the
305 'f'->'e'->'d'->'a' path, whereas we do also remember that 'f' has already
305 'f'->'e'->'d'->'a' path, whereas we do also remember that 'f' has already
306 been resolved while walking down the 'f'->'c'->'b'->'a' path. When
306 been resolved while walking down the 'f'->'c'->'b'->'a' path. When
307 processing the node 'a', the unresolved 'f'->'a' path is eliminated as
307 processing the node 'a', the unresolved 'f'->'a' path is eliminated as
308 the 'f' end is marked as resolved.
308 the 'f' end is marked as resolved.
309
309
310 Ancestors are searched from the tipmost revision in the subset so the
310 Ancestors are searched from the tipmost revision in the subset so the
311 results can be cached. You should specify startrev to narrow the search
311 results can be cached. You should specify startrev to narrow the search
312 space to ':startrev'.
312 space to ':startrev'.
313 """
313 """
314
314
315 def __init__(self, repo, subset, startrev=None):
315 def __init__(self, repo, subset, startrev=None):
316 if startrev is not None:
316 if startrev is not None:
317 subset = repo.revs(b'%d:null', startrev) & subset
317 subset = repo.revs(b'%d:null', startrev) & subset
318
318
319 # equivalent to 'subset = subset.sorted(reverse=True)', but there's
319 # equivalent to 'subset = subset.sorted(reverse=True)', but there's
320 # no such function.
320 # no such function.
321 fastdesc = subset.fastdesc
321 fastdesc = subset.fastdesc
322 if fastdesc:
322 if fastdesc:
323 desciter = fastdesc()
323 desciter = fastdesc()
324 else:
324 else:
325 if not subset.isdescending() and not subset.istopo():
325 if not subset.isdescending() and not subset.istopo():
326 subset = smartset.baseset(subset)
326 subset = smartset.baseset(subset)
327 subset.sort(reverse=True)
327 subset.sort(reverse=True)
328 desciter = iter(subset)
328 desciter = iter(subset)
329
329
330 self._repo = repo
330 self._repo = repo
331 self._changelog = repo.changelog
331 self._changelog = repo.changelog
332 self._subset = subset
332 self._subset = subset
333
333
334 # scanning state (see _scanparents):
334 # scanning state (see _scanparents):
335 self._tovisit = []
335 self._tovisit = []
336 self._pendingcnt = {}
336 self._pendingcnt = {}
337 self._pointers = {}
337 self._pointers = {}
338 self._parents = {}
338 self._parents = {}
339 self._inputhead = nullrev # reassigned by self._advanceinput()
339 self._inputhead = nullrev # reassigned by self._advanceinput()
340 self._inputtail = desciter
340 self._inputtail = desciter
341 self._bottomrev = nullrev
341 self._bottomrev = nullrev
342 self._advanceinput()
342 self._advanceinput()
343
343
344 def parentsset(self, rev):
344 def parentsset(self, rev):
345 """Look up parents of the given revision in the subset, and returns
345 """Look up parents of the given revision in the subset, and returns
346 as a smartset"""
346 as a smartset"""
347 return smartset.baseset(self.parents(rev))
347 return smartset.baseset(self.parents(rev))
348
348
349 def parents(self, rev):
349 def parents(self, rev):
350 """Look up parents of the given revision in the subset
350 """Look up parents of the given revision in the subset
351
351
352 The returned revisions are sorted by parent index (p1/p2).
352 The returned revisions are sorted by parent index (p1/p2).
353 """
353 """
354 self._scanparents(rev)
354 self._scanparents(rev)
355 return [r for _c, r in sorted(self._parents.get(rev, []))]
355 return [r for _c, r in sorted(self._parents.get(rev, []))]
356
356
357 def _parentrevs(self, rev):
357 def _parentrevs(self, rev):
358 try:
358 try:
359 revs = self._changelog.parentrevs(rev)
359 revs = self._changelog.parentrevs(rev)
360 if revs[-1] == nullrev:
360 if revs[-1] == nullrev:
361 return revs[:-1]
361 return revs[:-1]
362 return revs
362 return revs
363 except error.WdirUnsupported:
363 except error.WdirUnsupported:
364 return tuple(pctx.rev() for pctx in self._repo[None].parents())
364 return tuple(pctx.rev() for pctx in self._repo[None].parents())
365
365
366 def _advanceinput(self):
366 def _advanceinput(self):
367 """Advance the input iterator and set the next revision to _inputhead"""
367 """Advance the input iterator and set the next revision to _inputhead"""
368 if self._inputhead < nullrev:
368 if self._inputhead < nullrev:
369 return
369 return
370 try:
370 try:
371 self._inputhead = next(self._inputtail)
371 self._inputhead = next(self._inputtail)
372 except StopIteration:
372 except StopIteration:
373 self._bottomrev = self._inputhead
373 self._bottomrev = self._inputhead
374 self._inputhead = nullrev - 1
374 self._inputhead = nullrev - 1
375
375
376 def _scanparents(self, stoprev):
376 def _scanparents(self, stoprev):
377 """Scan ancestors until the parents of the specified stoprev are
377 """Scan ancestors until the parents of the specified stoprev are
378 resolved"""
378 resolved"""
379
379
380 # 'tovisit' is the queue of the input revisions and their ancestors.
380 # 'tovisit' is the queue of the input revisions and their ancestors.
381 # It will be populated incrementally to minimize the initial cost
381 # It will be populated incrementally to minimize the initial cost
382 # of computing the given subset.
382 # of computing the given subset.
383 #
383 #
384 # For to-visit revisions, we keep track of
384 # For to-visit revisions, we keep track of
385 # - the number of the unresolved paths: pendingcnt[rev],
385 # - the number of the unresolved paths: pendingcnt[rev],
386 # - dict of the unresolved descendants and chains: pointers[rev][0],
386 # - dict of the unresolved descendants and chains: pointers[rev][0],
387 # - set of the already resolved descendants: pointers[rev][1].
387 # - set of the already resolved descendants: pointers[rev][1].
388 #
388 #
389 # When a revision is visited, 'pointers[rev]' should be popped and
389 # When a revision is visited, 'pointers[rev]' should be popped and
390 # propagated to its parents accordingly.
390 # propagated to its parents accordingly.
391 #
391 #
392 # Once all pending paths have been resolved, 'pendingcnt[rev]' becomes
392 # Once all pending paths have been resolved, 'pendingcnt[rev]' becomes
393 # 0 and 'parents[rev]' contains the unsorted list of parent revisions
393 # 0 and 'parents[rev]' contains the unsorted list of parent revisions
394 # and p1/p2 chains (excluding linear paths.)
394 # and p1/p2 chains (excluding linear paths.) The p1/p2 chains will be
395 # used as a sort key preferring p1. 'len(chain)' should be the number
396 # of merges between two revisions.
395
397
396 subset = self._subset
398 subset = self._subset
397 tovisit = self._tovisit # heap queue of [-rev]
399 tovisit = self._tovisit # heap queue of [-rev]
398 pendingcnt = self._pendingcnt # {rev: count} for visited revisions
400 pendingcnt = self._pendingcnt # {rev: count} for visited revisions
399 pointers = self._pointers # {rev: [{unresolved_rev: chain}, resolved]}
401 pointers = self._pointers # {rev: [{unresolved_rev: chain}, resolved]}
400 parents = self._parents # {rev: [(chain, rev)]}
402 parents = self._parents # {rev: [(chain, rev)]}
401
403
402 while tovisit or self._inputhead >= nullrev:
404 while tovisit or self._inputhead >= nullrev:
403 if pendingcnt.get(stoprev) == 0:
405 if pendingcnt.get(stoprev) == 0:
404 return
406 return
405
407
406 # feed greater revisions from input set to queue
408 # feed greater revisions from input set to queue
407 if not tovisit:
409 if not tovisit:
408 heapq.heappush(tovisit, -self._inputhead)
410 heapq.heappush(tovisit, -self._inputhead)
409 self._advanceinput()
411 self._advanceinput()
410 while self._inputhead >= -tovisit[0]:
412 while self._inputhead >= -tovisit[0]:
411 heapq.heappush(tovisit, -self._inputhead)
413 heapq.heappush(tovisit, -self._inputhead)
412 self._advanceinput()
414 self._advanceinput()
413
415
414 rev = -heapq.heappop(tovisit)
416 rev = -heapq.heappop(tovisit)
415 if rev < self._bottomrev:
417 if rev < self._bottomrev:
416 return
418 return
417 if rev in pendingcnt and rev not in pointers:
419 if rev in pendingcnt and rev not in pointers:
418 continue # already visited
420 continue # already visited
419
421
420 curactive = rev in subset
422 curactive = rev in subset
421 pendingcnt.setdefault(rev, 0) # mark as visited
423 pendingcnt.setdefault(rev, 0) # mark as visited
422 if curactive:
424 if curactive:
423 assert rev not in parents
425 assert rev not in parents
424 parents[rev] = []
426 parents[rev] = []
425 unresolved, resolved = pointers.pop(rev, ({}, set()))
427 unresolved, resolved = pointers.pop(rev, ({}, set()))
426
428
427 if curactive:
429 if curactive:
428 # reached an active rev, resolve pending descendants' parents
430 # reached an active rev, resolve pending descendants' parents
429 for r, c in unresolved.items():
431 for r, c in unresolved.items():
430 pendingcnt[r] -= 1
432 pendingcnt[r] -= 1
431 assert pendingcnt[r] >= 0
433 assert pendingcnt[r] >= 0
432 if r in resolved:
434 if r in resolved:
433 continue # eliminate redundant path
435 continue # eliminate redundant path
434 parents[r].append((c, rev))
436 parents[r].append((c, rev))
435 # mark the descendant 'r' as resolved through this path if
437 # mark the descendant 'r' as resolved through this path if
436 # there are still pending pointers. the 'resolved' set may
438 # there are still pending pointers. the 'resolved' set may
437 # be concatenated later at a fork revision.
439 # be concatenated later at a fork revision.
438 if pendingcnt[r] > 0:
440 if pendingcnt[r] > 0:
439 resolved.add(r)
441 resolved.add(r)
440 unresolved.clear()
442 unresolved.clear()
441 # occasionally clean resolved markers. otherwise the set
443 # occasionally clean resolved markers. otherwise the set
442 # would grow indefinitely.
444 # would grow indefinitely.
443 resolved = {r for r in resolved if pendingcnt[r] > 0}
445 resolved = {r for r in resolved if pendingcnt[r] > 0}
444
446
445 parentrevs = self._parentrevs(rev)
447 parentrevs = self._parentrevs(rev)
446 bothparentsactive = all(p in subset for p in parentrevs)
448 bothparentsactive = all(p in subset for p in parentrevs)
447
449
448 # set up or propagate tracking pointers if
450 # set up or propagate tracking pointers if
449 # - one of the parents is not active,
451 # - one of the parents is not active,
450 # - or descendants' parents are unresolved.
452 # - or descendants' parents are unresolved.
451 if not bothparentsactive or unresolved or resolved:
453 if not bothparentsactive or unresolved or resolved:
452 if len(parentrevs) <= 1:
454 if len(parentrevs) <= 1:
453 # can avoid copying the tracking pointer
455 # can avoid copying the tracking pointer
454 parentpointers = [(unresolved, resolved)]
456 parentpointers = [(unresolved, resolved)]
455 else:
457 else:
456 parentpointers = [
458 parentpointers = [
457 (unresolved, resolved),
459 (unresolved, resolved),
458 (unresolved.copy(), resolved.copy()),
460 (unresolved.copy(), resolved.copy()),
459 ]
461 ]
460 # 'rev' is a merge revision. increment the pending count
462 # 'rev' is a merge revision. increment the pending count
461 # as the 'unresolved' dict will be duplicated.
463 # as the 'unresolved' dict will be duplicated, and append
464 # p1/p2 code to the existing chains.
462 for r in unresolved:
465 for r in unresolved:
463 pendingcnt[r] += 1
466 pendingcnt[r] += 1
467 parentpointers[0][0][r] += b'1'
468 parentpointers[1][0][r] += b'2'
464 for i, p in enumerate(parentrevs):
469 for i, p in enumerate(parentrevs):
465 assert p < rev
470 assert p < rev
466 heapq.heappush(tovisit, -p)
471 heapq.heappush(tovisit, -p)
467 if p in pointers:
472 if p in pointers:
468 # 'p' is a fork revision. concatenate tracking pointers
473 # 'p' is a fork revision. concatenate tracking pointers
469 # and decrement the pending count accordingly.
474 # and decrement the pending count accordingly.
470 knownunresolved, knownresolved = pointers[p]
475 knownunresolved, knownresolved = pointers[p]
471 unresolved, resolved = parentpointers[i]
476 unresolved, resolved = parentpointers[i]
472 for r, c in unresolved.items():
477 for r, c in unresolved.items():
473 c += [b'1', b'2'][i]
474 if r in knownunresolved:
478 if r in knownunresolved:
475 # unresolved at both paths
479 # unresolved at both paths
476 pendingcnt[r] -= 1
480 pendingcnt[r] -= 1
477 assert pendingcnt[r] > 0
481 assert pendingcnt[r] > 0
478 # take shorter chain
482 # take shorter chain
479 knownunresolved[r] = min(c, knownunresolved[r])
483 knownunresolved[r] = min(c, knownunresolved[r])
480 else:
484 else:
481 knownunresolved[r] = c
485 knownunresolved[r] = c
482 # simply propagate the 'resolved' set as deduplicating
486 # simply propagate the 'resolved' set as deduplicating
483 # 'unresolved' here would be slightly complicated.
487 # 'unresolved' here would be slightly complicated.
484 knownresolved.update(resolved)
488 knownresolved.update(resolved)
485 else:
489 else:
486 pointers[p] = parentpointers[i]
490 pointers[p] = parentpointers[i]
487
491
488 # then, populate the active parents directly and add the current
492 # then, populate the active parents directly and add the current
489 # 'rev' to the tracking pointers of the inactive parents.
493 # 'rev' to the tracking pointers of the inactive parents.
490 # 'pointers[p]' may be optimized out if both parents are active.
494 # 'pointers[p]' may be optimized out if both parents are active.
495 chaincodes = [b''] if len(parentrevs) <= 1 else [b'1', b'2']
491 if curactive and bothparentsactive:
496 if curactive and bothparentsactive:
492 for i, p in enumerate(parentrevs):
497 for i, p in enumerate(parentrevs):
493 c = [b'1', b'2'][i]
498 c = chaincodes[i]
494 parents[rev].append((c, p))
499 parents[rev].append((c, p))
495 # no need to mark 'rev' as resolved since the 'rev' should
500 # no need to mark 'rev' as resolved since the 'rev' should
496 # be fully resolved (i.e. pendingcnt[rev] == 0)
501 # be fully resolved (i.e. pendingcnt[rev] == 0)
497 assert pendingcnt[rev] == 0
502 assert pendingcnt[rev] == 0
498 elif curactive:
503 elif curactive:
499 for i, p in enumerate(parentrevs):
504 for i, p in enumerate(parentrevs):
500 unresolved, resolved = pointers[p]
505 unresolved, resolved = pointers[p]
501 assert rev not in unresolved
506 assert rev not in unresolved
502 c = [b'1', b'2'][i]
507 c = chaincodes[i]
503 if p in subset:
508 if p in subset:
504 parents[rev].append((c, p))
509 parents[rev].append((c, p))
505 # mark 'rev' as resolved through this path
510 # mark 'rev' as resolved through this path
506 resolved.add(rev)
511 resolved.add(rev)
507 else:
512 else:
508 pendingcnt[rev] += 1
513 pendingcnt[rev] += 1
509 unresolved[rev] = c
514 unresolved[rev] = c
510 assert 0 < pendingcnt[rev] <= 2
515 assert 0 < pendingcnt[rev] <= 2
511
516
512
517
513 def _reachablerootspure(pfunc, minroot, roots, heads, includepath):
518 def _reachablerootspure(pfunc, minroot, roots, heads, includepath):
514 """See revlog.reachableroots"""
519 """See revlog.reachableroots"""
515 if not roots:
520 if not roots:
516 return []
521 return []
517 roots = set(roots)
522 roots = set(roots)
518 visit = list(heads)
523 visit = list(heads)
519 reachable = set()
524 reachable = set()
520 seen = {}
525 seen = {}
521 # prefetch all the things! (because python is slow)
526 # prefetch all the things! (because python is slow)
522 reached = reachable.add
527 reached = reachable.add
523 dovisit = visit.append
528 dovisit = visit.append
524 nextvisit = visit.pop
529 nextvisit = visit.pop
525 # open-code the post-order traversal due to the tiny size of
530 # open-code the post-order traversal due to the tiny size of
526 # sys.getrecursionlimit()
531 # sys.getrecursionlimit()
527 while visit:
532 while visit:
528 rev = nextvisit()
533 rev = nextvisit()
529 if rev in roots:
534 if rev in roots:
530 reached(rev)
535 reached(rev)
531 if not includepath:
536 if not includepath:
532 continue
537 continue
533 parents = pfunc(rev)
538 parents = pfunc(rev)
534 seen[rev] = parents
539 seen[rev] = parents
535 for parent in parents:
540 for parent in parents:
536 if parent >= minroot and parent not in seen:
541 if parent >= minroot and parent not in seen:
537 dovisit(parent)
542 dovisit(parent)
538 if not reachable:
543 if not reachable:
539 return baseset()
544 return baseset()
540 if not includepath:
545 if not includepath:
541 return reachable
546 return reachable
542 for rev in sorted(seen):
547 for rev in sorted(seen):
543 for parent in seen[rev]:
548 for parent in seen[rev]:
544 if parent in reachable:
549 if parent in reachable:
545 reached(rev)
550 reached(rev)
546 return reachable
551 return reachable
547
552
548
553
549 def reachableroots(repo, roots, heads, includepath=False):
554 def reachableroots(repo, roots, heads, includepath=False):
550 """See revlog.reachableroots"""
555 """See revlog.reachableroots"""
551 if not roots:
556 if not roots:
552 return baseset()
557 return baseset()
553 minroot = roots.min()
558 minroot = roots.min()
554 roots = list(roots)
559 roots = list(roots)
555 heads = list(heads)
560 heads = list(heads)
556 revs = repo.changelog.reachableroots(minroot, heads, roots, includepath)
561 revs = repo.changelog.reachableroots(minroot, heads, roots, includepath)
557 revs = baseset(revs)
562 revs = baseset(revs)
558 revs.sort()
563 revs.sort()
559 return revs
564 return revs
560
565
561
566
562 def _changesrange(fctx1, fctx2, linerange2, diffopts):
567 def _changesrange(fctx1, fctx2, linerange2, diffopts):
563 """Return `(diffinrange, linerange1)` where `diffinrange` is True
568 """Return `(diffinrange, linerange1)` where `diffinrange` is True
564 if diff from fctx2 to fctx1 has changes in linerange2 and
569 if diff from fctx2 to fctx1 has changes in linerange2 and
565 `linerange1` is the new line range for fctx1.
570 `linerange1` is the new line range for fctx1.
566 """
571 """
567 blocks = mdiff.allblocks(fctx1.data(), fctx2.data(), diffopts)
572 blocks = mdiff.allblocks(fctx1.data(), fctx2.data(), diffopts)
568 filteredblocks, linerange1 = mdiff.blocksinrange(blocks, linerange2)
573 filteredblocks, linerange1 = mdiff.blocksinrange(blocks, linerange2)
569 diffinrange = any(stype == b'!' for _, stype in filteredblocks)
574 diffinrange = any(stype == b'!' for _, stype in filteredblocks)
570 return diffinrange, linerange1
575 return diffinrange, linerange1
571
576
572
577
573 def blockancestors(fctx, fromline, toline, followfirst=False):
578 def blockancestors(fctx, fromline, toline, followfirst=False):
574 """Yield ancestors of `fctx` with respect to the block of lines within
579 """Yield ancestors of `fctx` with respect to the block of lines within
575 `fromline`-`toline` range.
580 `fromline`-`toline` range.
576 """
581 """
577 diffopts = patch.diffopts(fctx._repo.ui)
582 diffopts = patch.diffopts(fctx._repo.ui)
578 fctx = fctx.introfilectx()
583 fctx = fctx.introfilectx()
579 visit = {(fctx.linkrev(), fctx.filenode()): (fctx, (fromline, toline))}
584 visit = {(fctx.linkrev(), fctx.filenode()): (fctx, (fromline, toline))}
580 while visit:
585 while visit:
581 c, linerange2 = visit.pop(max(visit))
586 c, linerange2 = visit.pop(max(visit))
582 pl = c.parents()
587 pl = c.parents()
583 if followfirst:
588 if followfirst:
584 pl = pl[:1]
589 pl = pl[:1]
585 if not pl:
590 if not pl:
586 # The block originates from the initial revision.
591 # The block originates from the initial revision.
587 yield c, linerange2
592 yield c, linerange2
588 continue
593 continue
589 inrange = False
594 inrange = False
590 for p in pl:
595 for p in pl:
591 inrangep, linerange1 = _changesrange(p, c, linerange2, diffopts)
596 inrangep, linerange1 = _changesrange(p, c, linerange2, diffopts)
592 inrange = inrange or inrangep
597 inrange = inrange or inrangep
593 if linerange1[0] == linerange1[1]:
598 if linerange1[0] == linerange1[1]:
594 # Parent's linerange is empty, meaning that the block got
599 # Parent's linerange is empty, meaning that the block got
595 # introduced in this revision; no need to go futher in this
600 # introduced in this revision; no need to go futher in this
596 # branch.
601 # branch.
597 continue
602 continue
598 # Set _descendantrev with 'c' (a known descendant) so that, when
603 # Set _descendantrev with 'c' (a known descendant) so that, when
599 # _adjustlinkrev is called for 'p', it receives this descendant
604 # _adjustlinkrev is called for 'p', it receives this descendant
600 # (as srcrev) instead possibly topmost introrev.
605 # (as srcrev) instead possibly topmost introrev.
601 p._descendantrev = c.rev()
606 p._descendantrev = c.rev()
602 visit[p.linkrev(), p.filenode()] = p, linerange1
607 visit[p.linkrev(), p.filenode()] = p, linerange1
603 if inrange:
608 if inrange:
604 yield c, linerange2
609 yield c, linerange2
605
610
606
611
607 def blockdescendants(fctx, fromline, toline):
612 def blockdescendants(fctx, fromline, toline):
608 """Yield descendants of `fctx` with respect to the block of lines within
613 """Yield descendants of `fctx` with respect to the block of lines within
609 `fromline`-`toline` range.
614 `fromline`-`toline` range.
610 """
615 """
611 # First possibly yield 'fctx' if it has changes in range with respect to
616 # First possibly yield 'fctx' if it has changes in range with respect to
612 # its parents.
617 # its parents.
613 try:
618 try:
614 c, linerange1 = next(blockancestors(fctx, fromline, toline))
619 c, linerange1 = next(blockancestors(fctx, fromline, toline))
615 except StopIteration:
620 except StopIteration:
616 pass
621 pass
617 else:
622 else:
618 if c == fctx:
623 if c == fctx:
619 yield c, linerange1
624 yield c, linerange1
620
625
621 diffopts = patch.diffopts(fctx._repo.ui)
626 diffopts = patch.diffopts(fctx._repo.ui)
622 fl = fctx.filelog()
627 fl = fctx.filelog()
623 seen = {fctx.filerev(): (fctx, (fromline, toline))}
628 seen = {fctx.filerev(): (fctx, (fromline, toline))}
624 for i in fl.descendants([fctx.filerev()]):
629 for i in fl.descendants([fctx.filerev()]):
625 c = fctx.filectx(i)
630 c = fctx.filectx(i)
626 inrange = False
631 inrange = False
627 for x in fl.parentrevs(i):
632 for x in fl.parentrevs(i):
628 try:
633 try:
629 p, linerange2 = seen[x]
634 p, linerange2 = seen[x]
630 except KeyError:
635 except KeyError:
631 # nullrev or other branch
636 # nullrev or other branch
632 continue
637 continue
633 inrangep, linerange1 = _changesrange(c, p, linerange2, diffopts)
638 inrangep, linerange1 = _changesrange(c, p, linerange2, diffopts)
634 inrange = inrange or inrangep
639 inrange = inrange or inrangep
635 # If revision 'i' has been seen (it's a merge) and the line range
640 # If revision 'i' has been seen (it's a merge) and the line range
636 # previously computed differs from the one we just got, we take the
641 # previously computed differs from the one we just got, we take the
637 # surrounding interval. This is conservative but avoids loosing
642 # surrounding interval. This is conservative but avoids loosing
638 # information.
643 # information.
639 if i in seen and seen[i][1] != linerange1:
644 if i in seen and seen[i][1] != linerange1:
640 lbs, ubs = zip(linerange1, seen[i][1])
645 lbs, ubs = zip(linerange1, seen[i][1])
641 linerange1 = min(lbs), max(ubs)
646 linerange1 = min(lbs), max(ubs)
642 seen[i] = c, linerange1
647 seen[i] = c, linerange1
643 if inrange:
648 if inrange:
644 yield c, linerange1
649 yield c, linerange1
645
650
646
651
647 @attr.s(slots=True, frozen=True)
652 @attr.s(slots=True, frozen=True)
648 class annotateline(object):
653 class annotateline(object):
649 fctx = attr.ib()
654 fctx = attr.ib()
650 lineno = attr.ib()
655 lineno = attr.ib()
651 # Whether this annotation was the result of a skip-annotate.
656 # Whether this annotation was the result of a skip-annotate.
652 skip = attr.ib(default=False)
657 skip = attr.ib(default=False)
653 text = attr.ib(default=None)
658 text = attr.ib(default=None)
654
659
655
660
656 @attr.s(slots=True, frozen=True)
661 @attr.s(slots=True, frozen=True)
657 class _annotatedfile(object):
662 class _annotatedfile(object):
658 # list indexed by lineno - 1
663 # list indexed by lineno - 1
659 fctxs = attr.ib()
664 fctxs = attr.ib()
660 linenos = attr.ib()
665 linenos = attr.ib()
661 skips = attr.ib()
666 skips = attr.ib()
662 # full file content
667 # full file content
663 text = attr.ib()
668 text = attr.ib()
664
669
665
670
666 def _countlines(text):
671 def _countlines(text):
667 if text.endswith(b"\n"):
672 if text.endswith(b"\n"):
668 return text.count(b"\n")
673 return text.count(b"\n")
669 return text.count(b"\n") + int(bool(text))
674 return text.count(b"\n") + int(bool(text))
670
675
671
676
672 def _decoratelines(text, fctx):
677 def _decoratelines(text, fctx):
673 n = _countlines(text)
678 n = _countlines(text)
674 linenos = pycompat.rangelist(1, n + 1)
679 linenos = pycompat.rangelist(1, n + 1)
675 return _annotatedfile([fctx] * n, linenos, [False] * n, text)
680 return _annotatedfile([fctx] * n, linenos, [False] * n, text)
676
681
677
682
678 def _annotatepair(parents, childfctx, child, skipchild, diffopts):
683 def _annotatepair(parents, childfctx, child, skipchild, diffopts):
679 r'''
684 r'''
680 Given parent and child fctxes and annotate data for parents, for all lines
685 Given parent and child fctxes and annotate data for parents, for all lines
681 in either parent that match the child, annotate the child with the parent's
686 in either parent that match the child, annotate the child with the parent's
682 data.
687 data.
683
688
684 Additionally, if `skipchild` is True, replace all other lines with parent
689 Additionally, if `skipchild` is True, replace all other lines with parent
685 annotate data as well such that child is never blamed for any lines.
690 annotate data as well such that child is never blamed for any lines.
686
691
687 See test-annotate.py for unit tests.
692 See test-annotate.py for unit tests.
688 '''
693 '''
689 pblocks = [
694 pblocks = [
690 (parent, mdiff.allblocks(parent.text, child.text, opts=diffopts))
695 (parent, mdiff.allblocks(parent.text, child.text, opts=diffopts))
691 for parent in parents
696 for parent in parents
692 ]
697 ]
693
698
694 if skipchild:
699 if skipchild:
695 # Need to iterate over the blocks twice -- make it a list
700 # Need to iterate over the blocks twice -- make it a list
696 pblocks = [(p, list(blocks)) for (p, blocks) in pblocks]
701 pblocks = [(p, list(blocks)) for (p, blocks) in pblocks]
697 # Mercurial currently prefers p2 over p1 for annotate.
702 # Mercurial currently prefers p2 over p1 for annotate.
698 # TODO: change this?
703 # TODO: change this?
699 for parent, blocks in pblocks:
704 for parent, blocks in pblocks:
700 for (a1, a2, b1, b2), t in blocks:
705 for (a1, a2, b1, b2), t in blocks:
701 # Changed blocks ('!') or blocks made only of blank lines ('~')
706 # Changed blocks ('!') or blocks made only of blank lines ('~')
702 # belong to the child.
707 # belong to the child.
703 if t == b'=':
708 if t == b'=':
704 child.fctxs[b1:b2] = parent.fctxs[a1:a2]
709 child.fctxs[b1:b2] = parent.fctxs[a1:a2]
705 child.linenos[b1:b2] = parent.linenos[a1:a2]
710 child.linenos[b1:b2] = parent.linenos[a1:a2]
706 child.skips[b1:b2] = parent.skips[a1:a2]
711 child.skips[b1:b2] = parent.skips[a1:a2]
707
712
708 if skipchild:
713 if skipchild:
709 # Now try and match up anything that couldn't be matched,
714 # Now try and match up anything that couldn't be matched,
710 # Reversing pblocks maintains bias towards p2, matching above
715 # Reversing pblocks maintains bias towards p2, matching above
711 # behavior.
716 # behavior.
712 pblocks.reverse()
717 pblocks.reverse()
713
718
714 # The heuristics are:
719 # The heuristics are:
715 # * Work on blocks of changed lines (effectively diff hunks with -U0).
720 # * Work on blocks of changed lines (effectively diff hunks with -U0).
716 # This could potentially be smarter but works well enough.
721 # This could potentially be smarter but works well enough.
717 # * For a non-matching section, do a best-effort fit. Match lines in
722 # * For a non-matching section, do a best-effort fit. Match lines in
718 # diff hunks 1:1, dropping lines as necessary.
723 # diff hunks 1:1, dropping lines as necessary.
719 # * Repeat the last line as a last resort.
724 # * Repeat the last line as a last resort.
720
725
721 # First, replace as much as possible without repeating the last line.
726 # First, replace as much as possible without repeating the last line.
722 remaining = [(parent, []) for parent, _blocks in pblocks]
727 remaining = [(parent, []) for parent, _blocks in pblocks]
723 for idx, (parent, blocks) in enumerate(pblocks):
728 for idx, (parent, blocks) in enumerate(pblocks):
724 for (a1, a2, b1, b2), _t in blocks:
729 for (a1, a2, b1, b2), _t in blocks:
725 if a2 - a1 >= b2 - b1:
730 if a2 - a1 >= b2 - b1:
726 for bk in pycompat.xrange(b1, b2):
731 for bk in pycompat.xrange(b1, b2):
727 if child.fctxs[bk] == childfctx:
732 if child.fctxs[bk] == childfctx:
728 ak = min(a1 + (bk - b1), a2 - 1)
733 ak = min(a1 + (bk - b1), a2 - 1)
729 child.fctxs[bk] = parent.fctxs[ak]
734 child.fctxs[bk] = parent.fctxs[ak]
730 child.linenos[bk] = parent.linenos[ak]
735 child.linenos[bk] = parent.linenos[ak]
731 child.skips[bk] = True
736 child.skips[bk] = True
732 else:
737 else:
733 remaining[idx][1].append((a1, a2, b1, b2))
738 remaining[idx][1].append((a1, a2, b1, b2))
734
739
735 # Then, look at anything left, which might involve repeating the last
740 # Then, look at anything left, which might involve repeating the last
736 # line.
741 # line.
737 for parent, blocks in remaining:
742 for parent, blocks in remaining:
738 for a1, a2, b1, b2 in blocks:
743 for a1, a2, b1, b2 in blocks:
739 for bk in pycompat.xrange(b1, b2):
744 for bk in pycompat.xrange(b1, b2):
740 if child.fctxs[bk] == childfctx:
745 if child.fctxs[bk] == childfctx:
741 ak = min(a1 + (bk - b1), a2 - 1)
746 ak = min(a1 + (bk - b1), a2 - 1)
742 child.fctxs[bk] = parent.fctxs[ak]
747 child.fctxs[bk] = parent.fctxs[ak]
743 child.linenos[bk] = parent.linenos[ak]
748 child.linenos[bk] = parent.linenos[ak]
744 child.skips[bk] = True
749 child.skips[bk] = True
745 return child
750 return child
746
751
747
752
748 def annotate(base, parents, skiprevs=None, diffopts=None):
753 def annotate(base, parents, skiprevs=None, diffopts=None):
749 """Core algorithm for filectx.annotate()
754 """Core algorithm for filectx.annotate()
750
755
751 `parents(fctx)` is a function returning a list of parent filectxs.
756 `parents(fctx)` is a function returning a list of parent filectxs.
752 """
757 """
753
758
754 # This algorithm would prefer to be recursive, but Python is a
759 # This algorithm would prefer to be recursive, but Python is a
755 # bit recursion-hostile. Instead we do an iterative
760 # bit recursion-hostile. Instead we do an iterative
756 # depth-first search.
761 # depth-first search.
757
762
758 # 1st DFS pre-calculates pcache and needed
763 # 1st DFS pre-calculates pcache and needed
759 visit = [base]
764 visit = [base]
760 pcache = {}
765 pcache = {}
761 needed = {base: 1}
766 needed = {base: 1}
762 while visit:
767 while visit:
763 f = visit.pop()
768 f = visit.pop()
764 if f in pcache:
769 if f in pcache:
765 continue
770 continue
766 pl = parents(f)
771 pl = parents(f)
767 pcache[f] = pl
772 pcache[f] = pl
768 for p in pl:
773 for p in pl:
769 needed[p] = needed.get(p, 0) + 1
774 needed[p] = needed.get(p, 0) + 1
770 if p not in pcache:
775 if p not in pcache:
771 visit.append(p)
776 visit.append(p)
772
777
773 # 2nd DFS does the actual annotate
778 # 2nd DFS does the actual annotate
774 visit[:] = [base]
779 visit[:] = [base]
775 hist = {}
780 hist = {}
776 while visit:
781 while visit:
777 f = visit[-1]
782 f = visit[-1]
778 if f in hist:
783 if f in hist:
779 visit.pop()
784 visit.pop()
780 continue
785 continue
781
786
782 ready = True
787 ready = True
783 pl = pcache[f]
788 pl = pcache[f]
784 for p in pl:
789 for p in pl:
785 if p not in hist:
790 if p not in hist:
786 ready = False
791 ready = False
787 visit.append(p)
792 visit.append(p)
788 if ready:
793 if ready:
789 visit.pop()
794 visit.pop()
790 curr = _decoratelines(f.data(), f)
795 curr = _decoratelines(f.data(), f)
791 skipchild = False
796 skipchild = False
792 if skiprevs is not None:
797 if skiprevs is not None:
793 skipchild = f._changeid in skiprevs
798 skipchild = f._changeid in skiprevs
794 curr = _annotatepair(
799 curr = _annotatepair(
795 [hist[p] for p in pl], f, curr, skipchild, diffopts
800 [hist[p] for p in pl], f, curr, skipchild, diffopts
796 )
801 )
797 for p in pl:
802 for p in pl:
798 if needed[p] == 1:
803 if needed[p] == 1:
799 del hist[p]
804 del hist[p]
800 del needed[p]
805 del needed[p]
801 else:
806 else:
802 needed[p] -= 1
807 needed[p] -= 1
803
808
804 hist[f] = curr
809 hist[f] = curr
805 del pcache[f]
810 del pcache[f]
806
811
807 a = hist[base]
812 a = hist[base]
808 return [
813 return [
809 annotateline(*r)
814 annotateline(*r)
810 for r in zip(a.fctxs, a.linenos, a.skips, mdiff.splitnewlines(a.text))
815 for r in zip(a.fctxs, a.linenos, a.skips, mdiff.splitnewlines(a.text))
811 ]
816 ]
812
817
813
818
814 def toposort(revs, parentsfunc, firstbranch=()):
819 def toposort(revs, parentsfunc, firstbranch=()):
815 """Yield revisions from heads to roots one (topo) branch at a time.
820 """Yield revisions from heads to roots one (topo) branch at a time.
816
821
817 This function aims to be used by a graph generator that wishes to minimize
822 This function aims to be used by a graph generator that wishes to minimize
818 the number of parallel branches and their interleaving.
823 the number of parallel branches and their interleaving.
819
824
820 Example iteration order (numbers show the "true" order in a changelog):
825 Example iteration order (numbers show the "true" order in a changelog):
821
826
822 o 4
827 o 4
823 |
828 |
824 o 1
829 o 1
825 |
830 |
826 | o 3
831 | o 3
827 | |
832 | |
828 | o 2
833 | o 2
829 |/
834 |/
830 o 0
835 o 0
831
836
832 Note that the ancestors of merges are understood by the current
837 Note that the ancestors of merges are understood by the current
833 algorithm to be on the same branch. This means no reordering will
838 algorithm to be on the same branch. This means no reordering will
834 occur behind a merge.
839 occur behind a merge.
835 """
840 """
836
841
837 ### Quick summary of the algorithm
842 ### Quick summary of the algorithm
838 #
843 #
839 # This function is based around a "retention" principle. We keep revisions
844 # This function is based around a "retention" principle. We keep revisions
840 # in memory until we are ready to emit a whole branch that immediately
845 # in memory until we are ready to emit a whole branch that immediately
841 # "merges" into an existing one. This reduces the number of parallel
846 # "merges" into an existing one. This reduces the number of parallel
842 # branches with interleaved revisions.
847 # branches with interleaved revisions.
843 #
848 #
844 # During iteration revs are split into two groups:
849 # During iteration revs are split into two groups:
845 # A) revision already emitted
850 # A) revision already emitted
846 # B) revision in "retention". They are stored as different subgroups.
851 # B) revision in "retention". They are stored as different subgroups.
847 #
852 #
848 # for each REV, we do the following logic:
853 # for each REV, we do the following logic:
849 #
854 #
850 # 1) if REV is a parent of (A), we will emit it. If there is a
855 # 1) if REV is a parent of (A), we will emit it. If there is a
851 # retention group ((B) above) that is blocked on REV being
856 # retention group ((B) above) that is blocked on REV being
852 # available, we emit all the revisions out of that retention
857 # available, we emit all the revisions out of that retention
853 # group first.
858 # group first.
854 #
859 #
855 # 2) else, we'll search for a subgroup in (B) awaiting for REV to be
860 # 2) else, we'll search for a subgroup in (B) awaiting for REV to be
856 # available, if such subgroup exist, we add REV to it and the subgroup is
861 # available, if such subgroup exist, we add REV to it and the subgroup is
857 # now awaiting for REV.parents() to be available.
862 # now awaiting for REV.parents() to be available.
858 #
863 #
859 # 3) finally if no such group existed in (B), we create a new subgroup.
864 # 3) finally if no such group existed in (B), we create a new subgroup.
860 #
865 #
861 #
866 #
862 # To bootstrap the algorithm, we emit the tipmost revision (which
867 # To bootstrap the algorithm, we emit the tipmost revision (which
863 # puts it in group (A) from above).
868 # puts it in group (A) from above).
864
869
865 revs.sort(reverse=True)
870 revs.sort(reverse=True)
866
871
867 # Set of parents of revision that have been emitted. They can be considered
872 # Set of parents of revision that have been emitted. They can be considered
868 # unblocked as the graph generator is already aware of them so there is no
873 # unblocked as the graph generator is already aware of them so there is no
869 # need to delay the revisions that reference them.
874 # need to delay the revisions that reference them.
870 #
875 #
871 # If someone wants to prioritize a branch over the others, pre-filling this
876 # If someone wants to prioritize a branch over the others, pre-filling this
872 # set will force all other branches to wait until this branch is ready to be
877 # set will force all other branches to wait until this branch is ready to be
873 # emitted.
878 # emitted.
874 unblocked = set(firstbranch)
879 unblocked = set(firstbranch)
875
880
876 # list of groups waiting to be displayed, each group is defined by:
881 # list of groups waiting to be displayed, each group is defined by:
877 #
882 #
878 # (revs: lists of revs waiting to be displayed,
883 # (revs: lists of revs waiting to be displayed,
879 # blocked: set of that cannot be displayed before those in 'revs')
884 # blocked: set of that cannot be displayed before those in 'revs')
880 #
885 #
881 # The second value ('blocked') correspond to parents of any revision in the
886 # The second value ('blocked') correspond to parents of any revision in the
882 # group ('revs') that is not itself contained in the group. The main idea
887 # group ('revs') that is not itself contained in the group. The main idea
883 # of this algorithm is to delay as much as possible the emission of any
888 # of this algorithm is to delay as much as possible the emission of any
884 # revision. This means waiting for the moment we are about to display
889 # revision. This means waiting for the moment we are about to display
885 # these parents to display the revs in a group.
890 # these parents to display the revs in a group.
886 #
891 #
887 # This first implementation is smart until it encounters a merge: it will
892 # This first implementation is smart until it encounters a merge: it will
888 # emit revs as soon as any parent is about to be emitted and can grow an
893 # emit revs as soon as any parent is about to be emitted and can grow an
889 # arbitrary number of revs in 'blocked'. In practice this mean we properly
894 # arbitrary number of revs in 'blocked'. In practice this mean we properly
890 # retains new branches but gives up on any special ordering for ancestors
895 # retains new branches but gives up on any special ordering for ancestors
891 # of merges. The implementation can be improved to handle this better.
896 # of merges. The implementation can be improved to handle this better.
892 #
897 #
893 # The first subgroup is special. It corresponds to all the revision that
898 # The first subgroup is special. It corresponds to all the revision that
894 # were already emitted. The 'revs' lists is expected to be empty and the
899 # were already emitted. The 'revs' lists is expected to be empty and the
895 # 'blocked' set contains the parents revisions of already emitted revision.
900 # 'blocked' set contains the parents revisions of already emitted revision.
896 #
901 #
897 # You could pre-seed the <parents> set of groups[0] to a specific
902 # You could pre-seed the <parents> set of groups[0] to a specific
898 # changesets to select what the first emitted branch should be.
903 # changesets to select what the first emitted branch should be.
899 groups = [([], unblocked)]
904 groups = [([], unblocked)]
900 pendingheap = []
905 pendingheap = []
901 pendingset = set()
906 pendingset = set()
902
907
903 heapq.heapify(pendingheap)
908 heapq.heapify(pendingheap)
904 heappop = heapq.heappop
909 heappop = heapq.heappop
905 heappush = heapq.heappush
910 heappush = heapq.heappush
906 for currentrev in revs:
911 for currentrev in revs:
907 # Heap works with smallest element, we want highest so we invert
912 # Heap works with smallest element, we want highest so we invert
908 if currentrev not in pendingset:
913 if currentrev not in pendingset:
909 heappush(pendingheap, -currentrev)
914 heappush(pendingheap, -currentrev)
910 pendingset.add(currentrev)
915 pendingset.add(currentrev)
911 # iterates on pending rev until after the current rev have been
916 # iterates on pending rev until after the current rev have been
912 # processed.
917 # processed.
913 rev = None
918 rev = None
914 while rev != currentrev:
919 while rev != currentrev:
915 rev = -heappop(pendingheap)
920 rev = -heappop(pendingheap)
916 pendingset.remove(rev)
921 pendingset.remove(rev)
917
922
918 # Seek for a subgroup blocked, waiting for the current revision.
923 # Seek for a subgroup blocked, waiting for the current revision.
919 matching = [i for i, g in enumerate(groups) if rev in g[1]]
924 matching = [i for i, g in enumerate(groups) if rev in g[1]]
920
925
921 if matching:
926 if matching:
922 # The main idea is to gather together all sets that are blocked
927 # The main idea is to gather together all sets that are blocked
923 # on the same revision.
928 # on the same revision.
924 #
929 #
925 # Groups are merged when a common blocking ancestor is
930 # Groups are merged when a common blocking ancestor is
926 # observed. For example, given two groups:
931 # observed. For example, given two groups:
927 #
932 #
928 # revs [5, 4] waiting for 1
933 # revs [5, 4] waiting for 1
929 # revs [3, 2] waiting for 1
934 # revs [3, 2] waiting for 1
930 #
935 #
931 # These two groups will be merged when we process
936 # These two groups will be merged when we process
932 # 1. In theory, we could have merged the groups when
937 # 1. In theory, we could have merged the groups when
933 # we added 2 to the group it is now in (we could have
938 # we added 2 to the group it is now in (we could have
934 # noticed the groups were both blocked on 1 then), but
939 # noticed the groups were both blocked on 1 then), but
935 # the way it works now makes the algorithm simpler.
940 # the way it works now makes the algorithm simpler.
936 #
941 #
937 # We also always keep the oldest subgroup first. We can
942 # We also always keep the oldest subgroup first. We can
938 # probably improve the behavior by having the longest set
943 # probably improve the behavior by having the longest set
939 # first. That way, graph algorithms could minimise the length
944 # first. That way, graph algorithms could minimise the length
940 # of parallel lines their drawing. This is currently not done.
945 # of parallel lines their drawing. This is currently not done.
941 targetidx = matching.pop(0)
946 targetidx = matching.pop(0)
942 trevs, tparents = groups[targetidx]
947 trevs, tparents = groups[targetidx]
943 for i in matching:
948 for i in matching:
944 gr = groups[i]
949 gr = groups[i]
945 trevs.extend(gr[0])
950 trevs.extend(gr[0])
946 tparents |= gr[1]
951 tparents |= gr[1]
947 # delete all merged subgroups (except the one we kept)
952 # delete all merged subgroups (except the one we kept)
948 # (starting from the last subgroup for performance and
953 # (starting from the last subgroup for performance and
949 # sanity reasons)
954 # sanity reasons)
950 for i in reversed(matching):
955 for i in reversed(matching):
951 del groups[i]
956 del groups[i]
952 else:
957 else:
953 # This is a new head. We create a new subgroup for it.
958 # This is a new head. We create a new subgroup for it.
954 targetidx = len(groups)
959 targetidx = len(groups)
955 groups.append(([], {rev}))
960 groups.append(([], {rev}))
956
961
957 gr = groups[targetidx]
962 gr = groups[targetidx]
958
963
959 # We now add the current nodes to this subgroups. This is done
964 # We now add the current nodes to this subgroups. This is done
960 # after the subgroup merging because all elements from a subgroup
965 # after the subgroup merging because all elements from a subgroup
961 # that relied on this rev must precede it.
966 # that relied on this rev must precede it.
962 #
967 #
963 # we also update the <parents> set to include the parents of the
968 # we also update the <parents> set to include the parents of the
964 # new nodes.
969 # new nodes.
965 if rev == currentrev: # only display stuff in rev
970 if rev == currentrev: # only display stuff in rev
966 gr[0].append(rev)
971 gr[0].append(rev)
967 gr[1].remove(rev)
972 gr[1].remove(rev)
968 parents = [p for p in parentsfunc(rev) if p > node.nullrev]
973 parents = [p for p in parentsfunc(rev) if p > node.nullrev]
969 gr[1].update(parents)
974 gr[1].update(parents)
970 for p in parents:
975 for p in parents:
971 if p not in pendingset:
976 if p not in pendingset:
972 pendingset.add(p)
977 pendingset.add(p)
973 heappush(pendingheap, -p)
978 heappush(pendingheap, -p)
974
979
975 # Look for a subgroup to display
980 # Look for a subgroup to display
976 #
981 #
977 # When unblocked is empty (if clause), we were not waiting for any
982 # When unblocked is empty (if clause), we were not waiting for any
978 # revisions during the first iteration (if no priority was given) or
983 # revisions during the first iteration (if no priority was given) or
979 # if we emitted a whole disconnected set of the graph (reached a
984 # if we emitted a whole disconnected set of the graph (reached a
980 # root). In that case we arbitrarily take the oldest known
985 # root). In that case we arbitrarily take the oldest known
981 # subgroup. The heuristic could probably be better.
986 # subgroup. The heuristic could probably be better.
982 #
987 #
983 # Otherwise (elif clause) if the subgroup is blocked on
988 # Otherwise (elif clause) if the subgroup is blocked on
984 # a revision we just emitted, we can safely emit it as
989 # a revision we just emitted, we can safely emit it as
985 # well.
990 # well.
986 if not unblocked:
991 if not unblocked:
987 if len(groups) > 1: # display other subset
992 if len(groups) > 1: # display other subset
988 targetidx = 1
993 targetidx = 1
989 gr = groups[1]
994 gr = groups[1]
990 elif not gr[1] & unblocked:
995 elif not gr[1] & unblocked:
991 gr = None
996 gr = None
992
997
993 if gr is not None:
998 if gr is not None:
994 # update the set of awaited revisions with the one from the
999 # update the set of awaited revisions with the one from the
995 # subgroup
1000 # subgroup
996 unblocked |= gr[1]
1001 unblocked |= gr[1]
997 # output all revisions in the subgroup
1002 # output all revisions in the subgroup
998 for r in gr[0]:
1003 for r in gr[0]:
999 yield r
1004 yield r
1000 # delete the subgroup that you just output
1005 # delete the subgroup that you just output
1001 # unless it is groups[0] in which case you just empty it.
1006 # unless it is groups[0] in which case you just empty it.
1002 if targetidx:
1007 if targetidx:
1003 del groups[targetidx]
1008 del groups[targetidx]
1004 else:
1009 else:
1005 gr[0][:] = []
1010 gr[0][:] = []
1006 # Check if we have some subgroup waiting for revisions we are not going to
1011 # Check if we have some subgroup waiting for revisions we are not going to
1007 # iterate over
1012 # iterate over
1008 for g in groups:
1013 for g in groups:
1009 for r in g[0]:
1014 for r in g[0]:
1010 yield r
1015 yield r
1011
1016
1012
1017
def headrevs(revs, parentsfn):
    """Resolve the set of DAG heads within *revs*.

    ``revs`` is an iterable of revision numbers.  ``parentsfn`` is a
    callable that receives a revision number and returns an iterable of
    parent revision numbers, possibly including nullrev.

    Returns the subset of ``revs`` that are not a parent of any other
    revision in the subset.

    ``nullrev`` is never included in the returned set, even if it is
    provided in the input set.
    """
    candidates = set(revs)
    # Every revision that shows up as somebody's parent cannot be a
    # head.  Seeding with nullrev guarantees it is stripped from the
    # result even when it appears in the input.
    seen = {node.nullrev}
    for rev in revs:
        seen.update(parentsfn(rev))
    return candidates - seen
1034
1039
1035
1040
def headrevssubset(revsfn, parentrevsfn, startrev=None, stoprevs=None):
    """Returns the set of all revs that have no children with control.

    ``revsfn`` is a callable that with no arguments returns an iterator over
    all revision numbers in topological order. With a ``start`` argument, it
    returns revision numbers starting at that number.

    ``parentrevsfn`` is a callable receiving a revision number and returns an
    iterable of parent revision numbers, where values can include nullrev.

    ``startrev`` is a revision number at which to start the search.

    ``stoprevs`` is an iterable of revision numbers that, when encountered,
    will stop DAG traversal beyond them. Parents of revisions in this
    collection will be heads.
    """
    if startrev is None:
        startrev = nullrev

    barrier = set(stoprevs or [])

    reachable = {startrev}
    heads = {startrev}

    for rev in revsfn(start=startrev + 1):
        for p in parentrevsfn(rev):
            if p in reachable:
                # A stop rev is recorded as a head but never extends the
                # reachable frontier, so traversal halts beyond it.
                if rev not in barrier:
                    reachable.add(rev)
                heads.add(rev)

            # rev shadows its parent as a head, unless the parent is a
            # stop rev, which must remain a head by contract.
            if p in heads and p not in barrier:
                heads.remove(p)

    return heads
1071
1076
1072
1077
def linearize(revs, parentsfn):
    """Linearize and topologically sort a list of revisions.

    The linearization process tries to create long runs of revs where a child
    rev comes immediately after its first parent. This is done by visiting the
    heads of the revs in inverse topological order, and for each visited rev,
    visiting its second parent, then its first parent, then adding the rev
    itself to the output list.

    Returns a list of revision numbers.
    """
    # Stack entries >= 0 are revisions seen for the first time; an entry
    # encoded as ``-rev - 1`` marks a revision whose parents have already
    # been scheduled and which is ready to be emitted.
    stack = sorted(headrevs(revs, parentsfn), reverse=True)
    emitted = set()
    out = []

    while stack:
        item = stack.pop()
        if item >= 0:
            # First visit: re-push in "ready" form, then push the parents
            # so they are popped (and thus emitted) before the rev itself.
            stack.append(-item - 1)
            for p in parentsfn(item):
                if p != node.nullrev and p in revs and p not in emitted:
                    stack.append(p)
        else:
            rev = -item - 1
            if rev not in emitted:
                out.append(rev)
                emitted.add(rev)

    assert len(out) == len(revs)

    return out
@@ -1,337 +1,440 b''
1 Test graph-related template functions
1 Test graph-related template functions
2 =====================================
2 =====================================
3
3
4 $ cat <<'EOF' >> $HGRCPATH
4 $ cat <<'EOF' >> $HGRCPATH
5 > [extensions]
5 > [extensions]
6 > drawdag = $RUNTESTDIR/drawdag.py
6 > drawdag = $RUNTESTDIR/drawdag.py
7 > EOF
7 > EOF
8
8
9 $ hg init a
9 $ hg init a
10 $ cd a
10 $ cd a
11
11
12 $ hg debugdrawdag <<'EOF'
12 $ hg debugdrawdag <<'EOF'
13 > l
13 > l
14 > / \
14 > / \
15 > | k
15 > | k
16 > | |\
16 > | |\
17 > | | j
17 > | | j
18 > | | |
18 > | | |
19 > i | |
19 > i | |
20 > |\ | |
20 > |\ | |
21 > h | | |
21 > h | | |
22 > | | | |
22 > | | | |
23 > | g | |
23 > | g | |
24 > | | | |
24 > | | | |
25 > f | | |
25 > f | | |
26 > | |/ /
26 > | |/ /
27 > | e |
27 > | e |
28 > | |/
28 > | |/
29 > | d
29 > | d
30 > |/|
30 > |/|
31 > c |
31 > c |
32 > | |
32 > | |
33 > b |
33 > b |
34 > |
34 > |
35 > a
35 > a
36 > EOF
36 > EOF
37
37
38 $ hg log -Gq -T'{rev} {tags}\n'
38 $ hg log -Gq -T'{rev} {tags}\n'
39 o 11 l tip
39 o 11 l tip
40 |\
40 |\
41 | o 10 i
41 | o 10 i
42 | |\
42 | |\
43 o \ \ 9 k
43 o \ \ 9 k
44 |\ \ \
44 |\ \ \
45 +-----o 8 g
45 +-----o 8 g
46 | | |
46 | | |
47 | o | 7 j
47 | o | 7 j
48 | | |
48 | | |
49 | | o 6 h
49 | | o 6 h
50 | | |
50 | | |
51 o | | 5 e
51 o | | 5 e
52 |/ /
52 |/ /
53 | o 4 f
53 | o 4 f
54 | |
54 | |
55 o | 3 d
55 o | 3 d
56 |\|
56 |\|
57 | o 2 c
57 | o 2 c
58 | |
58 | |
59 | o 1 b
59 | o 1 b
60 |
60 |
61 o 0 a
61 o 0 a
62
62
63
63
64 $ cd ..
64 $ cd ..
65
65
66 Create repository containing merges of p1 > p2:
67
68 $ hg init named-branch-order
69 $ cd named-branch-order
70
71 $ hg branch -q b0
72 $ hg ci -m 0
73 $ hg up -q null
74 $ hg branch -q b1
75 $ hg ci -m 1
76 $ hg up -q null
77 $ hg branch -q b2
78 $ hg ci -m 2
79 $ hg merge -q 1
80 $ hg ci -m 3
81 $ hg ci -m 4 --config ui.allowemptycommit=true
82 $ hg merge -q 0
83 $ hg ci -m 5
84 $ hg ci -m 6 --config ui.allowemptycommit=true
85 $ hg up -q 1
86 $ hg branch -q b7
87 $ hg ci -m 7
88 $ hg ci -m 8 --config ui.allowemptycommit=true
89 $ hg up -q 6
90 $ hg ci -m 9 --config ui.allowemptycommit=true
91 $ hg up -q 8
92 $ hg merge -q 9
93 $ hg ci -m 10
94
95 $ hg log -Gq -T'{rev} {branch} -> {p1.rev} {p2.rev}\n'
96 @ 10 b7 -> 8 9
97 |\
98 | o 9 b2 -> 6 -1
99 | |
100 o | 8 b7 -> 7 -1
101 | |
102 o | 7 b7 -> 1 -1
103 | |
104 | o 6 b2 -> 5 -1
105 | |
106 | o 5 b2 -> 4 0
107 | |\
108 | | o 4 b2 -> 3 -1
109 | | |
110 +---o 3 b2 -> 2 1
111 | | |
112 | | o 2 b2 -> -1 -1
113 | |
114 o | 1 b1 -> -1 -1
115 /
116 o 0 b0 -> -1 -1
117
118
119 $ cd ..
120
66 subsetparents
121 subsetparents
67 -------------
122 -------------
68
123
69 $ cd a
124 $ cd a
70
125
71 $ hg log -Gq -T '{rev} {tags}: {subsetparents(rev, revset("c+i"))}\n' -r 'c+i'
126 $ hg log -Gq -T '{rev} {tags}: {subsetparents(rev, revset("c+i"))}\n' -r 'c+i'
72 o 10 i: 2
127 o 10 i: 2
73 :
128 :
74 o 2 c:
129 o 2 c:
75 |
130 |
76 ~
131 ~
77
132
78 $ hg log -Gq -T '{rev} {tags}: {subsetparents(rev, revset("c+h+i"))}\n' -r 'c+h+i'
133 $ hg log -Gq -T '{rev} {tags}: {subsetparents(rev, revset("c+h+i"))}\n' -r 'c+h+i'
79 o 10 i: 6
134 o 10 i: 6
80 |\
135 |\
81 o : 6 h: 2
136 o : 6 h: 2
82 :/
137 :/
83 o 2 c:
138 o 2 c:
84 |
139 |
85 ~
140 ~
86
141
87 $ hg log -Gq -T '{rev} {tags}: {subsetparents(rev, revset("c+h+l"))}\n' -r 'c+h+l'
142 $ hg log -Gq -T '{rev} {tags}: {subsetparents(rev, revset("c+h+l"))}\n' -r 'c+h+l'
88 o 11 l tip: 6
143 o 11 l tip: 6
89 :\
144 :\
90 : o 6 h: 2
145 : o 6 h: 2
91 :/
146 :/
92 o 2 c:
147 o 2 c:
93 |
148 |
94 ~
149 ~
95
150
96 $ hg log -Gq -T '{rev} {tags}: {subsetparents(rev, revset("c+f+l"))}\n' -r 'c+f+l'
151 $ hg log -Gq -T '{rev} {tags}: {subsetparents(rev, revset("c+f+l"))}\n' -r 'c+f+l'
97 o 11 l tip: 4
152 o 11 l tip: 4
98 :\
153 :\
99 : o 4 f: 2
154 : o 4 f: 2
100 :/
155 :/
101 o 2 c:
156 o 2 c:
102 |
157 |
103 ~
158 ~
104
159
105 $ hg log -Gq -T '{rev} {tags}: {subsetparents(rev, revset("c+h+i+k"))}\n' -r 'c+h+i+k'
160 $ hg log -Gq -T '{rev} {tags}: {subsetparents(rev, revset("c+h+i+k"))}\n' -r 'c+h+i+k'
106 o 10 i: 6
161 o 10 i: 6
107 |\
162 |\
108 | : o 9 k: 2
163 | : o 9 k: 2
109 | :/
164 | :/
110 o : 6 h: 2
165 o : 6 h: 2
111 :/
166 :/
112 o 2 c:
167 o 2 c:
113 |
168 |
114 ~
169 ~
115
170
116 $ hg log -Gq -T '{rev} {tags}: {subsetparents(rev, revset("c+d+h+i+k"))}\n' -r 'c+d+h+i+k'
171 $ hg log -Gq -T '{rev} {tags}: {subsetparents(rev, revset("c+d+h+i+k"))}\n' -r 'c+d+h+i+k'
117 o 10 i: 6 3
172 o 10 i: 6 3
118 |\
173 |\
119 | : o 9 k: 3
174 | : o 9 k: 3
120 | :/
175 | :/
121 o : 6 h: 2
176 o : 6 h: 2
122 : :
177 : :
123 : o 3 d: 2
178 : o 3 d: 2
124 :/|
179 :/|
125 : ~
180 : ~
126 o 2 c:
181 o 2 c:
127 |
182 |
128 ~
183 ~
129
184
130 $ hg log -Gq -T '{rev} {tags}: {subsetparents(rev, revset("c+j+k+i"))}\n' -r 'c+j+k+i'
185 $ hg log -Gq -T '{rev} {tags}: {subsetparents(rev, revset("c+j+k+i"))}\n' -r 'c+j+k+i'
131 o 10 i: 2
186 o 10 i: 2
132 :
187 :
133 : o 9 k: 7
188 : o 9 k: 7
134 :/|
189 :/|
135 : o 7 j: 2
190 : o 7 j: 2
136 :/
191 :/
137 o 2 c:
192 o 2 c:
138 |
193 |
139 ~
194 ~
140
195
141 $ hg log -Gq -T '{rev} {tags}: {subsetparents(rev, revset("c+e+f+j"))}\n' -r 'c+e+f+j'
196 $ hg log -Gq -T '{rev} {tags}: {subsetparents(rev, revset("c+e+f+j"))}\n' -r 'c+e+f+j'
142 o 7 j: 2
197 o 7 j: 2
143 :
198 :
144 : o 5 e: 2
199 : o 5 e: 2
145 :/
200 :/
146 : o 4 f: 2
201 : o 4 f: 2
147 :/
202 :/
148 o 2 c:
203 o 2 c:
149 |
204 |
150 ~
205 ~
151
206
152 $ hg log -Gq -T '{rev} {tags}: {subsetparents(rev, revset("b+e+f+j"))}\n' -r 'b+e+f+j'
207 $ hg log -Gq -T '{rev} {tags}: {subsetparents(rev, revset("b+e+f+j"))}\n' -r 'b+e+f+j'
153 o 7 j: 1
208 o 7 j: 1
154 :
209 :
155 : o 5 e: 1
210 : o 5 e: 1
156 :/
211 :/
157 : o 4 f: 1
212 : o 4 f: 1
158 :/
213 :/
159 o 1 b:
214 o 1 b:
160
215
161
216
162 $ hg log -Gq -T '{rev} {tags}: {subsetparents(rev, revset("a+c+f+g+j+l"))}\n' -r 'a+c+f+g+j+l'
217 $ hg log -Gq -T '{rev} {tags}: {subsetparents(rev, revset("a+c+f+g+j+l"))}\n' -r 'a+c+f+g+j+l'
163 o 11 l tip: 4 8 7
218 o 11 l tip: 4 8 7
164 :\
219 :\
165 : \
220 : \
166 : :\
221 : :\
167 : : \
222 : : \
168 : : :\
223 : : :\
169 : : : \
224 : : : \
170 : : : :\
225 : : : :\
171 : o---+ : 8 g: 0 2
226 : o---+ : 8 g: 0 2
172 : :/ / /
227 : :/ / /
173 : +---o 7 j: 0 2
228 : +---o 7 j: 0 2
174 : : :/
229 : : :/
175 o---+ 4 f: 2
230 o---+ 4 f: 2
176 / /
231 / /
177 : o 2 c:
232 : o 2 c:
178 : |
233 : |
179 : ~
234 : ~
180 o 0 a:
235 o 0 a:
181
236
182
237
183 $ hg log -Gq -T '{rev} {tags}: {subsetparents(rev, revset("b+i+l"))}\n' -r 'b+i+l'
238 $ hg log -Gq -T '{rev} {tags}: {subsetparents(rev, revset("b+i+l"))}\n' -r 'b+i+l'
184 o 11 l tip: 10
239 o 11 l tip: 10
185 |\
240 |\
186 o : 10 i: 1
241 o : 10 i: 1
187 :/
242 :/
188 o 1 b:
243 o 1 b:
189
244
190
245
191 $ hg log -Gq -T '{rev} {tags}: {subsetparents(rev, revset("b+i+j+l"))}\n' -r 'b+i+j+l'
246 $ hg log -Gq -T '{rev} {tags}: {subsetparents(rev, revset("b+i+j+l"))}\n' -r 'b+i+j+l'
192 o 11 l tip: 10 7
247 o 11 l tip: 10 7
193 |\
248 |\
194 | \
249 | \
195 | :\
250 | :\
196 o : : 10 i: 1
251 o : : 10 i: 1
197 :/ /
252 :/ /
198 : o 7 j: 1
253 : o 7 j: 1
199 :/
254 :/
200 o 1 b:
255 o 1 b:
201
256
202
257
203 null in subset:
258 null in subset:
204
259
205 $ hg log -Gq -T '{rev} {tags}: {subsetparents(rev, revset("null+a+c+f"))}\n' -r 'null+a+c+f'
260 $ hg log -Gq -T '{rev} {tags}: {subsetparents(rev, revset("null+a+c+f"))}\n' -r 'null+a+c+f'
206 o 4 f: 2
261 o 4 f: 2
207 |
262 |
208 o 2 c: -1
263 o 2 c: -1
209 :
264 :
210 : o 0 a: -1
265 : o 0 a: -1
211 :/
266 :/
212 @ -1 : -1
267 @ -1 : -1
213
268
214
269
215 $ hg log -Gq -T '{rev} {tags}: {subsetparents(rev, revset("null+a+b+c+f"))}\n' -r 'null+a+b+c+f'
270 $ hg log -Gq -T '{rev} {tags}: {subsetparents(rev, revset("null+a+b+c+f"))}\n' -r 'null+a+b+c+f'
216 o 4 f: 2
271 o 4 f: 2
217 |
272 |
218 o 2 c: 1
273 o 2 c: 1
219 |
274 |
220 o 1 b: -1
275 o 1 b: -1
221 |
276 |
222 | o 0 a: -1
277 | o 0 a: -1
223 |/
278 |/
224 @ -1 : -1
279 @ -1 : -1
225
280
226
281
227 wdir in subset:
282 wdir in subset:
228
283
229 $ hg update -qC i
284 $ hg update -qC i
230
285
231 $ hg log -Gq -T '{rev} {tags}: {subsetparents(rev, revset("f+k+wdir()"))}\n' -r 'f+k+wdir()'
286 $ hg log -Gq -T '{rev} {tags}: {subsetparents(rev, revset("f+k+wdir()"))}\n' -r 'f+k+wdir()'
232 o 2147483647 : 4
287 o 2147483647 : 4
233 :
288 :
234 : o 9 k:
289 : o 9 k:
235 : |\
290 : |\
236 : ~ ~
291 : ~ ~
237 o 4 f:
292 o 4 f:
238 |
293 |
239 ~
294 ~
240
295
241 $ hg update -qC null
296 $ hg update -qC null
242
297
243 Revisions not in subset:
298 Revisions not in subset:
244
299
245 $ hg log -T '{rev} {tags}: {subsetparents(rev, revset("a+c+f+g+j+l"))}\n'
300 $ hg log -T '{rev} {tags}: {subsetparents(rev, revset("a+c+f+g+j+l"))}\n'
246 11 l tip: 4 8 7
301 11 l tip: 4 8 7
247 10 i:
302 10 i:
248 9 k:
303 9 k:
249 8 g: 0 2
304 8 g: 0 2
250 7 j: 0 2
305 7 j: 0 2
251 6 h:
306 6 h:
252 5 e:
307 5 e:
253 4 f: 2
308 4 f: 2
254 3 d:
309 3 d:
255 2 c:
310 2 c:
256 1 b:
311 1 b:
257 0 a:
312 0 a:
258
313
259 $ hg log -T '{rev} {tags}: {subsetparents(rev, revset("b+c"))}\n'
314 $ hg log -T '{rev} {tags}: {subsetparents(rev, revset("b+c"))}\n'
260 11 l tip:
315 11 l tip:
261 10 i:
316 10 i:
262 9 k:
317 9 k:
263 8 g:
318 8 g:
264 7 j:
319 7 j:
265 6 h:
320 6 h:
266 5 e:
321 5 e:
267 4 f:
322 4 f:
268 3 d:
323 3 d:
269 2 c: 1
324 2 c: 1
270 1 b:
325 1 b:
271 0 a:
326 0 a:
272
327
273 $ hg log -T '{rev} {tags}: {subsetparents(rev, revset("b+c"))}\n' -r'reverse(null:2)'
328 $ hg log -T '{rev} {tags}: {subsetparents(rev, revset("b+c"))}\n' -r'reverse(null:2)'
274 2 c: 1
329 2 c: 1
275 1 b:
330 1 b:
276 0 a:
331 0 a:
277 -1 :
332 -1 :
278
333
279 Nothing excluded:
334 Nothing excluded:
280
335
281 $ hg log -T '{rev} {tags}: {subsetparents(rev, revset("null:wdir()"))}\n' -r'reverse(null:wdir())'
336 $ hg log -T '{rev} {tags}: {subsetparents(rev, revset("null:wdir()"))}\n' -r'reverse(null:wdir())'
282 2147483647 : -1
337 2147483647 : -1
283 11 l tip: 10 9
338 11 l tip: 10 9
284 10 i: 6 8
339 10 i: 6 8
285 9 k: 5 7
340 9 k: 5 7
286 8 g: 5
341 8 g: 5
287 7 j: 3
342 7 j: 3
288 6 h: 4
343 6 h: 4
289 5 e: 3
344 5 e: 3
290 4 f: 2
345 4 f: 2
291 3 d: 0 2
346 3 d: 0 2
292 2 c: 1
347 2 c: 1
293 1 b: -1
348 1 b: -1
294 0 a: -1
349 0 a: -1
295 -1 : -1
350 -1 : -1
296
351
297 Uncachable query:
352 Uncachable query:
298
353
299 $ hg log -Gq -T '{rev} {tags}: {subsetparents(rev, revset("%d:%d", rev, rev - 1))}\n'
354 $ hg log -Gq -T '{rev} {tags}: {subsetparents(rev, revset("%d:%d", rev, rev - 1))}\n'
300 o 11 l tip: 10
355 o 11 l tip: 10
301 |\
356 |\
302 | o 10 i:
357 | o 10 i:
303 | |\
358 | |\
304 o \ \ 9 k:
359 o \ \ 9 k:
305 |\ \ \
360 |\ \ \
306 +-----o 8 g:
361 +-----o 8 g:
307 | | |
362 | | |
308 | o | 7 j:
363 | o | 7 j:
309 | | |
364 | | |
310 | | o 6 h:
365 | | o 6 h:
311 | | |
366 | | |
312 o | | 5 e:
367 o | | 5 e:
313 |/ /
368 |/ /
314 | o 4 f:
369 | o 4 f:
315 | |
370 | |
316 o | 3 d: 2
371 o | 3 d: 2
317 |\|
372 |\|
318 | o 2 c: 1
373 | o 2 c: 1
319 | |
374 | |
320 | o 1 b:
375 | o 1 b:
321 |
376 |
322 o 0 a: -1
377 o 0 a: -1
323
378
324
379
325 Invalid arguments:
380 Invalid arguments:
326
381
327 $ hg log -T '{subsetparents()}\n'
382 $ hg log -T '{subsetparents()}\n'
328 hg: parse error: subsetparents expects two arguments
383 hg: parse error: subsetparents expects two arguments
329 [255]
384 [255]
330 $ hg log -T '{subsetparents("a")}\n'
385 $ hg log -T '{subsetparents("a")}\n'
331 hg: parse error: subsetparents expects two arguments
386 hg: parse error: subsetparents expects two arguments
332 [255]
387 [255]
333 $ hg log -T '{subsetparents(rev, extras)}\n'
388 $ hg log -T '{subsetparents(rev, extras)}\n'
334 hg: parse error: subsetparents expects a queried revset
389 hg: parse error: subsetparents expects a queried revset
335 [255]
390 [255]
336
391
337 $ cd ..
392 $ cd ..
393
394 subsetparents: p1/p2 order
395 -------------------------
396
397 $ cd named-branch-order
398
399 Parents should be sorted in p1/p2 order since p1 is likely to belong to
400 the same named branch:
401
402 $ hg log -Gq -T '{rev} {tags}: {subsetparents(rev, revset("0+1+2+6"))}\n' -r '0+1+2+6'
403 o 6 : 2 1 0
404 :\
405 : \
406 : :\
407 : : o 2 :
408 : :
409 : o 1 :
410 :
411 o 0 :
412
413
414 $ hg log -Gq -T '{rev} {tags}: {subsetparents(rev, revset("0+1+2+6+10"))}\n' -r '0+1+2+6+10'
415 @ 10 tip: 6
416 :\
417 : o 6 : 2 1 0
418 :/:\
419 : : o 2 :
420 : :
421 o : 1 :
422 /
423 o 0 :
424
425
426 And p1 path should be selected if both p1/p2 paths exist:
427
428 $ hg log -Gq -T '{rev} {tags}: {subsetparents(rev, revset("0+1+2+10"))}\n' -r '0+1+2+10'
429 @ 10 tip: 1 2 0
430 :\
431 : \
432 : :\
433 : : o 2 :
434 : :
435 o : 1 :
436 /
437 o 0 :
438
439
440 $ cd ..
General Comments 0
You need to be logged in to leave comments. Login now