##// END OF EJS Templates
dagop: extract headsetofconnecteds() from dagutil...
Gregory Szorc -
r39215:1c3184d7 default
parent child Browse files
Show More
@@ -1,717 +1,740 b''
1 # dagop.py - graph ancestry and topology algorithm for revset
1 # dagop.py - graph ancestry and topology algorithm for revset
2 #
2 #
3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import heapq
10 import heapq
11
11
12 from .thirdparty import (
12 from .thirdparty import (
13 attr,
13 attr,
14 )
14 )
15 from . import (
15 from . import (
16 error,
16 error,
17 mdiff,
17 mdiff,
18 node,
18 node,
19 patch,
19 patch,
20 pycompat,
20 pycompat,
21 smartset,
21 smartset,
22 )
22 )
23
23
# Shorthands for the smartset classes used throughout this module.
baseset = smartset.baseset
generatorset = smartset.generatorset

# possible maximum depth between null and wdir()
_maxlogdepth = 0x80000000
29
29
def _walkrevtree(pfunc, revs, startdepth, stopdepth, reverse):
    """Walk DAG using 'pfunc' from the given 'revs' nodes

    'pfunc(rev)' should return the parent/child revisions of the given 'rev'
    if 'reverse' is True/False respectively.

    Scan ends at the stopdepth (exclusive) if specified. Revisions found
    earlier than the startdepth are omitted.
    """
    if startdepth is None:
        startdepth = 0
    if stopdepth is None:
        stopdepth = _maxlogdepth
    if stopdepth == 0:
        # nothing can be within an empty depth window
        return
    if stopdepth < 0:
        raise error.ProgrammingError('negative stopdepth')
    if reverse:
        heapsign = -1  # max heap: walk descending (towards ancestors)
    else:
        heapsign = +1  # min heap: walk ascending (towards descendants)

    # load input revs lazily to heap so earlier revisions can be yielded
    # without fully computing the input revs
    revs.sort(reverse)
    irevs = iter(revs)
    pendingheap = []  # [(heapsign * rev, depth), ...] (i.e. lower depth first)

    inputrev = next(irevs, None)
    if inputrev is not None:
        heapq.heappush(pendingheap, (heapsign * inputrev, 0))

    lastrev = None
    while pendingheap:
        currev, curdepth = heapq.heappop(pendingheap)
        currev = heapsign * currev
        if currev == inputrev:
            # the head of the lazy input stream was consumed; pull in the
            # next input rev so it competes in heap order with parents/children
            inputrev = next(irevs, None)
            if inputrev is not None:
                heapq.heappush(pendingheap, (heapsign * inputrev, 0))
        # rescan parents until curdepth >= startdepth because queued entries
        # of the same revision are iterated from the lowest depth
        foundnew = (currev != lastrev)
        if foundnew and curdepth >= startdepth:
            lastrev = currev
            yield currev
        pdepth = curdepth + 1
        if foundnew and pdepth < stopdepth:
            for prev in pfunc(currev):
                if prev != node.nullrev:
                    heapq.heappush(pendingheap, (heapsign * prev, pdepth))
81
81
def filectxancestors(fctxs, followfirst=False):
    """Like filectx.ancestors(), but can walk from multiple files/revisions,
    and includes the given fctxs themselves

    Yields (rev, {fctx, ...}) pairs in descending order.
    """
    # Pending work: revision -> set of file contexts at that revision,
    # mirrored by a heap of negated revisions (max-heap behavior).
    pending = {}
    revheap = []

    def schedule(fctx):
        rev = fctx.rev()
        bucket = pending.get(rev)
        if bucket is None:
            bucket = pending[rev] = set()
            heapq.heappush(revheap, -rev)  # negate for max-heap ordering
        bucket.add(fctx)

    cut = 1 if followfirst else None

    for fctx in fctxs:
        schedule(fctx)
    while pending:
        rev = -heapq.heappop(revheap)
        fctxset = pending.pop(rev)
        yield rev, fctxset
        for fctx in fctxset:
            for p in fctx.parents()[:cut]:
                schedule(p)
    # every heap entry must have had a matching pending bucket
    assert not revheap
112
112
def filerevancestors(fctxs, followfirst=False):
    """Like filectx.ancestors(), but can walk from multiple files/revisions,
    and includes the given fctxs themselves

    Returns a smartset.
    """
    # Only the revision numbers are kept; the fctx sets are discarded.
    revgen = (rev for rev, _fctxs in filectxancestors(fctxs, followfirst))
    return generatorset(revgen, iterasc=False)
121
121
def _genrevancestors(repo, revs, followfirst, startdepth, stopdepth, cutfunc):
    """Generate ancestor revisions of 'revs', including 'revs' themselves.

    See revancestors() for the meaning of the arguments; this returns the
    raw generator that revancestors() wraps in a smartset.
    """
    if followfirst:
        cut = 1
    else:
        cut = None
    cl = repo.changelog
    def plainpfunc(rev):
        try:
            return cl.parentrevs(rev)[:cut]
        except error.WdirUnsupported:
            # wdir() has no changelog entry; resolve its parents via the
            # context API instead
            return (pctx.rev() for pctx in repo[rev].parents()[:cut])
    if cutfunc is None:
        pfunc = plainpfunc
    else:
        # drop cut revisions both from the starting set and from every
        # parent list so the traversal never crosses them
        pfunc = lambda rev: [r for r in plainpfunc(rev) if not cutfunc(r)]
        revs = revs.filter(lambda rev: not cutfunc(rev))
    return _walkrevtree(pfunc, revs, startdepth, stopdepth, reverse=True)
139
139
def revancestors(repo, revs, followfirst=False, startdepth=None,
                 stopdepth=None, cutfunc=None):
    """Like revlog.ancestors(), but supports additional options, includes
    the given revs themselves, and returns a smartset

    Scan ends at the stopdepth (exclusive) if specified. Revisions found
    earlier than the startdepth are omitted.

    If cutfunc is provided, it will be used to cut the traversal of the DAG.
    When cutfunc(X) returns True, the DAG traversal stops - revision X and
    X's ancestors in the traversal path will be skipped. This could be an
    optimization sometimes.

    Note: if Y is an ancestor of X, cutfunc(X) returning True does not
    necessarily mean Y will also be cut. Usually cutfunc(Y) also wants to
    return True in this case. For example,

        D     # revancestors(repo, D, cutfunc=lambda rev: rev == B)
        |\    # will include "A", because the path D -> C -> A was not cut.
        B C   # If "B" gets cut, "A" might want to be cut too.
        |/
        A
    """
    gen = _genrevancestors(repo, revs, followfirst, startdepth, stopdepth,
                           cutfunc)
    return generatorset(gen, iterasc=False)
166
166
def _genrevdescendants(repo, revs, followfirst):
    """Generate descendant revisions of 'revs' in ascending order, with no
    depth limits, including 'revs' themselves."""
    if followfirst:
        cut = 1
    else:
        cut = None

    cl = repo.changelog
    first = revs.min()
    nullrev = node.nullrev
    if first == nullrev:
        # nullrev is an ancestor of everything, so every revision is a
        # descendant.
        # Are there nodes with a null first parent and a non-null
        # second one? Maybe. Do we care? Probably not.
        yield first
        for i in cl:
            yield i
    else:
        # single linear scan: a rev is a descendant iff one of its (possibly
        # first-only) parents has already been marked as one
        seen = set(revs)
        for i in cl.revs(first):
            if i in seen:
                yield i
                continue
            for x in cl.parentrevs(i)[:cut]:
                if x != nullrev and x in seen:
                    seen.add(i)
                    yield i
                    break
193
193
def _builddescendantsmap(repo, startrev, followfirst):
    """Build map of 'rev -> child revs', offset from startrev"""
    cl = repo.changelog
    nullrev = node.nullrev
    # one bucket per revision in [startrev, len(cl)), indexed by rev - startrev
    descmap = [[] for _ in pycompat.xrange(startrev, len(cl))]
    for rev in cl.revs(startrev + 1):
        p1, p2 = cl.parentrevs(rev)
        if p1 >= startrev:
            descmap[p1 - startrev].append(rev)
        # the second parent is ignored when following first parents only;
        # nullrev second parents are never recorded
        if not followfirst and p2 != nullrev and p2 >= startrev:
            descmap[p2 - startrev].append(rev)
    return descmap
206
206
def _genrevdescendantsofdepth(repo, revs, followfirst, startdepth, stopdepth):
    """Generate descendant revisions of 'revs' within the given depth window,
    including 'revs' themselves."""
    startrev = revs.min()
    descmap = _builddescendantsmap(repo, startrev, followfirst)
    # children play the role of "parents" when walking forward
    def childrenof(rev):
        return descmap[rev - startrev]
    return _walkrevtree(childrenof, revs, startdepth, stopdepth, reverse=False)
213
213
def revdescendants(repo, revs, followfirst, startdepth=None, stopdepth=None):
    """Like revlog.descendants() but supports additional options, includes
    the given revs themselves, and returns a smartset

    Scan ends at the stopdepth (exclusive) if specified. Revisions found
    earlier than the startdepth are omitted.
    """
    if startdepth is None and stopdepth is None:
        # no depth window: the cheaper linear changelog scan suffices
        gen = _genrevdescendants(repo, revs, followfirst)
    else:
        # depth-limited walk needs the full rev -> children map
        gen = _genrevdescendantsofdepth(repo, revs, followfirst,
                                        startdepth, stopdepth)
    return generatorset(gen, iterasc=True)
227
227
def _reachablerootspure(repo, minroot, roots, heads, includepath):
    """return (heads(::<roots> and ::<heads>))

    If includepath is True, return (<roots>::<heads>)."""
    # NOTE(review): the empty cases return different types - [] here,
    # baseset() below, and a plain set otherwise. The caller wraps the
    # result in baseset() either way, but confirm before relying on the
    # return type.
    if not roots:
        return []
    parentrevs = repo.changelog.parentrevs
    roots = set(roots)
    visit = list(heads)
    reachable = set()
    seen = {}
    # prefetch all the things! (because python is slow)
    reached = reachable.add
    dovisit = visit.append
    nextvisit = visit.pop
    # open-code the post-order traversal due to the tiny size of
    # sys.getrecursionlimit()
    while visit:
        rev = nextvisit()
        if rev in roots:
            reached(rev)
            if not includepath:
                # only the roots themselves are wanted; no need to record
                # this rev's parents for the path-marking pass below
                continue
        parents = parentrevs(rev)
        seen[rev] = parents
        for parent in parents:
            # revisions below minroot cannot contribute to the answer
            if parent >= minroot and parent not in seen:
                dovisit(parent)
    if not reachable:
        return baseset()
    if not includepath:
        return reachable
    # second pass in ascending rev order: a rev's parents are processed
    # before the rev itself, so reachability propagates along every path
    # from the roots up towards the heads
    for rev in sorted(seen):
        for parent in seen[rev]:
            if parent in reachable:
                reached(rev)
    return reachable
265
265
def reachableroots(repo, roots, heads, includepath=False):
    """return (heads(::<roots> and ::<heads>))

    If includepath is True, return (<roots>::<heads>)."""
    if not roots:
        return baseset()
    lowest = roots.min()
    rootlist = list(roots)
    headlist = list(heads)
    try:
        # prefer the native index implementation when the changelog has one
        revs = repo.changelog.reachableroots(lowest, headlist, rootlist,
                                             includepath)
    except AttributeError:
        revs = _reachablerootspure(repo, lowest, rootlist, headlist,
                                   includepath)
    result = baseset(revs)
    result.sort()
    return result
282
282
def _changesrange(fctx1, fctx2, linerange2, diffopts):
    """Return `(diffinrange, linerange1)` where `diffinrange` is True
    if diff from fctx2 to fctx1 has changes in linerange2 and
    `linerange1` is the new line range for fctx1.
    """
    allblocks = mdiff.allblocks(fctx1.data(), fctx2.data(), diffopts)
    inrangeblocks, linerange1 = mdiff.blocksinrange(allblocks, linerange2)
    # '!' marks a changed block; any such block inside the range means the
    # range was touched by this diff
    diffinrange = any(btype == '!' for _block, btype in inrangeblocks)
    return diffinrange, linerange1
292
292
def blockancestors(fctx, fromline, toline, followfirst=False):
    """Yield ancestors of `fctx` with respect to the block of lines within
    `fromline`-`toline` range.
    """
    diffopts = patch.diffopts(fctx._repo.ui)
    fctx = fctx.introfilectx()
    visit = {(fctx.linkrev(), fctx.filenode()): (fctx, (fromline, toline))}
    while visit:
        # process the highest pending (linkrev, filenode) key first so
        # descendants are handled before their ancestors
        c, linerange2 = visit.pop(max(visit))
        pl = c.parents()
        if followfirst:
            pl = pl[:1]
        if not pl:
            # The block originates from the initial revision.
            yield c, linerange2
            continue
        inrange = False
        for p in pl:
            inrangep, linerange1 = _changesrange(p, c, linerange2, diffopts)
            inrange = inrange or inrangep
            if linerange1[0] == linerange1[1]:
                # Parent's linerange is empty, meaning that the block got
                # introduced in this revision; no need to go further in this
                # branch.
                continue
            # Set _descendantrev with 'c' (a known descendant) so that, when
            # _adjustlinkrev is called for 'p', it receives this descendant
            # (as srcrev) instead possibly topmost introrev.
            p._descendantrev = c.rev()
            visit[p.linkrev(), p.filenode()] = p, linerange1
        if inrange:
            yield c, linerange2
325
325
def blockdescendants(fctx, fromline, toline):
    """Yield descendants of `fctx` with respect to the block of lines within
    `fromline`-`toline` range.
    """
    # First possibly yield 'fctx' if it has changes in range with respect to
    # its parents.
    try:
        c, linerange1 = next(blockancestors(fctx, fromline, toline))
    except StopIteration:
        pass
    else:
        if c == fctx:
            yield c, linerange1

    diffopts = patch.diffopts(fctx._repo.ui)
    fl = fctx.filelog()
    # seen maps a filelog rev to (fctx, linerange) computed for it so far
    seen = {fctx.filerev(): (fctx, (fromline, toline))}
    for i in fl.descendants([fctx.filerev()]):
        c = fctx.filectx(i)
        inrange = False
        for x in fl.parentrevs(i):
            try:
                p, linerange2 = seen[x]
            except KeyError:
                # nullrev or other branch
                continue
            inrangep, linerange1 = _changesrange(c, p, linerange2, diffopts)
            inrange = inrange or inrangep
            # If revision 'i' has been seen (it's a merge) and the line range
            # previously computed differs from the one we just got, we take the
            # surrounding interval. This is conservative but avoids losing
            # information.
            if i in seen and seen[i][1] != linerange1:
                lbs, ubs = zip(linerange1, seen[i][1])
                linerange1 = min(lbs), max(ubs)
            seen[i] = c, linerange1
        if inrange:
            yield c, linerange1
364
364
@attr.s(slots=True, frozen=True)
class annotateline(object):
    """Annotation data for a single line of a file."""
    # filectx of the revision this line is attributed to
    fctx = attr.ib()
    # line number within that revision's file (1-based; see _decoratelines)
    lineno = attr.ib()
    # Whether this annotation was the result of a skip-annotate.
    skip = attr.ib(default=False)
    # the line's content, filled in from the annotated file's text
    text = attr.ib(default=None)
372
372
@attr.s(slots=True, frozen=True)
class _annotatedfile(object):
    """Intermediate per-file annotation state used by annotate()."""
    # list indexed by lineno - 1
    fctxs = attr.ib()    # filectx attributed to each line
    linenos = attr.ib()  # line number of each line in its origin revision
    skips = attr.ib()    # whether each line came from a skip-annotate
    # full file content
    text = attr.ib()
381
381
382 def _countlines(text):
382 def _countlines(text):
383 if text.endswith("\n"):
383 if text.endswith("\n"):
384 return text.count("\n")
384 return text.count("\n")
385 return text.count("\n") + int(bool(text))
385 return text.count("\n") + int(bool(text))
386
386
def _decoratelines(text, fctx):
    """Build an _annotatedfile attributing every line of *text* to *fctx*."""
    nlines = _countlines(text)
    linenos = pycompat.rangelist(1, nlines + 1)
    # no line is a skip-annotate result initially
    return _annotatedfile([fctx] * nlines, linenos, [False] * nlines, text)
391
391
def _annotatepair(parents, childfctx, child, skipchild, diffopts):
    r'''
    Given parent and child fctxes and annotate data for parents, for all lines
    in either parent that match the child, annotate the child with the parent's
    data.

    Additionally, if `skipchild` is True, replace all other lines with parent
    annotate data as well such that child is never blamed for any lines.

    See test-annotate.py for unit tests.
    '''
    pblocks = [(parent, mdiff.allblocks(parent.text, child.text, opts=diffopts))
               for parent in parents]

    if skipchild:
        # Need to iterate over the blocks twice -- make it a list
        pblocks = [(p, list(blocks)) for (p, blocks) in pblocks]
    # Mercurial currently prefers p2 over p1 for annotate.
    # TODO: change this?
    for parent, blocks in pblocks:
        for (a1, a2, b1, b2), t in blocks:
            # Changed blocks ('!') or blocks made only of blank lines ('~')
            # belong to the child.
            if t == '=':
                # unchanged block: copy the parent's annotation data over
                child.fctxs[b1:b2] = parent.fctxs[a1:a2]
                child.linenos[b1:b2] = parent.linenos[a1:a2]
                child.skips[b1:b2] = parent.skips[a1:a2]

    if skipchild:
        # Now try and match up anything that couldn't be matched,
        # Reversing pblocks maintains bias towards p2, matching above
        # behavior.
        pblocks.reverse()

        # The heuristics are:
        # * Work on blocks of changed lines (effectively diff hunks with -U0).
        #   This could potentially be smarter but works well enough.
        # * For a non-matching section, do a best-effort fit. Match lines in
        #   diff hunks 1:1, dropping lines as necessary.
        # * Repeat the last line as a last resort.

        # First, replace as much as possible without repeating the last line.
        remaining = [(parent, []) for parent, _blocks in pblocks]
        for idx, (parent, blocks) in enumerate(pblocks):
            for (a1, a2, b1, b2), _t in blocks:
                if a2 - a1 >= b2 - b1:
                    # parent side is at least as long: map child lines 1:1
                    for bk in pycompat.xrange(b1, b2):
                        if child.fctxs[bk] == childfctx:
                            ak = min(a1 + (bk - b1), a2 - 1)
                            child.fctxs[bk] = parent.fctxs[ak]
                            child.linenos[bk] = parent.linenos[ak]
                            child.skips[bk] = True
                else:
                    # parent side is shorter: defer to the repeat-last pass
                    remaining[idx][1].append((a1, a2, b1, b2))

        # Then, look at anything left, which might involve repeating the last
        # line.
        for parent, blocks in remaining:
            for a1, a2, b1, b2 in blocks:
                for bk in pycompat.xrange(b1, b2):
                    if child.fctxs[bk] == childfctx:
                        ak = min(a1 + (bk - b1), a2 - 1)
                        child.fctxs[bk] = parent.fctxs[ak]
                        child.linenos[bk] = parent.linenos[ak]
                        child.skips[bk] = True
    return child
458
458
def annotate(base, parents, skiprevs=None, diffopts=None):
    """Core algorithm for filectx.annotate()

    `parents(fctx)` is a function returning a list of parent filectxs.
    """

    # This algorithm would prefer to be recursive, but Python is a
    # bit recursion-hostile. Instead we do an iterative
    # depth-first search.

    # 1st DFS pre-calculates pcache and needed
    visit = [base]
    pcache = {}  # fctx -> list of parent fctxs
    needed = {base: 1}  # fctx -> number of children still needing its data
    while visit:
        f = visit.pop()
        if f in pcache:
            continue
        pl = parents(f)
        pcache[f] = pl
        for p in pl:
            needed[p] = needed.get(p, 0) + 1
            if p not in pcache:
                visit.append(p)

    # 2nd DFS does the actual annotate
    visit[:] = [base]
    hist = {}  # fctx -> annotated file data, kept only while still needed
    while visit:
        f = visit[-1]  # peek; popped only once fully processed
        if f in hist:
            visit.pop()
            continue

        ready = True
        pl = pcache[f]
        for p in pl:
            if p not in hist:
                # a parent is unannotated: process it first, revisit f later
                ready = False
                visit.append(p)
        if ready:
            visit.pop()
            curr = _decoratelines(f.data(), f)
            skipchild = False
            if skiprevs is not None:
                skipchild = f._changeid in skiprevs
            curr = _annotatepair([hist[p] for p in pl], f, curr, skipchild,
                                 diffopts)
            for p in pl:
                # free parent data once no remaining child needs it
                if needed[p] == 1:
                    del hist[p]
                    del needed[p]
                else:
                    needed[p] -= 1

            hist[f] = curr
            del pcache[f]

    a = hist[base]
    return [annotateline(*r) for r in zip(a.fctxs, a.linenos, a.skips,
                                          mdiff.splitnewlines(a.text))]
520
520
def toposort(revs, parentsfunc, firstbranch=()):
    """Yield revisions from heads to roots one (topo) branch at a time.

    This function aims to be used by a graph generator that wishes to minimize
    the number of parallel branches and their interleaving.

    Example iteration order (numbers show the "true" order in a changelog):

      o  4
      |
      o  1
      |
      | o  3
      | |
      | o  2
      |/
      o  0

    Note that the ancestors of merges are understood by the current
    algorithm to be on the same branch. This means no reordering will
    occur behind a merge.
    """

    # Retention-based algorithm: revisions are held back until the
    # branch they belong to can be emitted as a contiguous run that
    # "merges" directly into an already-emitted revision. During
    # iteration revisions fall into two camps:
    #
    #   (A) already emitted — represented by the special groups[0]
    #       entry, whose revs list stays empty and whose blocked set
    #       holds the parents of everything emitted so far;
    #   (B) retained — stored as (revs, blocked) subgroups, where
    #       'blocked' is the set of revisions that must appear before
    #       the subgroup's 'revs' can be displayed.
    #
    # For each revision REV:
    #   1) if some subgroup is blocked on REV, REV joins it (merging
    #      subgroups that share the blocker);
    #   2) otherwise REV starts a new subgroup (a new head).
    # A subgroup is flushed as soon as it is blocked only on revisions
    # that have already been emitted.
    #
    # Pre-seeding 'firstbranch' forces every other branch to wait until
    # that branch has been emitted.

    revs.sort(reverse=True)

    # Parents of already-emitted revisions; the graph generator knows
    # about these, so nothing referencing them needs to be delayed.
    unblocked = set(firstbranch)

    # groups[0] is the "already emitted" sentinel described above.
    groups = [([], unblocked)]
    pendingheap = []
    pendingset = set()

    heapq.heapify(pendingheap)
    heappop = heapq.heappop
    heappush = heapq.heappush
    for currentrev in revs:
        # The heap yields its smallest element; negate to get highest-first.
        if currentrev not in pendingset:
            heappush(pendingheap, -currentrev)
            pendingset.add(currentrev)
        # Drain pending revisions until currentrev itself is processed.
        rev = None
        while rev != currentrev:
            rev = -heappop(pendingheap)
            pendingset.remove(rev)

            # Subgroups currently blocked on this revision.
            waitingidxs = [i for i, g in enumerate(groups) if rev in g[1]]

            if waitingidxs:
                # Merge every subgroup sharing this blocker into the
                # oldest one. (Merging could happen earlier — when two
                # groups first share a blocker — but doing it here keeps
                # the algorithm simpler. Keeping the oldest group first
                # is a heuristic; preferring the longest might shorten
                # parallel lines in the drawing.)
                targetidx = waitingidxs.pop(0)
                targetrevs, targetblocked = groups[targetidx]
                for i in waitingidxs:
                    other = groups[i]
                    targetrevs.extend(other[0])
                    targetblocked |= other[1]
                # Drop merged subgroups, last index first so earlier
                # indices stay valid.
                for i in reversed(waitingidxs):
                    del groups[i]
            else:
                # Nobody is waiting for this revision: it is a new head.
                targetidx = len(groups)
                groups.append(([], {rev}))

            group = groups[targetidx]

            # Record the revision itself (only for revs in the input
            # set) after the merge step, so that everything that relied
            # on it precedes it, and extend the blocked set with its
            # parents.
            if rev == currentrev:
                group[0].append(rev)
                group[1].remove(rev)
                parents = [p for p in parentsfunc(rev) if p > node.nullrev]
                group[1].update(parents)
                for p in parents:
                    if p not in pendingset:
                        pendingset.add(p)
                        heappush(pendingheap, -p)

            # Decide whether a subgroup can be emitted now.
            #
            # If 'unblocked' is empty we either just started (with no
            # priority branch) or finished a disconnected component; in
            # that case arbitrarily pick the oldest retained subgroup.
            # Otherwise, flush the current subgroup only if it is
            # blocked solely on revisions already emitted.
            if not unblocked:
                if len(groups) > 1:  # display other subset
                    targetidx = 1
                    group = groups[1]
            elif not group[1] & unblocked:
                group = None

            if group is not None:
                # The subgroup's blockers are now known to the consumer.
                unblocked |= group[1]
                for r in group[0]:
                    yield r
                # Remove the flushed subgroup — except groups[0], which
                # is only ever emptied.
                if targetidx:
                    del groups[targetidx]
                else:
                    group[0][:] = []
    # Flush subgroups still waiting on revisions outside the input set.
    for g in groups:
        for r in g[0]:
            yield r
718
def headrevs(revs, parentsfn):
    """Resolve the set of heads from a set of revisions.

    Receives an iterable of revision numbers and a callable that receives a
    revision number and returns an iterable of parent revision numbers,
    possibly including nullrev.

    Returns a set of revision numbers that are DAG heads within the passed
    subset.

    ``nullrev`` is never included in the returned set, even if it is provided
    in the input set.
    """
    # Materialize the input exactly once: the revisions are consumed twice
    # below (seed set + parent scan), so a one-shot iterator (e.g. a
    # generator) would otherwise be exhausted after the first use and every
    # revision would silently be reported as a head.
    revs = set(revs)

    headrevs = set(revs)

    # A revision is a head iff no revision in the subset names it as a
    # parent; discard anything that is a parent of some member.
    for rev in revs:
        for prev in parentsfn(rev):
            headrevs.discard(prev)

    headrevs.discard(node.nullrev)

    return headrevs
@@ -1,75 +1,64 b''
1 # dagutil.py - dag utilities for mercurial
1 # dagutil.py - dag utilities for mercurial
2 #
2 #
3 # Copyright 2010 Benoit Boissinot <bboissin@gmail.com>
3 # Copyright 2010 Benoit Boissinot <bboissin@gmail.com>
4 # and Peter Arrenbrecht <peter@arrenbrecht.ch>
4 # and Peter Arrenbrecht <peter@arrenbrecht.ch>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 from .node import nullrev
11 from .node import nullrev
12
12
13 from . import (
14 dagop,
15 )
16
class revlogdag(object):
    '''dag interface to a revlog'''

    def __init__(self, revlog):
        self._revlog = revlog

    def parents(self, ix):
        """Return the list of non-null parent revisions of ``ix``.

        Reads the raw index entry (p1 at slot 5, p2 at slot 6) and keeps
        whichever parents are not nullrev, preserving p1-before-p2 order.
        """
        entry = self._revlog.index[ix]
        return [p for p in (entry[5], entry[6]) if p != nullrev]

    def linearize(self, ixs):
        '''linearize and topologically sort a list of revisions

        The linearization process tries to create long runs of revs where
        a child rev comes immediately after its first parent. This is done by
        visiting the heads of the given revs in inverse topological order,
        and for each visited rev, visiting its second parent, then its first
        parent, then adding the rev itself to the output list.
        '''
        result = []
        visit = list(dagop.headrevs(ixs, self.parents))
        visit.sort(reverse=True)
        finished = set()

        while visit:
            rev = visit.pop()
            if rev < 0:
                # Negative marker: all of rev's relevant parents have been
                # pushed already; emit it now (decode via -rev - 1).
                rev = -rev - 1
                if rev not in finished:
                    result.append(rev)
                    finished.add(rev)
            else:
                # Re-push as a marker, then queue unfinished in-set parents
                # so they are emitted first.
                visit.append(-rev - 1)
                visit += [p for p in self.parents(rev)
                          if p in ixs and p not in finished]
        assert len(result) == len(ixs)
        return result
General Comments 0
You need to be logged in to leave comments. Login now