##// END OF EJS Templates
dagop: compute depth in revancestors() generator...
Yuya Nishihara -
r33001:92d0945a default
parent child Browse files
Show More
@@ -1,427 +1,429 b''
1 # dagop.py - graph ancestry and topology algorithm for revset
1 # dagop.py - graph ancestry and topology algorithm for revset
2 #
2 #
3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import heapq
10 import heapq
11
11
12 from . import (
12 from . import (
13 error,
13 error,
14 mdiff,
14 mdiff,
15 node,
15 node,
16 patch,
16 patch,
17 smartset,
17 smartset,
18 )
18 )
19
19
20 baseset = smartset.baseset
20 baseset = smartset.baseset
21 generatorset = smartset.generatorset
21 generatorset = smartset.generatorset
22
22
def _genrevancestors(repo, revs, followfirst):
    """Yield ancestor revisions of revs, highest revision number first.

    If followfirst is True, only the first parent of each revision is
    followed. The depth of each revision relative to the nearest input
    rev is carried on the heap alongside the (negated) revision number.
    """
    cut = 1 if followfirst else None
    cl = repo.changelog

    # Feed input revs to the heap lazily so that early revisions can be
    # yielded before the full input set has been computed.
    revs.sort(reverse=True)
    irevs = iter(revs)
    pendingheap = []  # [(-rev, depth), ...] (i.e. lower depth first)

    inputrev = next(irevs, None)
    if inputrev is not None:
        heapq.heappush(pendingheap, (-inputrev, 0))

    lastrev = None
    while pendingheap:
        negrev, curdepth = heapq.heappop(pendingheap)
        currev = -negrev
        if currev == inputrev:
            # pull the next input rev onto the heap
            inputrev = next(irevs, None)
            if inputrev is not None:
                heapq.heappush(pendingheap, (-inputrev, 0))
        if currev != lastrev:
            lastrev = currev
            yield currev
            pdepth = curdepth + 1
            try:
                for prev in cl.parentrevs(currev)[:cut]:
                    if prev != node.nullrev:
                        heapq.heappush(pendingheap, (-prev, pdepth))
            except error.WdirUnsupported:
                # the working directory has no entry in the changelog;
                # fall back to context objects for its parents
                for pctx in repo[currev].parents()[:cut]:
                    if pctx.rev() != node.nullrev:
                        heapq.heappush(pendingheap, (-pctx.rev(), pdepth))
def revancestors(repo, revs, followfirst):
    """Like revlog.ancestors(), but supports followfirst."""
    # ancestors are generated in decreasing revision order
    return generatorset(_genrevancestors(repo, revs, followfirst),
                        iterasc=False)
def revdescendants(repo, revs, followfirst):
    """Like revlog.descendants() but supports followfirst."""
    cut = 1 if followfirst else None

    def iterate():
        cl = repo.changelog
        # XXX this should be 'parentset.min()' assuming 'parentset' is a
        # smartset (and if it is not, it should.)
        first = min(revs)
        nullrev = node.nullrev
        if first == nullrev:
            # Are there nodes with a null first parent and a non-null
            # second one? Maybe. Do we care? Probably not.
            for rev in cl:
                yield rev
        else:
            seen = set(revs)
            for rev in cl.revs(first + 1):
                for prev in cl.parentrevs(rev)[:cut]:
                    if prev != nullrev and prev in seen:
                        seen.add(rev)
                        yield rev
                        break

    # descendants are generated in increasing revision order
    return generatorset(iterate(), iterasc=True)
def _reachablerootspure(repo, minroot, roots, heads, includepath):
    """return (heads(::<roots> and ::<heads>))

    If includepath is True, return (<roots>::<heads>)."""
    if not roots:
        return []
    parentrevs = repo.changelog.parentrevs
    roots = set(roots)
    visit = list(heads)
    reachable = set()
    seen = {}
    # prefetch all the things! (because python is slow)
    addreachable = reachable.add
    pushvisit = visit.append
    popvisit = visit.pop
    # open-code the post-order traversal due to the tiny size of
    # sys.getrecursionlimit()
    while visit:
        rev = popvisit()
        if rev in roots:
            addreachable(rev)
            if not includepath:
                continue
        parents = parentrevs(rev)
        seen[rev] = parents
        for parent in parents:
            if parent >= minroot and parent not in seen:
                pushvisit(parent)
    if not reachable:
        return baseset()
    if not includepath:
        return reachable
    # second pass: mark every seen rev with a reachable parent as
    # reachable itself, walking upward in revision order
    for rev in sorted(seen):
        for parent in seen[rev]:
            if parent in reachable:
                addreachable(rev)
    return reachable
def reachableroots(repo, roots, heads, includepath=False):
    """return (heads(::<roots> and ::<heads>))

    If includepath is True, return (<roots>::<heads>)."""
    if not roots:
        return baseset()
    minroot = roots.min()
    roots = list(roots)
    heads = list(heads)
    try:
        # prefer the C implementation on the changelog when available
        revs = repo.changelog.reachableroots(minroot, heads, roots, includepath)
    except AttributeError:
        revs = _reachablerootspure(repo, minroot, roots, heads, includepath)
    revs = baseset(revs)
    revs.sort()
    return revs
def _changesrange(fctx1, fctx2, linerange2, diffopts):
    """Return `(diffinrange, linerange1)` where `diffinrange` is True
    if diff from fctx2 to fctx1 has changes in linerange2 and
    `linerange1` is the new line range for fctx1.
    """
    blocks = mdiff.allblocks(fctx1.data(), fctx2.data(), diffopts)
    filteredblocks, linerange1 = mdiff.blocksinrange(blocks, linerange2)
    # '!' marks a changed block (as opposed to an equal one)
    diffinrange = any(btype == '!' for _, btype in filteredblocks)
    return diffinrange, linerange1
def blockancestors(fctx, fromline, toline, followfirst=False):
    """Yield ancestors of `fctx` with respect to the block of lines within
    `fromline`-`toline` range.
    """
    diffopts = patch.diffopts(fctx._repo.ui)
    introrev = fctx.introrev()
    if fctx.rev() != introrev:
        # rebase fctx onto the revision that actually introduced it
        fctx = fctx.filectx(fctx.filenode(), changeid=introrev)
    visit = {(fctx.linkrev(), fctx.filenode()): (fctx, (fromline, toline))}
    while visit:
        # process the highest (linkrev, filenode) entry first
        ctx, linerange2 = visit.pop(max(visit))
        parents = ctx.parents()
        if followfirst:
            parents = parents[:1]
        if not parents:
            # The block originates from the initial revision.
            yield ctx, linerange2
            continue
        inrange = False
        for pctx in parents:
            inrangep, linerange1 = _changesrange(pctx, ctx, linerange2,
                                                 diffopts)
            inrange = inrange or inrangep
            if linerange1[0] == linerange1[1]:
                # Parent's linerange is empty, meaning that the block got
                # introduced in this revision; no need to go further in
                # this branch.
                continue
            # Set _descendantrev with 'ctx' (a known descendant) so that,
            # when _adjustlinkrev is called for 'pctx', it receives this
            # descendant (as srcrev) instead possibly topmost introrev.
            pctx._descendantrev = ctx.rev()
            visit[pctx.linkrev(), pctx.filenode()] = pctx, linerange1
        if inrange:
            yield ctx, linerange2
def blockdescendants(fctx, fromline, toline):
    """Yield descendants of `fctx` with respect to the block of lines within
    `fromline`-`toline` range.
    """
    # First possibly yield 'fctx' if it has changes in range with respect to
    # its parents.
    try:
        c, linerange1 = next(blockancestors(fctx, fromline, toline))
    except StopIteration:
        pass
    else:
        if c == fctx:
            yield c, linerange1

    diffopts = patch.diffopts(fctx._repo.ui)
    fl = fctx.filelog()
    seen = {fctx.filerev(): (fctx, (fromline, toline))}
    for i in fl.descendants([fctx.filerev()]):
        c = fctx.filectx(i)
        inrange = False
        for x in fl.parentrevs(i):
            try:
                p, linerange2 = seen[x]
            except KeyError:
                # nullrev or other branch
                continue
            inrangep, linerange1 = _changesrange(c, p, linerange2, diffopts)
            inrange = inrange or inrangep
            # If revision 'i' has been seen (it's a merge), we assume that its
            # line range is the same independently of which parents was used
            # to compute it.
            assert i not in seen or seen[i][1] == linerange1, (
                'computed line range for %s is not consistent between '
                'ancestor branches' % c)
            seen[i] = c, linerange1
        if inrange:
            yield c, linerange1
def toposort(revs, parentsfunc, firstbranch=()):
    """Yield revisions from heads to roots one (topo) branch at a time.

    This function aims to be used by a graph generator that wishes to
    minimize the number of parallel branches and their interleaving.

    Example iteration order (numbers show the "true" order in a changelog):

      o  4
      |
      o  1
      |
      | o  3
      | |
      | o  2
      |/
      o  0

    Note that the ancestors of merges are understood by the current
    algorithm to be on the same branch. This means no reordering will
    occur behind a merge.
    """

    ### Quick summary of the algorithm
    #
    # This function is based around a "retention" principle. We keep
    # revisions in memory until we are ready to emit a whole branch that
    # immediately "merges" into an existing one. This reduces the number
    # of parallel branches with interleaved revisions.
    #
    # During iteration revs are split into two groups:
    # A) revision already emitted
    # B) revision in "retention". They are stored as different subgroups.
    #
    # for each REV, we do the following logic:
    #
    #   1) if REV is a parent of (A), we will emit it. If there is a
    #   retention group ((B) above) that is blocked on REV being
    #   available, we emit all the revisions out of that retention
    #   group first.
    #
    #   2) else, we'll search for a subgroup in (B) awaiting for REV to be
    #   available, if such subgroup exist, we add REV to it and the subgroup is
    #   now awaiting for REV.parents() to be available.
    #
    #   3) finally if no such group existed in (B), we create a new subgroup.
    #
    # To bootstrap the algorithm, we emit the tipmost revision (which
    # puts it in group (A) from above).

    revs.sort(reverse=True)

    # Set of parents of revision that have been emitted. They can be
    # considered unblocked as the graph generator is already aware of them
    # so there is no need to delay the revisions that reference them.
    #
    # If someone wants to prioritize a branch over the others, pre-filling
    # this set will force all other branches to wait until this branch is
    # ready to be emitted.
    unblocked = set(firstbranch)

    # List of groups waiting to be displayed, each group is defined by:
    #
    #   (revs:    lists of revs waiting to be displayed,
    #    blocked: set of that cannot be displayed before those in 'revs')
    #
    # The second value ('blocked') correspond to parents of any revision in
    # the group ('revs') that is not itself contained in the group. The main
    # idea of this algorithm is to delay as much as possible the emission of
    # any revision. This means waiting for the moment we are about to
    # display these parents to display the revs in a group.
    #
    # This first implementation is smart until it encounters a merge: it
    # will emit revs as soon as any parent is about to be emitted and can
    # grow an arbitrary number of revs in 'blocked'. In practice this mean
    # we properly retains new branches but gives up on any special ordering
    # for ancestors of merges. The implementation can be improved to handle
    # this better.
    #
    # The first subgroup is special. It corresponds to all the revision
    # that were already emitted. The 'revs' lists is expected to be empty
    # and the 'blocked' set contains the parents revisions of already
    # emitted revision.
    #
    # You could pre-seed the <parents> set of groups[0] to a specific
    # changesets to select what the first emitted branch should be.
    groups = [([], unblocked)]
    pendingheap = []
    pendingset = set()

    heapq.heapify(pendingheap)
    heappop = heapq.heappop
    heappush = heapq.heappush
    for currentrev in revs:
        # Heap works with smallest element, we want highest so we invert
        if currentrev not in pendingset:
            heappush(pendingheap, -currentrev)
            pendingset.add(currentrev)
        # iterates on pending rev until after the current rev have been
        # processed.
        rev = None
        while rev != currentrev:
            rev = -heappop(pendingheap)
            pendingset.remove(rev)

            # Seek for a subgroup blocked, waiting for the current revision.
            matching = [i for i, g in enumerate(groups) if rev in g[1]]

            if matching:
                # The main idea is to gather together all sets that are
                # blocked on the same revision.
                #
                # Groups are merged when a common blocking ancestor is
                # observed. For example, given two groups:
                #
                #     revs [5, 4] waiting for 1
                #     revs [3, 2] waiting for 1
                #
                # These two groups will be merged when we process 1. In
                # theory, we could have merged the groups when we added 2
                # to the group it is now in (we could have noticed the
                # groups were both blocked on 1 then), but the way it works
                # now makes the algorithm simpler.
                #
                # We also always keep the oldest subgroup first. We can
                # probably improve the behavior by having the longest set
                # first. That way, graph algorithms could minimise the
                # length of parallel lines their drawing. This is currently
                # not done.
                targetidx = matching.pop(0)
                trevs, tparents = groups[targetidx]
                for i in matching:
                    gr = groups[i]
                    trevs.extend(gr[0])
                    tparents |= gr[1]
                # delete all merged subgroups (except the one we kept)
                # (starting from the last subgroup for performance and
                # sanity reasons)
                for i in reversed(matching):
                    del groups[i]
            else:
                # This is a new head. We create a new subgroup for it.
                targetidx = len(groups)
                groups.append(([], {rev}))

            gr = groups[targetidx]

            # We now add the current nodes to this subgroups. This is done
            # after the subgroup merging because all elements from a
            # subgroup that relied on this rev must precede it.
            #
            # we also update the <parents> set to include the parents of
            # the new nodes.
            if rev == currentrev:  # only display stuff in rev
                gr[0].append(rev)
            gr[1].remove(rev)
            parents = [p for p in parentsfunc(rev) if p > node.nullrev]
            gr[1].update(parents)
            for p in parents:
                if p not in pendingset:
                    pendingset.add(p)
                    heappush(pendingheap, -p)

            # Look for a subgroup to display
            #
            # When unblocked is empty (if clause), we were not waiting for
            # any revisions during the first iteration (if no priority was
            # given) or if we emitted a whole disconnected set of the graph
            # (reached a root). In that case we arbitrarily take the oldest
            # known subgroup. The heuristic could probably be better.
            #
            # Otherwise (elif clause) if the subgroup is blocked on a
            # revision we just emitted, we can safely emit it as well.
            if not unblocked:
                if len(groups) > 1:  # display other subset
                    targetidx = 1
                    gr = groups[1]
            elif not gr[1] & unblocked:
                gr = None

            if gr is not None:
                # update the set of awaited revisions with the one from the
                # subgroup
                unblocked |= gr[1]
                # output all revisions in the subgroup
                for r in gr[0]:
                    yield r
                # delete the subgroup that you just output
                # unless it is groups[0] in which case you just empty it.
                if targetidx:
                    del groups[targetidx]
                else:
                    gr[0][:] = []
    # Check if we have some subgroup waiting for revisions we are not going
    # to iterate over
    for g in groups:
        for r in g[0]:
            yield r
General Comments 0
You need to be logged in to leave comments. Login now