dagop: unnest inner generator of revancestors()...
Yuya Nishihara
r32997:b9e2269a default
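
The change hoists the inner iterate() generator out of revancestors() to module level as _genrevancestors(), leaving the public function as a thin wrapper that builds the lazy generatorset. The snippet below is a minimal, self-contained sketch of that pattern only; the names _genancestors, lazyset, parentrevs and the toy DAG are invented for illustration and are not Mercurial APIs.

import heapq

def _genancestors(parentrevs, revs):
    # Module-level generator: yield the given revs and all their ancestors,
    # highest revision first.
    seen = set()
    heap = [-r for r in revs]  # negate revs so the min-heap acts as a max-heap
    heapq.heapify(heap)
    while heap:
        current = -heapq.heappop(heap)
        if current in seen:
            continue
        seen.add(current)
        yield current
        for parent in parentrevs(current):
            if parent != -1:  # -1 plays the role of node.nullrev
                heapq.heappush(heap, -parent)

class lazyset(object):
    # Toy stand-in for smartset.generatorset: defer iteration of a generator.
    def __init__(self, gen):
        self._gen = gen
    def __iter__(self):
        return self._gen

def ancestors(parentrevs, revs):
    # Same shape as the new revancestors(): build the generator, wrap it once.
    gen = _genancestors(parentrevs, revs)
    return lazyset(gen)

# toy DAG: 0 <- 1, 1 <- 2, 1 <- 3; each value is a (p1, p2) pair, -1 = no parent
_parents = {0: (-1, -1), 1: (0, -1), 2: (1, -1), 3: (1, -1)}
print(list(ancestors(lambda r: _parents[r], [2, 3])))  # -> [3, 2, 1, 0]
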
@@ -1,424 +1,424 @@
# dagop.py - graph ancestry and topology algorithm for revset
#
# Copyright 2010 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import heapq

from . import (
    error,
    mdiff,
    node,
    patch,
    smartset,
)

baseset = smartset.baseset
generatorset = smartset.generatorset

-def revancestors(repo, revs, followfirst):
-    """Like revlog.ancestors(), but supports followfirst."""
+def _genrevancestors(repo, revs, followfirst):
     if followfirst:
         cut = 1
     else:
         cut = None
     cl = repo.changelog
-
-    def iterate():
-        revs.sort(reverse=True)
-        irevs = iter(revs)
-        h = []
-
-        inputrev = next(irevs, None)
-        if inputrev is not None:
-            heapq.heappush(h, -inputrev)
-
-        seen = set()
-        while h:
-            current = -heapq.heappop(h)
-            if current == inputrev:
-                inputrev = next(irevs, None)
-                if inputrev is not None:
-                    heapq.heappush(h, -inputrev)
-            if current not in seen:
-                seen.add(current)
-                yield current
-                try:
-                    for parent in cl.parentrevs(current)[:cut]:
-                        if parent != node.nullrev:
-                            heapq.heappush(h, -parent)
-                except error.WdirUnsupported:
-                    for parent in repo[current].parents()[:cut]:
-                        if parent.rev() != node.nullrev:
-                            heapq.heappush(h, -parent.rev())
+    revs.sort(reverse=True)
+    irevs = iter(revs)
+    h = []
+
+    inputrev = next(irevs, None)
+    if inputrev is not None:
+        heapq.heappush(h, -inputrev)
+
+    seen = set()
+    while h:
+        current = -heapq.heappop(h)
+        if current == inputrev:
+            inputrev = next(irevs, None)
+            if inputrev is not None:
+                heapq.heappush(h, -inputrev)
+        if current not in seen:
+            seen.add(current)
+            yield current
+            try:
+                for parent in cl.parentrevs(current)[:cut]:
+                    if parent != node.nullrev:
+                        heapq.heappush(h, -parent)
+            except error.WdirUnsupported:
+                for parent in repo[current].parents()[:cut]:
+                    if parent.rev() != node.nullrev:
+                        heapq.heappush(h, -parent.rev())

-    return generatorset(iterate(), iterasc=False)
+def revancestors(repo, revs, followfirst):
+    """Like revlog.ancestors(), but supports followfirst."""
+    gen = _genrevancestors(repo, revs, followfirst)
+    return generatorset(gen, iterasc=False)

def revdescendants(repo, revs, followfirst):
    """Like revlog.descendants() but supports followfirst."""
    if followfirst:
        cut = 1
    else:
        cut = None

    def iterate():
        cl = repo.changelog
        # XXX this should be 'parentset.min()' assuming 'parentset' is a
        # smartset (and if it is not, it should.)
        first = min(revs)
        nullrev = node.nullrev
        if first == nullrev:
            # Are there nodes with a null first parent and a non-null
            # second one? Maybe. Do we care? Probably not.
            for i in cl:
                yield i
        else:
            seen = set(revs)
            for i in cl.revs(first + 1):
                for x in cl.parentrevs(i)[:cut]:
                    if x != nullrev and x in seen:
                        seen.add(i)
                        yield i
                        break

    return generatorset(iterate(), iterasc=True)

def _reachablerootspure(repo, minroot, roots, heads, includepath):
    """return (heads(::<roots> and ::<heads>))

    If includepath is True, return (<roots>::<heads>)."""
    if not roots:
        return []
    parentrevs = repo.changelog.parentrevs
    roots = set(roots)
    visit = list(heads)
    reachable = set()
    seen = {}
    # prefetch all the things! (because python is slow)
    reached = reachable.add
    dovisit = visit.append
    nextvisit = visit.pop
    # open-code the post-order traversal due to the tiny size of
    # sys.getrecursionlimit()
    while visit:
        rev = nextvisit()
        if rev in roots:
            reached(rev)
            if not includepath:
                continue
        parents = parentrevs(rev)
        seen[rev] = parents
        for parent in parents:
            if parent >= minroot and parent not in seen:
                dovisit(parent)
    if not reachable:
        return baseset()
    if not includepath:
        return reachable
    for rev in sorted(seen):
        for parent in seen[rev]:
            if parent in reachable:
                reached(rev)
    return reachable

def reachableroots(repo, roots, heads, includepath=False):
    """return (heads(::<roots> and ::<heads>))

    If includepath is True, return (<roots>::<heads>)."""
    if not roots:
        return baseset()
    minroot = roots.min()
    roots = list(roots)
    heads = list(heads)
    try:
        revs = repo.changelog.reachableroots(minroot, heads, roots, includepath)
    except AttributeError:
        revs = _reachablerootspure(repo, minroot, roots, heads, includepath)
    revs = baseset(revs)
    revs.sort()
    return revs

def _changesrange(fctx1, fctx2, linerange2, diffopts):
    """Return `(diffinrange, linerange1)` where `diffinrange` is True
    if diff from fctx2 to fctx1 has changes in linerange2 and
    `linerange1` is the new line range for fctx1.
    """
    blocks = mdiff.allblocks(fctx1.data(), fctx2.data(), diffopts)
    filteredblocks, linerange1 = mdiff.blocksinrange(blocks, linerange2)
    diffinrange = any(stype == '!' for _, stype in filteredblocks)
    return diffinrange, linerange1

def blockancestors(fctx, fromline, toline, followfirst=False):
    """Yield ancestors of `fctx` with respect to the block of lines within
    `fromline`-`toline` range.
    """
    diffopts = patch.diffopts(fctx._repo.ui)
    introrev = fctx.introrev()
    if fctx.rev() != introrev:
        fctx = fctx.filectx(fctx.filenode(), changeid=introrev)
    visit = {(fctx.linkrev(), fctx.filenode()): (fctx, (fromline, toline))}
    while visit:
        c, linerange2 = visit.pop(max(visit))
        pl = c.parents()
        if followfirst:
            pl = pl[:1]
        if not pl:
            # The block originates from the initial revision.
            yield c, linerange2
            continue
        inrange = False
        for p in pl:
            inrangep, linerange1 = _changesrange(p, c, linerange2, diffopts)
            inrange = inrange or inrangep
            if linerange1[0] == linerange1[1]:
                # Parent's linerange is empty, meaning that the block got
                # introduced in this revision; no need to go further in this
                # branch.
                continue
            # Set _descendantrev with 'c' (a known descendant) so that, when
            # _adjustlinkrev is called for 'p', it receives this descendant
            # (as srcrev) instead of the possibly topmost introrev.
            p._descendantrev = c.rev()
            visit[p.linkrev(), p.filenode()] = p, linerange1
        if inrange:
            yield c, linerange2

def blockdescendants(fctx, fromline, toline):
    """Yield descendants of `fctx` with respect to the block of lines within
    `fromline`-`toline` range.
    """
    # First possibly yield 'fctx' if it has changes in range with respect to
    # its parents.
    try:
        c, linerange1 = next(blockancestors(fctx, fromline, toline))
    except StopIteration:
        pass
    else:
        if c == fctx:
            yield c, linerange1

    diffopts = patch.diffopts(fctx._repo.ui)
    fl = fctx.filelog()
    seen = {fctx.filerev(): (fctx, (fromline, toline))}
    for i in fl.descendants([fctx.filerev()]):
        c = fctx.filectx(i)
        inrange = False
        for x in fl.parentrevs(i):
            try:
                p, linerange2 = seen[x]
            except KeyError:
                # nullrev or other branch
                continue
            inrangep, linerange1 = _changesrange(c, p, linerange2, diffopts)
            inrange = inrange or inrangep
            # If revision 'i' has been seen (it's a merge), we assume that its
            # line range is the same independently of which parent was used
            # to compute it.
            assert i not in seen or seen[i][1] == linerange1, (
                'computed line range for %s is not consistent between '
                'ancestor branches' % c)
            seen[i] = c, linerange1
        if inrange:
            yield c, linerange1

def toposort(revs, parentsfunc, firstbranch=()):
    """Yield revisions from heads to roots one (topo) branch at a time.

    This function aims to be used by a graph generator that wishes to minimize
    the number of parallel branches and their interleaving.

    Example iteration order (numbers show the "true" order in a changelog):

      o  4
      |
      o  1
      |
      | o  3
      | |
      | o  2
      |/
      o  0

    Note that the ancestors of merges are understood by the current
    algorithm to be on the same branch. This means no reordering will
    occur behind a merge.
    """

    ### Quick summary of the algorithm
    #
    # This function is based around a "retention" principle. We keep revisions
    # in memory until we are ready to emit a whole branch that immediately
    # "merges" into an existing one. This reduces the number of parallel
    # branches with interleaved revisions.
    #
    # During iteration revs are split into two groups:
    # A) revisions already emitted
    # B) revisions in "retention". They are stored as different subgroups.
    #
    # for each REV, we do the following logic:
    #
    #   1) if REV is a parent of (A), we will emit it. If there is a
    #   retention group ((B) above) that is blocked on REV being
    #   available, we emit all the revisions out of that retention
    #   group first.
    #
    #   2) else, we search for a subgroup in (B) awaiting REV's availability.
    #   If such a subgroup exists, we add REV to it and the subgroup now
    #   awaits the availability of REV.parents().
    #
    #   3) finally if no such group existed in (B), we create a new subgroup.
    #
    #
    # To bootstrap the algorithm, we emit the tipmost revision (which
    # puts it in group (A) from above).

    revs.sort(reverse=True)

    # Set of parents of revisions that have been emitted. They can be
    # considered unblocked as the graph generator is already aware of them so
    # there is no need to delay the revisions that reference them.
    #
    # If someone wants to prioritize a branch over the others, pre-filling this
    # set will force all other branches to wait until this branch is ready to
    # be emitted.
    unblocked = set(firstbranch)

    # list of groups waiting to be displayed, each group is defined by:
    #
    #   (revs:    lists of revs waiting to be displayed,
    #    blocked: set of revs that cannot be displayed before those in 'revs')
    #
    # The second value ('blocked') corresponds to parents of any revision in
    # the group ('revs') that is not itself contained in the group. The main
    # idea of this algorithm is to delay as much as possible the emission of
    # any revision. This means waiting until the moment we are about to display
    # those parents before displaying the revs in a group.
    #
    # This first implementation is smart until it encounters a merge: it will
    # emit revs as soon as any parent is about to be emitted and can grow an
    # arbitrary number of revs in 'blocked'. In practice this means we properly
    # retain new branches but give up on any special ordering for ancestors
    # of merges. The implementation can be improved to handle this better.
    #
    # The first subgroup is special. It corresponds to all the revisions that
    # were already emitted. The 'revs' list is expected to be empty and the
    # 'blocked' set contains the parent revisions of already emitted revisions.
    #
    # You could pre-seed the <parents> set of groups[0] with specific
    # changesets to select what the first emitted branch should be.
    groups = [([], unblocked)]
    pendingheap = []
    pendingset = set()

    heapq.heapify(pendingheap)
    heappop = heapq.heappop
    heappush = heapq.heappush
    for currentrev in revs:
        # Heap works with the smallest element; we want the highest, so we
        # invert.
        if currentrev not in pendingset:
            heappush(pendingheap, -currentrev)
            pendingset.add(currentrev)
        # iterate on pending revs until after the current rev has been
        # processed.
        rev = None
        while rev != currentrev:
            rev = -heappop(pendingheap)
            pendingset.remove(rev)

            # Look for a subgroup that is blocked, waiting for the current
            # revision.
            matching = [i for i, g in enumerate(groups) if rev in g[1]]

            if matching:
                # The main idea is to gather together all sets that are blocked
                # on the same revision.
                #
                # Groups are merged when a common blocking ancestor is
                # observed. For example, given two groups:
                #
                # revs [5, 4] waiting for 1
                # revs [3, 2] waiting for 1
                #
                # These two groups will be merged when we process
                # 1. In theory, we could have merged the groups when
                # we added 2 to the group it is now in (we could have
                # noticed the groups were both blocked on 1 then), but
                # the way it works now makes the algorithm simpler.
                #
                # We also always keep the oldest subgroup first. We can
                # probably improve the behavior by having the longest set
                # first. That way, graph algorithms could minimise the length
                # of parallel lines in their drawing. This is currently not
                # done.
                targetidx = matching.pop(0)
                trevs, tparents = groups[targetidx]
                for i in matching:
                    gr = groups[i]
                    trevs.extend(gr[0])
                    tparents |= gr[1]
                # delete all merged subgroups (except the one we kept)
                # (starting from the last subgroup for performance and
                # sanity reasons)
                for i in reversed(matching):
                    del groups[i]
            else:
                # This is a new head. We create a new subgroup for it.
                targetidx = len(groups)
                groups.append(([], {rev}))

            gr = groups[targetidx]

            # We now add the current nodes to this subgroup. This is done
            # after the subgroup merging because all elements from a subgroup
            # that relied on this rev must precede it.
            #
            # we also update the <parents> set to include the parents of the
            # new nodes.
            if rev == currentrev: # only display stuff in rev
                gr[0].append(rev)
            gr[1].remove(rev)
            parents = [p for p in parentsfunc(rev) if p > node.nullrev]
            gr[1].update(parents)
            for p in parents:
                if p not in pendingset:
                    pendingset.add(p)
                    heappush(pendingheap, -p)

            # Look for a subgroup to display
            #
            # When unblocked is empty (if clause), we were not waiting for any
            # revisions during the first iteration (if no priority was given)
            # or if we emitted a whole disconnected set of the graph (reached a
            # root). In that case we arbitrarily take the oldest known
            # subgroup. The heuristic could probably be better.
            #
            # Otherwise (elif clause) if the subgroup is blocked on
            # a revision we just emitted, we can safely emit it as
            # well.
            if not unblocked:
                if len(groups) > 1:  # display other subset
                    targetidx = 1
                    gr = groups[1]
            elif not gr[1] & unblocked:
                gr = None

            if gr is not None:
                # update the set of awaited revisions with the one from the
                # subgroup
                unblocked |= gr[1]
                # output all revisions in the subgroup
                for r in gr[0]:
                    yield r
                # delete the subgroup that you just output
                # unless it is groups[0] in which case you just empty it.
                if targetidx:
                    del groups[targetidx]
                else:
                    gr[0][:] = []
    # Check if we have some subgroup waiting for revisions we are not going to
    # iterate over
    for g in groups:
        for r in g[0]:
            yield r
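
As a usage sketch for the toposort() shown above, the call below rebuilds the example graph from its docstring with a plain dict as the parent table. The dict and the expected output are illustrative, and it assumes a mercurial package from roughly this release is importable.

from mercurial import dagop

# parent table for the docstring graph: 0 is the root, 0-1-4 and 0-2-3 the
# two branches; -1 stands for node.nullrev (no parent)
_parents = {0: (-1, -1), 1: (0, -1), 2: (0, -1), 3: (2, -1), 4: (1, -1)}
revs = [0, 1, 2, 3, 4]  # toposort() sorts this list in place
print(list(dagop.toposort(revs, lambda r: _parents[r])))
# -> [4, 1, 3, 2, 0]: each topological branch is emitted as a block, heads first
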