##// END OF EJS Templates
dagop: unnest inner generator of revdescendants()...
Yuya Nishihara -
r33073:b04cf7a6 default
parent child Browse files
Show More
@@ -1,450 +1,451 b''
1 # dagop.py - graph ancestry and topology algorithm for revset
1 # dagop.py - graph ancestry and topology algorithm for revset
2 #
2 #
3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import heapq
10 import heapq
11
11
12 from . import (
12 from . import (
13 error,
13 error,
14 mdiff,
14 mdiff,
15 node,
15 node,
16 patch,
16 patch,
17 smartset,
17 smartset,
18 )
18 )
19
19
20 baseset = smartset.baseset
20 baseset = smartset.baseset
21 generatorset = smartset.generatorset
21 generatorset = smartset.generatorset
22
22
23 # possible maximum depth between null and wdir()
23 # possible maximum depth between null and wdir()
24 _maxlogdepth = 0x80000000
24 _maxlogdepth = 0x80000000
25
25
def _genrevancestors(repo, revs, followfirst, startdepth, stopdepth):
    """Generate ancestor revisions of revs, including revs themselves.

    Revisions are yielded in descending order.  If followfirst is true,
    only first parents are followed.  Revisions closer than startdepth or
    at/deeper than stopdepth (both measured from the input revs, depth 0)
    are skipped.  Raises ProgrammingError on a negative stopdepth.
    """
    if followfirst:
        cut = 1
    else:
        cut = None
    if startdepth is None:
        startdepth = 0
    if stopdepth is None:
        stopdepth = _maxlogdepth
    if stopdepth == 0:
        return
    if stopdepth < 0:
        raise error.ProgrammingError('negative stopdepth')

    cl = repo.changelog

    # load input revs lazily to heap so earlier revisions can be yielded
    # without fully computing the input revs
    revs.sort(reverse=True)
    irevs = iter(revs)
    pendingheap = []  # [(-rev, depth), ...] (i.e. lower depth first)

    inputrev = next(irevs, None)
    if inputrev is not None:
        heapq.heappush(pendingheap, (-inputrev, 0))

    lastrev = None
    while pendingheap:
        currev, curdepth = heapq.heappop(pendingheap)
        currev = -currev
        if currev == inputrev:
            inputrev = next(irevs, None)
            if inputrev is not None:
                heapq.heappush(pendingheap, (-inputrev, 0))
        # rescan parents until curdepth >= startdepth because queued entries
        # of the same revision are iterated from the lowest depth
        foundnew = (currev != lastrev)
        if foundnew and curdepth >= startdepth:
            lastrev = currev
            yield currev
        pdepth = curdepth + 1
        if foundnew and pdepth < stopdepth:
            try:
                for prev in cl.parentrevs(currev)[:cut]:
                    if prev != node.nullrev:
                        heapq.heappush(pendingheap, (-prev, pdepth))
            except error.WdirUnsupported:
                # wdir() has no entry in the changelog; fall back to context
                # objects to discover its parents
                for pctx in repo[currev].parents()[:cut]:
                    if pctx.rev() != node.nullrev:
                        heapq.heappush(pendingheap, (-pctx.rev(), pdepth))
76
76
def revancestors(repo, revs, followfirst, startdepth=None, stopdepth=None):
    """Like revlog.ancestors(), but supports additional options, includes
    the given revs themselves, and returns a smartset

    Scan ends at the stopdepth (exclusive) if specified. Revisions found
    earlier than the startdepth are omitted.
    """
    gen = _genrevancestors(repo, revs, followfirst, startdepth, stopdepth)
    return generatorset(gen, iterasc=False)
86
86
def _genrevdescendants(repo, revs, followfirst):
    """Generate descendant revisions of revs, including revs themselves.

    Revisions are yielded in ascending order.  If followfirst is true,
    only first parents are followed when deciding descent.
    """
    if followfirst:
        cut = 1
    else:
        cut = None

    cl = repo.changelog
    # XXX this should be 'parentset.min()' assuming 'parentset' is a
    # smartset (and if it is not, it should.)
    first = min(revs)
    nullrev = node.nullrev
    if first == nullrev:
        # Are there nodes with a null first parent and a non-null
        # second one? Maybe. Do we care? Probably not.
        for i in cl:
            yield i
    else:
        seen = set(revs)
        for i in cl.revs(first + 1):
            for x in cl.parentrevs(i)[:cut]:
                if x != nullrev and x in seen:
                    seen.add(i)
                    yield i
                    break

def revdescendants(repo, revs, followfirst):
    """Like revlog.descendants() but supports followfirst."""
    gen = _genrevdescendants(repo, revs, followfirst)
    return generatorset(gen, iterasc=True)
115
116
def _reachablerootspure(repo, minroot, roots, heads, includepath):
    """return (heads(::<roots> and ::<heads>))

    If includepath is True, return (<roots>::<heads>)."""
    if not roots:
        return []
    parentrevs = repo.changelog.parentrevs
    roots = set(roots)
    visit = list(heads)
    reachable = set()
    seen = {}
    # prefetch all the things! (because python is slow)
    reached = reachable.add
    dovisit = visit.append
    nextvisit = visit.pop
    # open-code the post-order traversal due to the tiny size of
    # sys.getrecursionlimit()
    while visit:
        rev = nextvisit()
        if rev in roots:
            reached(rev)
            if not includepath:
                continue
        parents = parentrevs(rev)
        seen[rev] = parents
        for parent in parents:
            # minroot bounds the walk; anything older cannot be a root
            if parent >= minroot and parent not in seen:
                dovisit(parent)
    if not reachable:
        return baseset()
    if not includepath:
        return reachable
    # second pass: extend reachability downward along the seen graph so the
    # result covers the full <roots>::<heads> paths
    for rev in sorted(seen):
        for parent in seen[rev]:
            if parent in reachable:
                reached(rev)
    return reachable
153
154
def reachableroots(repo, roots, heads, includepath=False):
    """return (heads(::<roots> and ::<heads>))

    If includepath is True, return (<roots>::<heads>)."""
    if not roots:
        return baseset()
    minroot = roots.min()
    roots = list(roots)
    heads = list(heads)
    try:
        # prefer the C implementation on the changelog when available
        revs = repo.changelog.reachableroots(minroot, heads, roots, includepath)
    except AttributeError:
        revs = _reachablerootspure(repo, minroot, roots, heads, includepath)
    revs = baseset(revs)
    revs.sort()
    return revs
170
171
def _changesrange(fctx1, fctx2, linerange2, diffopts):
    """Return `(diffinrange, linerange1)` where `diffinrange` is True
    if diff from fctx2 to fctx1 has changes in linerange2 and
    `linerange1` is the new line range for fctx1.
    """
    blocks = mdiff.allblocks(fctx1.data(), fctx2.data(), diffopts)
    filteredblocks, linerange1 = mdiff.blocksinrange(blocks, linerange2)
    # '!' marks a changed block (as opposed to '=' for unchanged)
    diffinrange = any(stype == '!' for _, stype in filteredblocks)
    return diffinrange, linerange1
180
181
def blockancestors(fctx, fromline, toline, followfirst=False):
    """Yield ancestors of `fctx` with respect to the block of lines within
    `fromline`-`toline` range.
    """
    diffopts = patch.diffopts(fctx._repo.ui)
    introrev = fctx.introrev()
    if fctx.rev() != introrev:
        fctx = fctx.filectx(fctx.filenode(), changeid=introrev)
    visit = {(fctx.linkrev(), fctx.filenode()): (fctx, (fromline, toline))}
    while visit:
        c, linerange2 = visit.pop(max(visit))
        pl = c.parents()
        if followfirst:
            pl = pl[:1]
        if not pl:
            # The block originates from the initial revision.
            yield c, linerange2
            continue
        inrange = False
        for p in pl:
            inrangep, linerange1 = _changesrange(p, c, linerange2, diffopts)
            inrange = inrange or inrangep
            if linerange1[0] == linerange1[1]:
                # Parent's linerange is empty, meaning that the block got
                # introduced in this revision; no need to go further in this
                # branch.
                continue
            # Set _descendantrev with 'c' (a known descendant) so that, when
            # _adjustlinkrev is called for 'p', it receives this descendant
            # (as srcrev) instead possibly topmost introrev.
            p._descendantrev = c.rev()
            visit[p.linkrev(), p.filenode()] = p, linerange1
        if inrange:
            yield c, linerange2
215
216
def blockdescendants(fctx, fromline, toline):
    """Yield descendants of `fctx` with respect to the block of lines within
    `fromline`-`toline` range.
    """
    # First possibly yield 'fctx' if it has changes in range with respect to
    # its parents.
    try:
        c, linerange1 = next(blockancestors(fctx, fromline, toline))
    except StopIteration:
        pass
    else:
        if c == fctx:
            yield c, linerange1

    diffopts = patch.diffopts(fctx._repo.ui)
    fl = fctx.filelog()
    seen = {fctx.filerev(): (fctx, (fromline, toline))}
    for i in fl.descendants([fctx.filerev()]):
        c = fctx.filectx(i)
        inrange = False
        for x in fl.parentrevs(i):
            try:
                p, linerange2 = seen[x]
            except KeyError:
                # nullrev or other branch
                continue
            inrangep, linerange1 = _changesrange(c, p, linerange2, diffopts)
            inrange = inrange or inrangep
        # If revision 'i' has been seen (it's a merge), we assume that its
        # line range is the same independently of which parent was used
        # to compute it.
        assert i not in seen or seen[i][1] == linerange1, (
            'computed line range for %s is not consistent between '
            'ancestor branches' % c)
        seen[i] = c, linerange1
        if inrange:
            yield c, linerange1
253
254
def toposort(revs, parentsfunc, firstbranch=()):
    """Yield revisions from heads to roots one (topo) branch at a time.

    This function aims to be used by a graph generator that wishes to minimize
    the number of parallel branches and their interleaving.

    Example iteration order (numbers show the "true" order in a changelog):

      o  4
      |
      o  1
      |
      | o  3
      | |
      | o  2
      |/
      o  0

    Note that the ancestors of merges are understood by the current
    algorithm to be on the same branch. This means no reordering will
    occur behind a merge.
    """

    ### Quick summary of the algorithm
    #
    # This function is based around a "retention" principle. We keep revisions
    # in memory until we are ready to emit a whole branch that immediately
    # "merges" into an existing one. This reduces the number of parallel
    # branches with interleaved revisions.
    #
    # During iteration revs are split into two groups:
    # A) revision already emitted
    # B) revision in "retention". They are stored as different subgroups.
    #
    # for each REV, we do the following logic:
    #
    #   1) if REV is a parent of (A), we will emit it. If there is a
    #   retention group ((B) above) that is blocked on REV being
    #   available, we emit all the revisions out of that retention
    #   group first.
    #
    #   2) else, we'll search for a subgroup in (B) awaiting for REV to be
    #   available, if such subgroup exist, we add REV to it and the subgroup is
    #   now awaiting for REV.parents() to be available.
    #
    #   3) finally if no such group existed in (B), we create a new subgroup.
    #
    #
    # To bootstrap the algorithm, we emit the tipmost revision (which
    # puts it in group (A) from above).

    revs.sort(reverse=True)

    # Set of parents of revision that have been emitted. They can be considered
    # unblocked as the graph generator is already aware of them so there is no
    # need to delay the revisions that reference them.
    #
    # If someone wants to prioritize a branch over the others, pre-filling this
    # set will force all other branches to wait until this branch is ready to be
    # emitted.
    unblocked = set(firstbranch)

    # list of groups waiting to be displayed, each group is defined by:
    #
    #   (revs:    lists of revs waiting to be displayed,
    #    blocked: set of that cannot be displayed before those in 'revs')
    #
    # The second value ('blocked') correspond to parents of any revision in the
    # group ('revs') that is not itself contained in the group. The main idea
    # of this algorithm is to delay as much as possible the emission of any
    # revision.  This means waiting for the moment we are about to display
    # these parents to display the revs in a group.
    #
    # This first implementation is smart until it encounters a merge: it will
    # emit revs as soon as any parent is about to be emitted and can grow an
    # arbitrary number of revs in 'blocked'. In practice this mean we properly
    # retains new branches but gives up on any special ordering for ancestors
    # of merges. The implementation can be improved to handle this better.
    #
    # The first subgroup is special. It corresponds to all the revision that
    # were already emitted. The 'revs' lists is expected to be empty and the
    # 'blocked' set contains the parents revisions of already emitted revision.
    #
    # You could pre-seed the <parents> set of groups[0] to a specific
    # changesets to select what the first emitted branch should be.
    groups = [([], unblocked)]
    pendingheap = []
    pendingset = set()

    heapq.heapify(pendingheap)
    heappop = heapq.heappop
    heappush = heapq.heappush
    for currentrev in revs:
        # Heap works with smallest element, we want highest so we invert
        if currentrev not in pendingset:
            heappush(pendingheap, -currentrev)
            pendingset.add(currentrev)
        # iterates on pending rev until after the current rev have been
        # processed.
        rev = None
        while rev != currentrev:
            rev = -heappop(pendingheap)
            pendingset.remove(rev)

            # Seek for a subgroup blocked, waiting for the current revision.
            matching = [i for i, g in enumerate(groups) if rev in g[1]]

            if matching:
                # The main idea is to gather together all sets that are blocked
                # on the same revision.
                #
                # Groups are merged when a common blocking ancestor is
                # observed. For example, given two groups:
                #
                # revs [5, 4] waiting for 1
                # revs [3, 2] waiting for 1
                #
                # These two groups will be merged when we process
                # 1. In theory, we could have merged the groups when
                # we added 2 to the group it is now in (we could have
                # noticed the groups were both blocked on 1 then), but
                # the way it works now makes the algorithm simpler.
                #
                # We also always keep the oldest subgroup first. We can
                # probably improve the behavior by having the longest set
                # first. That way, graph algorithms could minimise the length
                # of parallel lines their drawing. This is currently not done.
                targetidx = matching.pop(0)
                trevs, tparents = groups[targetidx]
                for i in matching:
                    gr = groups[i]
                    trevs.extend(gr[0])
                    tparents |= gr[1]
                # delete all merged subgroups (except the one we kept)
                # (starting from the last subgroup for performance and
                # sanity reasons)
                for i in reversed(matching):
                    del groups[i]
            else:
                # This is a new head. We create a new subgroup for it.
                targetidx = len(groups)
                groups.append(([], {rev}))

            gr = groups[targetidx]

            # We now add the current nodes to this subgroups. This is done
            # after the subgroup merging because all elements from a subgroup
            # that relied on this rev must precede it.
            #
            # we also update the <parents> set to include the parents of the
            # new nodes.
            if rev == currentrev:  # only display stuff in rev
                gr[0].append(rev)
            gr[1].remove(rev)
            parents = [p for p in parentsfunc(rev) if p > node.nullrev]
            gr[1].update(parents)
            for p in parents:
                if p not in pendingset:
                    pendingset.add(p)
                    heappush(pendingheap, -p)

            # Look for a subgroup to display
            #
            # When unblocked is empty (if clause), we were not waiting for any
            # revisions during the first iteration (if no priority was given) or
            # if we emitted a whole disconnected set of the graph (reached a
            # root). In that case we arbitrarily take the oldest known
            # subgroup. The heuristic could probably be better.
            #
            # Otherwise (elif clause) if the subgroup is blocked on
            # a revision we just emitted, we can safely emit it as
            # well.
            if not unblocked:
                if len(groups) > 1:  # display other subset
                    targetidx = 1
                    gr = groups[1]
            elif not gr[1] & unblocked:
                gr = None

            if gr is not None:
                # update the set of awaited revisions with the one from the
                # subgroup
                unblocked |= gr[1]
                # output all revisions in the subgroup
                for r in gr[0]:
                    yield r
                # delete the subgroup that you just output
                # unless it is groups[0] in which case you just empty it.
                if targetidx:
                    del groups[targetidx]
                else:
                    gr[0][:] = []
    # Check if we have some subgroup waiting for revisions we are not going to
    # iterate over
    for g in groups:
        for r in g[0]:
            yield r
General Comments 0
You need to be logged in to leave comments. Login now