##// END OF EJS Templates
dagop: make walk direction switchable so it can track descendants...
Yuya Nishihara -
r33079:550c390c default
parent child Browse files
Show More
@@ -1,463 +1,468 b''
1 # dagop.py - graph ancestry and topology algorithm for revset
1 # dagop.py - graph ancestry and topology algorithm for revset
2 #
2 #
3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import heapq
10 import heapq
11
11
12 from . import (
12 from . import (
13 error,
13 error,
14 mdiff,
14 mdiff,
15 node,
15 node,
16 patch,
16 patch,
17 smartset,
17 smartset,
18 )
18 )
19
19
20 baseset = smartset.baseset
20 baseset = smartset.baseset
21 generatorset = smartset.generatorset
21 generatorset = smartset.generatorset
22
22
23 # possible maximum depth between null and wdir()
23 # possible maximum depth between null and wdir()
24 _maxlogdepth = 0x80000000
24 _maxlogdepth = 0x80000000
25
25
def _walkrevtree(pfunc, revs, startdepth, stopdepth, reverse):
    """Walk DAG using 'pfunc' from the given 'revs' nodes

    'pfunc(rev)' should return the parent/child revisions of the given 'rev'
    if 'reverse' is True/False respectively.

    Scan ends at the stopdepth (exclusive) if specified. Revisions found
    earlier than the startdepth are omitted.
    """
    if startdepth is None:
        startdepth = 0
    if stopdepth is None:
        stopdepth = _maxlogdepth
    if stopdepth == 0:
        return
    if stopdepth < 0:
        raise error.ProgrammingError('negative stopdepth')
    if reverse:
        heapsign = -1  # max heap
    else:
        heapsign = +1  # min heap

    # load input revs lazily to heap so earlier revisions can be yielded
    # without fully computing the input revs
    revs.sort(reverse)
    irevs = iter(revs)
    pendingheap = []  # [(heapsign * rev, depth), ...] (i.e. lower depth first)

    inputrev = next(irevs, None)
    if inputrev is not None:
        heapq.heappush(pendingheap, (heapsign * inputrev, 0))

    lastrev = None
    while pendingheap:
        currev, curdepth = heapq.heappop(pendingheap)
        # undo the sign flip applied on push so 'currev' is a real revision
        currev = heapsign * currev
        if currev == inputrev:
            # the input rev at the heap top has been consumed; lazily pull
            # the next one from the input set onto the heap
            inputrev = next(irevs, None)
            if inputrev is not None:
                heapq.heappush(pendingheap, (heapsign * inputrev, 0))
        # rescan parents until curdepth >= startdepth because queued entries
        # of the same revision are iterated from the lowest depth
        foundnew = (currev != lastrev)
        if foundnew and curdepth >= startdepth:
            lastrev = currev
            yield currev
        pdepth = curdepth + 1
        if foundnew and pdepth < stopdepth:
            for prev in pfunc(currev):
                if prev != node.nullrev:
                    heapq.heappush(pendingheap, (heapsign * prev, pdepth))
72
77
def _genrevancestors(repo, revs, followfirst, startdepth, stopdepth):
    """Build a generator over the ancestors of 'revs' (inclusive).

    When 'followfirst' is true, only the first parent of each revision is
    followed. The depth limits are forwarded to _walkrevtree().
    """
    cut = 1 if followfirst else None
    cl = repo.changelog
    def parentsfn(rev):
        try:
            return cl.parentrevs(rev)[:cut]
        except error.WdirUnsupported:
            # the working directory has no changelog entry; ask the parent
            # context objects instead
            return (p.rev() for p in repo[rev].parents()[:cut])
    return _walkrevtree(parentsfn, revs, startdepth, stopdepth, reverse=True)
85
90
def revancestors(repo, revs, followfirst, startdepth=None, stopdepth=None):
    """Like revlog.ancestors(), but supports additional options, includes
    the given revs themselves, and returns a smartset

    Scan ends at the stopdepth (exclusive) if specified. Revisions found
    earlier than the startdepth are omitted.
    """
    return generatorset(
        _genrevancestors(repo, revs, followfirst, startdepth, stopdepth),
        iterasc=False)
95
100
def _genrevdescendants(repo, revs, followfirst):
    """Generate the descendants of 'revs' (inclusive) in revision order.

    When 'followfirst' is true, only first-parent links are considered when
    deciding whether a scanned revision descends from 'revs'.
    """
    cut = 1 if followfirst else None
    cl = repo.changelog
    first = revs.min()
    nullrev = node.nullrev
    if first == nullrev:
        # Everything descends from the null revision, so just emit the
        # whole changelog instead of chasing parent pointers.
        # (Are there nodes with a null first parent and a non-null
        # second one? Maybe. Do we care? Probably not.)
        yield first
        for rev in cl:
            yield rev
    else:
        descendants = set(revs)
        for rev in cl.revs(first):
            if rev in descendants:
                yield rev
                continue
            # a revision is a descendant iff any followed parent already is
            if any(p != nullrev and p in descendants
                   for p in cl.parentrevs(rev)[:cut]):
                descendants.add(rev)
                yield rev
122
127
def revdescendants(repo, revs, followfirst):
    """Like revlog.descendants() but supports additional options, includes
    the given revs themselves, and returns a smartset"""
    return generatorset(_genrevdescendants(repo, revs, followfirst),
                        iterasc=True)
128
133
def _reachablerootspure(repo, minroot, roots, heads, includepath):
    """return (heads(::<roots> and ::<heads>))

    If includepath is True, return (<roots>::<heads>).

    Pure-Python fallback used when the changelog index does not provide a
    native reachableroots() implementation. 'minroot' bounds the walk: no
    parent below it is visited.
    """
    if not roots:
        return []
    parentrevs = repo.changelog.parentrevs
    roots = set(roots)
    visit = list(heads)
    reachable = set()
    seen = {}
    # prefetch all the things! (because python is slow)
    reached = reachable.add
    dovisit = visit.append
    nextvisit = visit.pop
    # open-code the post-order traversal due to the tiny size of
    # sys.getrecursionlimit()
    while visit:
        rev = nextvisit()
        if rev in roots:
            reached(rev)
            if not includepath:
                # only head-reachable roots are wanted; no need to walk
                # through this root's ancestors
                continue
        parents = parentrevs(rev)
        seen[rev] = parents
        for parent in parents:
            if parent >= minroot and parent not in seen:
                dovisit(parent)
    if not reachable:
        return baseset()
    if not includepath:
        return reachable
    # second pass (ascending rev order): mark every visited revision that
    # has a reachable parent, growing reachability toward the heads
    for rev in sorted(seen):
        for parent in seen[rev]:
            if parent in reachable:
                reached(rev)
    return reachable
166
171
def reachableroots(repo, roots, heads, includepath=False):
    """return (heads(::<roots> and ::<heads>))

    If includepath is True, return (<roots>::<heads>)."""
    if not roots:
        return baseset()
    minroot = roots.min()
    rootlist = list(roots)
    headlist = list(heads)
    try:
        # fast path: native index implementation
        found = repo.changelog.reachableroots(minroot, headlist, rootlist,
                                              includepath)
    except AttributeError:
        # slow path: pure-Python fallback
        found = _reachablerootspure(repo, minroot, rootlist, headlist,
                                    includepath)
    result = baseset(found)
    result.sort()
    return result
183
188
def _changesrange(fctx1, fctx2, linerange2, diffopts):
    """Return `(diffinrange, linerange1)` where `diffinrange` is True
    if diff from fctx2 to fctx1 has changes in linerange2 and
    `linerange1` is the new line range for fctx1.
    """
    allblocks = mdiff.allblocks(fctx1.data(), fctx2.data(), diffopts)
    inrangeblocks, linerange1 = mdiff.blocksinrange(allblocks, linerange2)
    changed = any(stype == '!' for _, stype in inrangeblocks)
    return changed, linerange1
193
198
def blockancestors(fctx, fromline, toline, followfirst=False):
    """Yield ancestors of `fctx` with respect to the block of lines within
    `fromline`-`toline` range.

    Yields (filectx, (fromline, toline)) pairs, newest first, for each
    ancestor that touched the (narrowing) line range. With 'followfirst',
    only first parents are followed.
    """
    diffopts = patch.diffopts(fctx._repo.ui)
    introrev = fctx.introrev()
    if fctx.rev() != introrev:
        # rebase fctx onto the changeset that actually introduced it
        fctx = fctx.filectx(fctx.filenode(), changeid=introrev)
    # visit dict keyed by (linkrev, filenode); max(visit) picks the
    # most recent pending revision to process next
    visit = {(fctx.linkrev(), fctx.filenode()): (fctx, (fromline, toline))}
    while visit:
        c, linerange2 = visit.pop(max(visit))
        pl = c.parents()
        if followfirst:
            pl = pl[:1]
        if not pl:
            # The block originates from the initial revision.
            yield c, linerange2
            continue
        inrange = False
        for p in pl:
            inrangep, linerange1 = _changesrange(p, c, linerange2, diffopts)
            inrange = inrange or inrangep
            if linerange1[0] == linerange1[1]:
                # Parent's linerange is empty, meaning that the block got
                # introduced in this revision; no need to go further in this
                # branch.
                continue
            # Set _descendantrev with 'c' (a known descendant) so that, when
            # _adjustlinkrev is called for 'p', it receives this descendant
            # (as srcrev) instead possibly topmost introrev.
            p._descendantrev = c.rev()
            visit[p.linkrev(), p.filenode()] = p, linerange1
        if inrange:
            yield c, linerange2
228
233
def blockdescendants(fctx, fromline, toline):
    """Yield descendants of `fctx` with respect to the block of lines within
    `fromline`-`toline` range.

    Yields (filectx, (fromline, toline)) pairs in filelog revision order,
    starting with `fctx` itself when it changes the range.
    """
    # First possibly yield 'fctx' if it has changes in range with respect to
    # its parents.
    try:
        c, linerange1 = next(blockancestors(fctx, fromline, toline))
    except StopIteration:
        pass
    else:
        if c == fctx:
            yield c, linerange1

    diffopts = patch.diffopts(fctx._repo.ui)
    fl = fctx.filelog()
    # seen maps a filelog rev to its filectx and the line range it carried
    seen = {fctx.filerev(): (fctx, (fromline, toline))}
    for i in fl.descendants([fctx.filerev()]):
        c = fctx.filectx(i)
        inrange = False
        for x in fl.parentrevs(i):
            try:
                p, linerange2 = seen[x]
            except KeyError:
                # nullrev or other branch
                continue
            inrangep, linerange1 = _changesrange(c, p, linerange2, diffopts)
            inrange = inrange or inrangep
            # If revision 'i' has been seen (it's a merge), we assume that its
            # line range is the same independently of which parents was used
            # to compute it.
            assert i not in seen or seen[i][1] == linerange1, (
                'computed line range for %s is not consistent between '
                'ancestor branches' % c)
            seen[i] = c, linerange1
        if inrange:
            yield c, linerange1
266
271
def toposort(revs, parentsfunc, firstbranch=()):
    """Yield revisions from heads to roots one (topo) branch at a time.

    'revs' is a smartset of revisions (sorted descending in-place below);
    'parentsfunc(rev)' returns the parent revisions of 'rev'; 'firstbranch'
    optionally pre-seeds the unblocked set so a chosen branch is emitted
    first.

    This function aims to be used by a graph generator that wishes to minimize
    the number of parallel branches and their interleaving.

    Example iteration order (numbers show the "true" order in a changelog):

      o  4
      |
      o  1
      |
      | o  3
      | |
      | o  2
      |/
      o  0

    Note that the ancestors of merges are understood by the current
    algorithm to be on the same branch. This means no reordering will
    occur behind a merge.
    """

    ### Quick summary of the algorithm
    #
    # This function is based around a "retention" principle. We keep revisions
    # in memory until we are ready to emit a whole branch that immediately
    # "merges" into an existing one. This reduces the number of parallel
    # branches with interleaved revisions.
    #
    # During iteration revs are split into two groups:
    # A) revision already emitted
    # B) revision in "retention". They are stored as different subgroups.
    #
    # for each REV, we do the following logic:
    #
    #   1) if REV is a parent of (A), we will emit it. If there is a
    #   retention group ((B) above) that is blocked on REV being
    #   available, we emit all the revisions out of that retention
    #   group first.
    #
    #   2) else, we'll search for a subgroup in (B) awaiting for REV to be
    #   available, if such subgroup exist, we add REV to it and the subgroup is
    #   now awaiting for REV.parents() to be available.
    #
    #   3) finally if no such group existed in (B), we create a new subgroup.
    #
    #
    # To bootstrap the algorithm, we emit the tipmost revision (which
    # puts it in group (A) from above).

    revs.sort(reverse=True)

    # Set of parents of revision that have been emitted. They can be considered
    # unblocked as the graph generator is already aware of them so there is no
    # need to delay the revisions that reference them.
    #
    # If someone wants to prioritize a branch over the others, pre-filling this
    # set will force all other branches to wait until this branch is ready to be
    # emitted.
    unblocked = set(firstbranch)

    # list of groups waiting to be displayed, each group is defined by:
    #
    #   (revs:    lists of revs waiting to be displayed,
    #    blocked: set of that cannot be displayed before those in 'revs')
    #
    # The second value ('blocked') correspond to parents of any revision in the
    # group ('revs') that is not itself contained in the group. The main idea
    # of this algorithm is to delay as much as possible the emission of any
    # revision.  This means waiting for the moment we are about to display
    # these parents to display the revs in a group.
    #
    # This first implementation is smart until it encounters a merge: it will
    # emit revs as soon as any parent is about to be emitted and can grow an
    # arbitrary number of revs in 'blocked'. In practice this mean we properly
    # retains new branches but gives up on any special ordering for ancestors
    # of merges. The implementation can be improved to handle this better.
    #
    # The first subgroup is special. It corresponds to all the revision that
    # were already emitted. The 'revs' lists is expected to be empty and the
    # 'blocked' set contains the parents revisions of already emitted revision.
    #
    # You could pre-seed the <parents> set of groups[0] to a specific
    # changesets to select what the first emitted branch should be.
    groups = [([], unblocked)]
    pendingheap = []
    pendingset = set()

    heapq.heapify(pendingheap)
    heappop = heapq.heappop
    heappush = heapq.heappush
    for currentrev in revs:
        # Heap works with smallest element, we want highest so we invert
        if currentrev not in pendingset:
            heappush(pendingheap, -currentrev)
            pendingset.add(currentrev)
        # iterates on pending rev until after the current rev have been
        # processed.
        rev = None
        while rev != currentrev:
            rev = -heappop(pendingheap)
            pendingset.remove(rev)

            # Seek for a subgroup blocked, waiting for the current revision.
            matching = [i for i, g in enumerate(groups) if rev in g[1]]

            if matching:
                # The main idea is to gather together all sets that are blocked
                # on the same revision.
                #
                # Groups are merged when a common blocking ancestor is
                # observed. For example, given two groups:
                #
                # revs [5, 4] waiting for 1
                # revs [3, 2] waiting for 1
                #
                # These two groups will be merged when we process
                # 1. In theory, we could have merged the groups when
                # we added 2 to the group it is now in (we could have
                # noticed the groups were both blocked on 1 then), but
                # the way it works now makes the algorithm simpler.
                #
                # We also always keep the oldest subgroup first. We can
                # probably improve the behavior by having the longest set
                # first. That way, graph algorithms could minimise the length
                # of parallel lines their drawing. This is currently not done.
                targetidx = matching.pop(0)
                trevs, tparents = groups[targetidx]
                for i in matching:
                    gr = groups[i]
                    trevs.extend(gr[0])
                    tparents |= gr[1]
                # delete all merged subgroups (except the one we kept)
                # (starting from the last subgroup for performance and
                # sanity reasons)
                for i in reversed(matching):
                    del groups[i]
            else:
                # This is a new head. We create a new subgroup for it.
                targetidx = len(groups)
                groups.append(([], {rev}))

            gr = groups[targetidx]

            # We now add the current nodes to this subgroups. This is done
            # after the subgroup merging because all elements from a subgroup
            # that relied on this rev must precede it.
            #
            # we also update the <parents> set to include the parents of the
            # new nodes.
            if rev == currentrev:  # only display stuff in rev
                gr[0].append(rev)
            gr[1].remove(rev)
            parents = [p for p in parentsfunc(rev) if p > node.nullrev]
            gr[1].update(parents)
            for p in parents:
                if p not in pendingset:
                    pendingset.add(p)
                    heappush(pendingheap, -p)

            # Look for a subgroup to display
            #
            # When unblocked is empty (if clause), we were not waiting for any
            # revisions during the first iteration (if no priority was given) or
            # if we emitted a whole disconnected set of the graph (reached a
            # root).  In that case we arbitrarily take the oldest known
            # subgroup. The heuristic could probably be better.
            #
            # Otherwise (elif clause) if the subgroup is blocked on
            # a revision we just emitted, we can safely emit it as
            # well.
            if not unblocked:
                if len(groups) > 1:  # display other subset
                    targetidx = 1
                    gr = groups[1]
            elif not gr[1] & unblocked:
                gr = None

            if gr is not None:
                # update the set of awaited revisions with the one from the
                # subgroup
                unblocked |= gr[1]
                # output all revisions in the subgroup
                for r in gr[0]:
                    yield r
                # delete the subgroup that you just output
                # unless it is groups[0] in which case you just empty it.
                if targetidx:
                    del groups[targetidx]
                else:
                    gr[0][:] = []
    # Check if we have some subgroup waiting for revisions we are not going to
    # iterate over
    for g in groups:
        for r in g[0]:
            yield r
General Comments 0
You need to be logged in to leave comments. Login now