##// END OF EJS Templates
dagop: raise ProgrammingError if stopdepth < 0...
Martin von Zweigbergk -
r33027:a10f5f67 default
parent child Browse files
Show More
@@ -1,448 +1,450 b''
1 # dagop.py - graph ancestry and topology algorithm for revset
1 # dagop.py - graph ancestry and topology algorithm for revset
2 #
2 #
3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import heapq
10 import heapq
11
11
12 from . import (
12 from . import (
13 error,
13 error,
14 mdiff,
14 mdiff,
15 node,
15 node,
16 patch,
16 patch,
17 smartset,
17 smartset,
18 )
18 )
19
19
20 baseset = smartset.baseset
20 baseset = smartset.baseset
21 generatorset = smartset.generatorset
21 generatorset = smartset.generatorset
22
22
23 # possible maximum depth between null and wdir()
23 # possible maximum depth between null and wdir()
24 _maxlogdepth = 0x80000000
24 _maxlogdepth = 0x80000000
25
25
def _genrevancestors(repo, revs, followfirst, startdepth, stopdepth):
    """Lazily yield ancestors of revs, including revs themselves, in
    descending revision order, limited to the depth range
    [startdepth, stopdepth).

    repo: repository whose changelog is walked
    revs: smartset of starting revisions; these are at depth 0
    followfirst: if True, only follow each revision's first parent
    startdepth: smallest depth to yield (None means 0, i.e. include revs)
    stopdepth: exclusive depth bound (None means effectively unlimited)

    Raises error.ProgrammingError if stopdepth is negative.  NOTE(review):
    since this is a generator, the raise only fires when iteration starts,
    not when the function is called.
    """
    # cut limits the parentrevs() slice: only the first parent when
    # followfirst is requested
    if followfirst:
        cut = 1
    else:
        cut = None
    if startdepth is None:
        startdepth = 0
    if stopdepth is None:
        stopdepth = _maxlogdepth
    if stopdepth == 0:
        return
    if stopdepth < 0:
        raise error.ProgrammingError('negative stopdepth')

    cl = repo.changelog

    # load input revs lazily to heap so earlier revisions can be yielded
    # without fully computing the input revs
    revs.sort(reverse=True)
    irevs = iter(revs)
    pendingheap = [] # [(-rev, depth), ...] (i.e. lower depth first)

    inputrev = next(irevs, None)
    if inputrev is not None:
        heapq.heappush(pendingheap, (-inputrev, 0))

    lastrev = None
    while pendingheap:
        # pop the highest pending revision; for equal revisions the tuple
        # ordering of (-rev, depth) guarantees the smallest depth comes first
        currev, curdepth = heapq.heappop(pendingheap)
        currev = -currev
        if currev == inputrev:
            # the popped rev consumed the head of the input stream; queue the
            # next input rev so it competes with discovered ancestors
            inputrev = next(irevs, None)
            if inputrev is not None:
                heapq.heappush(pendingheap, (-inputrev, 0))
        # rescan parents until curdepth >= startdepth because queued entries
        # of the same revision are iterated from the lowest depth
        foundnew = (currev != lastrev)
        if foundnew and curdepth >= startdepth:
            lastrev = currev
            yield currev
        pdepth = curdepth + 1
        if foundnew and pdepth < stopdepth:
            try:
                for prev in cl.parentrevs(currev)[:cut]:
                    if prev != node.nullrev:
                        heapq.heappush(pendingheap, (-prev, pdepth))
            except error.WdirUnsupported:
                # the changelog cannot answer for wdir(); fall back to
                # context objects to find its parents
                for pctx in repo[currev].parents()[:cut]:
                    if pctx.rev() != node.nullrev:
                        heapq.heappush(pendingheap, (-pctx.rev(), pdepth))
def revancestors(repo, revs, followfirst, startdepth=None, stopdepth=None):
    """Like revlog.ancestors(), but supports additional options, includes
    the given revs themselves, and returns a smartset

    Scan ends at the stopdepth (exclusive) if specified. Revisions found
    earlier than the startdepth are omitted.

    Raises error.ProgrammingError if stopdepth is negative.
    """
    # _genrevancestors is a generator, so any check inside it is deferred
    # until the smartset is first iterated; validate eagerly here so callers
    # get the ProgrammingError at call time instead of at consumption time.
    if stopdepth is not None and stopdepth < 0:
        raise error.ProgrammingError('negative stopdepth')
    gen = _genrevancestors(repo, revs, followfirst, startdepth, stopdepth)
    return generatorset(gen, iterasc=False)
def revdescendants(repo, revs, followfirst):
    """Like revlog.descendants() but supports followfirst."""
    # limit the parent slice to the first parent when only first parents
    # should be followed
    pcut = 1 if followfirst else None

    def walk():
        cl = repo.changelog
        # XXX this should be 'parentset.min()' assuming 'parentset' is a
        # smartset (and if it is not, it should.)
        lowest = min(revs)
        nullrev = node.nullrev
        if lowest == nullrev:
            # Are there nodes with a null first parent and a non-null
            # second one? Maybe. Do we care? Probably not.
            for rev in cl:
                yield rev
            return
        descendants = set(revs)
        for rev in cl.revs(lowest + 1):
            for prev in cl.parentrevs(rev)[:pcut]:
                if prev != nullrev and prev in descendants:
                    descendants.add(rev)
                    yield rev
                    break

    return generatorset(walk(), iterasc=True)
def _reachablerootspure(repo, minroot, roots, heads, includepath):
    """return (heads(::<roots> and ::<heads>))

    If includepath is True, return (<roots>::<heads>)."""
    if not roots:
        return []
    parentrevs = repo.changelog.parentrevs
    rootset = set(roots)
    reachable = set()
    seen = {}  # rev -> parentrevs(rev), doubles as the visited marker
    stack = list(heads)
    # open-code the post-order traversal due to the tiny size of
    # sys.getrecursionlimit()
    while stack:
        rev = stack.pop()
        if rev in rootset:
            reachable.add(rev)
            if not includepath:
                # heads(...) only needs the root itself; don't descend
                continue
        parents = parentrevs(rev)
        seen[rev] = parents
        for parent in parents:
            if parent >= minroot and parent not in seen:
                stack.append(parent)
    if not reachable:
        return baseset()
    if not includepath:
        return reachable
    # propagate reachability forward: walking seen revs in ascending order
    # guarantees a rev's parents were classified before the rev itself
    for rev in sorted(seen):
        for parent in seen[rev]:
            if parent in reachable:
                reachable.add(rev)
    return reachable
def reachableroots(repo, roots, heads, includepath=False):
    """return (heads(::<roots> and ::<heads>))

    If includepath is True, return (<roots>::<heads>)."""
    if not roots:
        return baseset()
    minroot = roots.min()
    rootlist = list(roots)
    headlist = list(heads)
    try:
        # prefer the native changelog implementation when available
        revs = repo.changelog.reachableroots(minroot, headlist, rootlist,
                                             includepath)
    except AttributeError:
        # fall back to the pure-Python traversal
        revs = _reachablerootspure(repo, minroot, rootlist, headlist,
                                   includepath)
    revs = baseset(revs)
    revs.sort()
    return revs
def _changesrange(fctx1, fctx2, linerange2, diffopts):
    """Return `(diffinrange, linerange1)` where `diffinrange` is True
    if diff from fctx2 to fctx1 has changes in linerange2 and
    `linerange1` is the new line range for fctx1.
    """
    allblocks = mdiff.allblocks(fctx1.data(), fctx2.data(), diffopts)
    inrangeblocks, linerange1 = mdiff.blocksinrange(allblocks, linerange2)
    # a '!' block type marks an actual change (as opposed to context)
    changed = False
    for _block, stype in inrangeblocks:
        if stype == '!':
            changed = True
            break
    return changed, linerange1
def blockancestors(fctx, fromline, toline, followfirst=False):
    """Yield ancestors of `fctx` with respect to the block of lines within
    `fromline`-`toline` range.

    Yields (filectx, (fromline, toline)) pairs for ancestors whose diff
    touches the (possibly shifted) tracked line range.  If followfirst is
    True, only first parents are followed.
    """
    diffopts = patch.diffopts(fctx._repo.ui)
    introrev = fctx.introrev()
    # rebase fctx onto the changeset that actually introduced it so that
    # the linkrev-keyed 'visit' ordering below is meaningful
    if fctx.rev() != introrev:
        fctx = fctx.filectx(fctx.filenode(), changeid=introrev)
    # visit: (linkrev, filenode) -> (filectx, line range) of candidates
    visit = {(fctx.linkrev(), fctx.filenode()): (fctx, (fromline, toline))}
    while visit:
        # process the candidate with the highest (linkrev, filenode) first,
        # i.e. the newest pending changeset
        c, linerange2 = visit.pop(max(visit))
        pl = c.parents()
        if followfirst:
            pl = pl[:1]
        if not pl:
            # The block originates from the initial revision.
            yield c, linerange2
            continue
        inrange = False
        for p in pl:
            inrangep, linerange1 = _changesrange(p, c, linerange2, diffopts)
            inrange = inrange or inrangep
            if linerange1[0] == linerange1[1]:
                # Parent's linerange is empty, meaning that the block got
                # introduced in this revision; no need to go further in this
                # branch.
                continue
            # Set _descendantrev with 'c' (a known descendant) so that, when
            # _adjustlinkrev is called for 'p', it receives this descendant
            # (as srcrev) instead possibly topmost introrev.
            p._descendantrev = c.rev()
            visit[p.linkrev(), p.filenode()] = p, linerange1
        if inrange:
            yield c, linerange2
def blockdescendants(fctx, fromline, toline):
    """Yield descendants of `fctx` with respect to the block of lines within
    `fromline`-`toline` range.

    Yields (filectx, (fromline, toline)) pairs for fctx itself (when it
    changed the range) and for each filelog descendant whose diff touches
    the tracked line range.
    """
    # First possibly yield 'fctx' if it has changes in range with respect to
    # its parents.
    try:
        c, linerange1 = next(blockancestors(fctx, fromline, toline))
    except StopIteration:
        # no ancestor touches the range; nothing to yield for fctx itself
        pass
    else:
        if c == fctx:
            yield c, linerange1

    diffopts = patch.diffopts(fctx._repo.ui)
    fl = fctx.filelog()
    # seen: filerev -> (filectx, line range) for already-processed revisions;
    # seeded with fctx so its children can look up the starting range
    seen = {fctx.filerev(): (fctx, (fromline, toline))}
    for i in fl.descendants([fctx.filerev()]):
        c = fctx.filectx(i)
        inrange = False
        for x in fl.parentrevs(i):
            try:
                p, linerange2 = seen[x]
            except KeyError:
                # nullrev or other branch
                continue
            inrangep, linerange1 = _changesrange(c, p, linerange2, diffopts)
            inrange = inrange or inrangep
            # If revision 'i' has been seen (it's a merge), we assume that its
            # line range is the same independently of which parents was used
            # to compute it.
            assert i not in seen or seen[i][1] == linerange1, (
                'computed line range for %s is not consistent between '
                'ancestor branches' % c)
            seen[i] = c, linerange1
        if inrange:
            yield c, linerange1
def toposort(revs, parentsfunc, firstbranch=()):
    """Yield revisions from heads to roots one (topo) branch at a time.

    This function aims to be used by a graph generator that wishes to minimize
    the number of parallel branches and their interleaving.

    revs: revisions to emit; sorted in place in descending order, so it must
          support sort(reverse=True)
    parentsfunc: rev -> iterable of parent revisions (nullrev is filtered out)
    firstbranch: revisions used to pre-seed the "already emitted" set, which
          forces every other branch to wait on that branch (see below)

    Example iteration order (numbers show the "true" order in a changelog):

      o  4
      |
      o  1
      |
      | o  3
      | |
      | o  2
      |/
      o  0

    Note that the ancestors of merges are understood by the current
    algorithm to be on the same branch. This means no reordering will
    occur behind a merge.
    """

    ### Quick summary of the algorithm
    #
    # This function is based around a "retention" principle. We keep revisions
    # in memory until we are ready to emit a whole branch that immediately
    # "merges" into an existing one. This reduces the number of parallel
    # branches with interleaved revisions.
    #
    # During iteration revs are split into two groups:
    # A) revision already emitted
    # B) revision in "retention". They are stored as different subgroups.
    #
    # for each REV, we do the following logic:
    #
    #   1) if REV is a parent of (A), we will emit it. If there is a
    #   retention group ((B) above) that is blocked on REV being
    #   available, we emit all the revisions out of that retention
    #   group first.
    #
    #   2) else, we'll search for a subgroup in (B) awaiting for REV to be
    #   available, if such subgroup exist, we add REV to it and the subgroup is
    #   now awaiting for REV.parents() to be available.
    #
    #   3) finally if no such group existed in (B), we create a new subgroup.
    #
    #
    # To bootstrap the algorithm, we emit the tipmost revision (which
    # puts it in group (A) from above).

    revs.sort(reverse=True)

    # Set of parents of revision that have been emitted. They can be considered
    # unblocked as the graph generator is already aware of them so there is no
    # need to delay the revisions that reference them.
    #
    # If someone wants to prioritize a branch over the others, pre-filling this
    # set will force all other branches to wait until this branch is ready to be
    # emitted.
    unblocked = set(firstbranch)

    # list of groups waiting to be displayed, each group is defined by:
    #
    #   (revs:    lists of revs waiting to be displayed,
    #    blocked: set of that cannot be displayed before those in 'revs')
    #
    # The second value ('blocked') correspond to parents of any revision in the
    # group ('revs') that is not itself contained in the group. The main idea
    # of this algorithm is to delay as much as possible the emission of any
    # revision.  This means waiting for the moment we are about to display
    # these parents to display the revs in a group.
    #
    # This first implementation is smart until it encounters a merge: it will
    # emit revs as soon as any parent is about to be emitted and can grow an
    # arbitrary number of revs in 'blocked'. In practice this mean we properly
    # retains new branches but gives up on any special ordering for ancestors
    # of merges. The implementation can be improved to handle this better.
    #
    # The first subgroup is special. It corresponds to all the revision that
    # were already emitted. The 'revs' lists is expected to be empty and the
    # 'blocked' set contains the parents revisions of already emitted revision.
    #
    # You could pre-seed the <parents> set of groups[0] to a specific
    # changesets to select what the first emitted branch should be.
    groups = [([], unblocked)]
    pendingheap = []  # max-heap of pending revs, stored negated
    pendingset = set()  # membership mirror of pendingheap

    heapq.heapify(pendingheap)
    heappop = heapq.heappop
    heappush = heapq.heappush
    for currentrev in revs:
        # Heap works with smallest element, we want highest so we invert
        if currentrev not in pendingset:
            heappush(pendingheap, -currentrev)
            pendingset.add(currentrev)
        # iterates on pending rev until after the current rev have been
        # processed.
        rev = None
        while rev != currentrev:
            rev = -heappop(pendingheap)
            pendingset.remove(rev)

            # Seek for a subgroup blocked, waiting for the current revision.
            matching = [i for i, g in enumerate(groups) if rev in g[1]]

            if matching:
                # The main idea is to gather together all sets that are blocked
                # on the same revision.
                #
                # Groups are merged when a common blocking ancestor is
                # observed. For example, given two groups:
                #
                # revs [5, 4] waiting for 1
                # revs [3, 2] waiting for 1
                #
                # These two groups will be merged when we process
                # 1. In theory, we could have merged the groups when
                # we added 2 to the group it is now in (we could have
                # noticed the groups were both blocked on 1 then), but
                # the way it works now makes the algorithm simpler.
                #
                # We also always keep the oldest subgroup first. We can
                # probably improve the behavior by having the longest set
                # first. That way, graph algorithms could minimise the length
                # of parallel lines their drawing. This is currently not done.
                targetidx = matching.pop(0)
                trevs, tparents = groups[targetidx]
                for i in matching:
                    gr = groups[i]
                    trevs.extend(gr[0])
                    tparents |= gr[1]
                # delete all merged subgroups (except the one we kept)
                # (starting from the last subgroup for performance and
                # sanity reasons)
                for i in reversed(matching):
                    del groups[i]
            else:
                # This is a new head. We create a new subgroup for it.
                targetidx = len(groups)
                groups.append(([], {rev}))

            gr = groups[targetidx]

            # We now add the current nodes to this subgroups. This is done
            # after the subgroup merging because all elements from a subgroup
            # that relied on this rev must precede it.
            #
            # we also update the <parents> set to include the parents of the
            # new nodes.
            if rev == currentrev: # only display stuff in rev
                gr[0].append(rev)
            gr[1].remove(rev)
            parents = [p for p in parentsfunc(rev) if p > node.nullrev]
            gr[1].update(parents)
            for p in parents:
                if p not in pendingset:
                    pendingset.add(p)
                    heappush(pendingheap, -p)

            # Look for a subgroup to display
            #
            # When unblocked is empty (if clause), we are not waiting for any
            # revisions during the first iteration (if no priority was given) or
            # if we emitted a whole disconnected set of the graph (reached a
            # root).  In that case we arbitrarily take the oldest known
            # subgroup. The heuristic could probably be better.
            #
            # Otherwise (elif clause) if the subgroup is blocked on
            # a revision we just emitted, we can safely emit it as
            # well.
            if not unblocked:
                if len(groups) > 1:  # display other subset
                    targetidx = 1
                    gr = groups[1]
            elif not gr[1] & unblocked:
                gr = None

            if gr is not None:
                # update the set of awaited revisions with the one from the
                # subgroup
                unblocked |= gr[1]
                # output all revisions in the subgroup
                for r in gr[0]:
                    yield r
                # delete the subgroup that you just output
                # unless it is groups[0] in which case you just empty it.
                if targetidx:
                    del groups[targetidx]
                else:
                    gr[0][:] = []
    # Check if we have some subgroup waiting for revisions we are not going to
    # iterate over
    for g in groups:
        for r in g[0]:
            yield r
General Comments 0
You need to be logged in to leave comments. Login now