##// END OF EJS Templates
dagop: use smartset.min() in revdescendants() generator...
Yuya Nishihara -
r33076:a76a64c7 default
parent child Browse files
Show More
@@ -1,456 +1,454 b''
1 # dagop.py - graph ancestry and topology algorithm for revset
1 # dagop.py - graph ancestry and topology algorithm for revset
2 #
2 #
3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import heapq
10 import heapq
11
11
12 from . import (
12 from . import (
13 error,
13 error,
14 mdiff,
14 mdiff,
15 node,
15 node,
16 patch,
16 patch,
17 smartset,
17 smartset,
18 )
18 )
19
19
20 baseset = smartset.baseset
20 baseset = smartset.baseset
21 generatorset = smartset.generatorset
21 generatorset = smartset.generatorset
22
22
23 # possible maximum depth between null and wdir()
23 # possible maximum depth between null and wdir()
24 _maxlogdepth = 0x80000000
24 _maxlogdepth = 0x80000000
25
25
def _genrevancestors(repo, revs, followfirst, startdepth, stopdepth):
    """Yield ancestor revisions of 'revs' (inclusive) in descending order.

    Only ancestors whose depth from the nearest starting revision satisfies
    startdepth <= depth < stopdepth are emitted.  If followfirst is true,
    only the first parent of each revision is followed.
    """
    cut = 1 if followfirst else None
    if startdepth is None:
        startdepth = 0
    if stopdepth is None:
        stopdepth = _maxlogdepth
    if stopdepth == 0:
        return
    if stopdepth < 0:
        raise error.ProgrammingError('negative stopdepth')

    cl = repo.changelog

    # feed input revs to the heap lazily so early revisions can be yielded
    # before the whole input set has been computed
    revs.sort(reverse=True)
    irevs = iter(revs)
    pendingheap = []  # [(-rev, depth), ...] (i.e. lower depth first)

    inputrev = next(irevs, None)
    if inputrev is not None:
        heapq.heappush(pendingheap, (-inputrev, 0))

    lastrev = None
    while pendingheap:
        currev, curdepth = heapq.heappop(pendingheap)
        currev = -currev
        if currev == inputrev:
            inputrev = next(irevs, None)
            if inputrev is not None:
                heapq.heappush(pendingheap, (-inputrev, 0))
        # queued entries of one revision come out lowest depth first, so its
        # parents are rescanned until curdepth >= startdepth
        foundnew = (currev != lastrev)
        if foundnew and curdepth >= startdepth:
            lastrev = currev
            yield currev
        pdepth = curdepth + 1
        if foundnew and pdepth < stopdepth:
            try:
                for prev in cl.parentrevs(currev)[:cut]:
                    if prev != node.nullrev:
                        heapq.heappush(pendingheap, (-prev, pdepth))
            except error.WdirUnsupported:
                # the working directory has no changelog entry; fall back to
                # asking its context objects for the parents
                for pctx in repo[currev].parents()[:cut]:
                    if pctx.rev() != node.nullrev:
                        heapq.heappush(pendingheap, (-pctx.rev(), pdepth))
76
76
def revancestors(repo, revs, followfirst, startdepth=None, stopdepth=None):
    """Like revlog.ancestors(), but supports additional options, includes
    the given revs themselves, and returns a smartset

    Scan ends at the stopdepth (exclusive) if specified. Revisions found
    earlier than the startdepth are omitted.
    """
    # the generator yields in descending order, hence iterasc=False
    return generatorset(
        _genrevancestors(repo, revs, followfirst, startdepth, stopdepth),
        iterasc=False)
86
86
def _genrevdescendants(repo, revs, followfirst):
    """Yield descendants of 'revs' (inclusive) in ascending order.

    'revs' must be a smartset (it must provide min()).  If followfirst is
    true, only first parents are considered when deciding descent.
    """
    cut = 1 if followfirst else None

    cl = repo.changelog
    # smartset.min() is cheap; no need to materialize the set just to scan
    # for its minimum (the old 'min(revs)' did exactly that)
    first = revs.min()
    nullrev = node.nullrev
    if first == nullrev:
        # Are there nodes with a null first parent and a non-null
        # second one? Maybe. Do we care? Probably not.
        yield first
        for i in cl:
            yield i
    else:
        seen = set(revs)
        for i in cl.revs(first):
            if i in seen:
                yield i
                continue
            # a rev descends from 'revs' iff one of its (followed) parents
            # is already known to descend from them
            for x in cl.parentrevs(i)[:cut]:
                if x != nullrev and x in seen:
                    seen.add(i)
                    yield i
                    break
115
113
def revdescendants(repo, revs, followfirst):
    """Like revlog.descendants() but supports additional options, includes
    the given revs themselves, and returns a smartset"""
    # the generator yields in ascending order, hence iterasc=True
    return generatorset(_genrevdescendants(repo, revs, followfirst),
                        iterasc=True)
121
119
122 def _reachablerootspure(repo, minroot, roots, heads, includepath):
120 def _reachablerootspure(repo, minroot, roots, heads, includepath):
123 """return (heads(::<roots> and ::<heads>))
121 """return (heads(::<roots> and ::<heads>))
124
122
125 If includepath is True, return (<roots>::<heads>)."""
123 If includepath is True, return (<roots>::<heads>)."""
126 if not roots:
124 if not roots:
127 return []
125 return []
128 parentrevs = repo.changelog.parentrevs
126 parentrevs = repo.changelog.parentrevs
129 roots = set(roots)
127 roots = set(roots)
130 visit = list(heads)
128 visit = list(heads)
131 reachable = set()
129 reachable = set()
132 seen = {}
130 seen = {}
133 # prefetch all the things! (because python is slow)
131 # prefetch all the things! (because python is slow)
134 reached = reachable.add
132 reached = reachable.add
135 dovisit = visit.append
133 dovisit = visit.append
136 nextvisit = visit.pop
134 nextvisit = visit.pop
137 # open-code the post-order traversal due to the tiny size of
135 # open-code the post-order traversal due to the tiny size of
138 # sys.getrecursionlimit()
136 # sys.getrecursionlimit()
139 while visit:
137 while visit:
140 rev = nextvisit()
138 rev = nextvisit()
141 if rev in roots:
139 if rev in roots:
142 reached(rev)
140 reached(rev)
143 if not includepath:
141 if not includepath:
144 continue
142 continue
145 parents = parentrevs(rev)
143 parents = parentrevs(rev)
146 seen[rev] = parents
144 seen[rev] = parents
147 for parent in parents:
145 for parent in parents:
148 if parent >= minroot and parent not in seen:
146 if parent >= minroot and parent not in seen:
149 dovisit(parent)
147 dovisit(parent)
150 if not reachable:
148 if not reachable:
151 return baseset()
149 return baseset()
152 if not includepath:
150 if not includepath:
153 return reachable
151 return reachable
154 for rev in sorted(seen):
152 for rev in sorted(seen):
155 for parent in seen[rev]:
153 for parent in seen[rev]:
156 if parent in reachable:
154 if parent in reachable:
157 reached(rev)
155 reached(rev)
158 return reachable
156 return reachable
159
157
def reachableroots(repo, roots, heads, includepath=False):
    """Return heads(::<roots> and ::<heads>) as a sorted baseset.

    If includepath is True, return <roots>::<heads> instead."""
    if not roots:
        return baseset()
    minroot = roots.min()
    roots = list(roots)
    heads = list(heads)
    try:
        # prefer the C implementation on the changelog when available
        revs = repo.changelog.reachableroots(minroot, heads, roots, includepath)
    except AttributeError:
        revs = _reachablerootspure(repo, minroot, roots, heads, includepath)
    revs = baseset(revs)
    revs.sort()
    return revs
176
174
def _changesrange(fctx1, fctx2, linerange2, diffopts):
    """Return `(diffinrange, linerange1)` where `diffinrange` is True
    if diff from fctx2 to fctx1 has changes in linerange2 and
    `linerange1` is the new line range for fctx1.
    """
    blocks = mdiff.allblocks(fctx1.data(), fctx2.data(), diffopts)
    filteredblocks, linerange1 = mdiff.blocksinrange(blocks, linerange2)
    # '!' marks a changed block; equal blocks never count as a change
    diffinrange = any(btype == '!' for _, btype in filteredblocks)
    return diffinrange, linerange1
186
184
def blockancestors(fctx, fromline, toline, followfirst=False):
    """Yield ancestors of `fctx` with respect to the block of lines within
    `fromline`-`toline` range.
    """
    diffopts = patch.diffopts(fctx._repo.ui)
    introrev = fctx.introrev()
    if fctx.rev() != introrev:
        fctx = fctx.filectx(fctx.filenode(), changeid=introrev)
    # map (linkrev, filenode) -> (filectx, line range), walked newest first
    visit = {(fctx.linkrev(), fctx.filenode()): (fctx, (fromline, toline))}
    while visit:
        ctx, linerange2 = visit.pop(max(visit))
        parents = ctx.parents()
        if followfirst:
            parents = parents[:1]
        if not parents:
            # The block originates from the initial revision.
            yield ctx, linerange2
            continue
        inrange = False
        for p in parents:
            inrangep, linerange1 = _changesrange(p, ctx, linerange2, diffopts)
            inrange = inrange or inrangep
            if linerange1[0] == linerange1[1]:
                # Parent's linerange is empty, meaning that the block got
                # introduced in this revision; no need to go further in this
                # branch.
                continue
            # Set _descendantrev with 'ctx' (a known descendant) so that,
            # when _adjustlinkrev is called for 'p', it receives this
            # descendant (as srcrev) instead possibly topmost introrev.
            p._descendantrev = ctx.rev()
            visit[p.linkrev(), p.filenode()] = p, linerange1
        if inrange:
            yield ctx, linerange2
221
219
def blockdescendants(fctx, fromline, toline):
    """Yield descendants of `fctx` with respect to the block of lines within
    `fromline`-`toline` range.
    """
    # First possibly yield 'fctx' itself, if it has changes in range with
    # respect to its own parents.
    try:
        ctx, linerange1 = next(blockancestors(fctx, fromline, toline))
    except StopIteration:
        pass
    else:
        if ctx == fctx:
            yield ctx, linerange1

    diffopts = patch.diffopts(fctx._repo.ui)
    fl = fctx.filelog()
    # map filerev -> (filectx, line range) for already-processed revisions
    seen = {fctx.filerev(): (fctx, (fromline, toline))}
    for frev in fl.descendants([fctx.filerev()]):
        ctx = fctx.filectx(frev)
        inrange = False
        for prev in fl.parentrevs(frev):
            try:
                p, linerange2 = seen[prev]
            except KeyError:
                # nullrev or other branch
                continue
            inrangep, linerange1 = _changesrange(ctx, p, linerange2, diffopts)
            inrange = inrange or inrangep
            # If revision 'frev' has been seen (it's a merge), we assume
            # that its line range is the same independently of which parent
            # was used to compute it.
            assert frev not in seen or seen[frev][1] == linerange1, (
                'computed line range for %s is not consistent between '
                'ancestor branches' % ctx)
            seen[frev] = ctx, linerange1
        if inrange:
            yield ctx, linerange1
259
257
def toposort(revs, parentsfunc, firstbranch=()):
    """Yield revisions from heads to roots one (topo) branch at a time.

    This function aims to be used by a graph generator that wishes to
    minimize the number of parallel branches and their interleaving.

    Example iteration order (numbers show the "true" order in a changelog):

      o  4
      |
      o  1
      |
      | o  3
      | |
      | o  2
      |/
      o  0

    Note that the ancestors of merges are understood by the current
    algorithm to be on the same branch. This means no reordering will
    occur behind a merge.
    """
    # Algorithm summary: revisions are held back in "retention" groups
    # until the whole branch they belong to can be emitted right where it
    # merges into an already-emitted branch; this minimizes interleaving.
    # For each rev we either (1) flush a retention group that was blocked
    # on it, (2) append it to a group awaiting it, or (3) open a new group
    # for a fresh head.  Emitting the tipmost revision first bootstraps
    # the process.

    revs.sort(reverse=True)

    # Parents of already-emitted revisions.  A group blocked only on these
    # can be emitted immediately.  Pre-seeding this set (via 'firstbranch')
    # forces every other branch to wait until that branch is ready.
    unblocked = set(firstbranch)

    # Pending groups, each a pair (grevs, blocked):
    #   grevs:   revisions retained for later emission
    #   blocked: parents outside the group that must be shown before
    #            anything in 'grevs'
    # groups[0] is special: its revision list stays empty and its blocked
    # set *is* 'unblocked', the parents of everything already emitted.
    # Current limitation: once a merge is reached, any parent is treated as
    # part of the same branch, so no reordering happens behind a merge.
    groups = [([], unblocked)]
    pendingheap = []
    pendingset = set()

    heapq.heapify(pendingheap)
    heappop = heapq.heappop
    heappush = heapq.heappush
    for currentrev in revs:
        # the heap pops its smallest element; negate to walk highest-first
        if currentrev not in pendingset:
            heappush(pendingheap, -currentrev)
            pendingset.add(currentrev)
        # drain pending revs until the current rev itself is processed
        rev = None
        while rev != currentrev:
            rev = -heappop(pendingheap)
            pendingset.remove(rev)

            # find every group blocked on (awaiting) this revision
            matching = [i for i, g in enumerate(groups) if rev in g[1]]

            if matching:
                # Merge all groups blocked on 'rev' into the oldest one.
                # (They could have been merged as soon as the shared
                # blocker appeared, but doing it here keeps things simple;
                # keeping the oldest group first is a stable heuristic.)
                targetidx = matching.pop(0)
                trevs, tparents = groups[targetidx]
                for i in matching:
                    gr = groups[i]
                    trevs.extend(gr[0])
                    tparents |= gr[1]
                # delete the merged-away groups, last-to-first so the
                # remaining indexes stay valid
                for i in reversed(matching):
                    del groups[i]
            else:
                # a new head: open a fresh group for it
                targetidx = len(groups)
                groups.append(([], {rev}))

            gr = groups[targetidx]

            # Record the rev in its group *after* the merge above, so that
            # retained revisions depending on it keep preceding it, and
            # re-block the group on this rev's parents.
            if rev == currentrev:  # only display stuff in rev
                gr[0].append(rev)
            gr[1].remove(rev)
            parents = [p for p in parentsfunc(rev) if p > node.nullrev]
            gr[1].update(parents)
            for p in parents:
                if p not in pendingset:
                    pendingset.add(p)
                    heappush(pendingheap, -p)

            # Decide whether a group can be emitted now.  When 'unblocked'
            # is empty (first iteration with no priority seed, or a whole
            # disconnected part of the graph was just finished) we
            # arbitrarily pick the oldest pending group; otherwise the
            # current group is emitted only if it is blocked solely on
            # revisions that were already emitted.
            if not unblocked:
                if len(groups) > 1:  # display other subset
                    targetidx = 1
                    gr = groups[1]
            elif not gr[1] & unblocked:
                gr = None

            if gr is not None:
                # the group's awaited revisions become awaited overall
                unblocked |= gr[1]
                # flush every retained revision of the group
                for r in gr[0]:
                    yield r
                # drop the emitted group, except groups[0] which is only
                # ever emptied in place
                if targetidx:
                    del groups[targetidx]
                else:
                    gr[0][:] = []
    # flush groups still waiting on revisions we will never iterate over
    for g in groups:
        for r in g[0]:
            yield r
General Comments 0
You need to be logged in to leave comments. Login now