dagop: move lines() out of annotate()
Yuya Nishihara
r36937:8fba3197 default
@@ -1,703 +1,703 @@
1 1 # dagop.py - graph ancestry and topology algorithm for revset
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import heapq
11 11
12 12 from .thirdparty import (
13 13 attr,
14 14 )
15 15 from . import (
16 16 error,
17 17 mdiff,
18 18 node,
19 19 patch,
20 20 pycompat,
21 21 smartset,
22 22 )
23 23
24 24 baseset = smartset.baseset
25 25 generatorset = smartset.generatorset
26 26
27 27 # possible maximum depth between null and wdir()
28 28 _maxlogdepth = 0x80000000
29 29
30 30 def _walkrevtree(pfunc, revs, startdepth, stopdepth, reverse):
31 31 """Walk DAG using 'pfunc' from the given 'revs' nodes
32 32
33 33 'pfunc(rev)' should return the parent/child revisions of the given 'rev'
34 34 if 'reverse' is True/False respectively.
35 35
36 36 Scan ends at the stopdepth (exclusive) if specified. Revisions found
37 37 earlier than the startdepth are omitted.
38 38 """
39 39 if startdepth is None:
40 40 startdepth = 0
41 41 if stopdepth is None:
42 42 stopdepth = _maxlogdepth
43 43 if stopdepth == 0:
44 44 return
45 45 if stopdepth < 0:
46 46 raise error.ProgrammingError('negative stopdepth')
47 47 if reverse:
48 48 heapsign = -1 # max heap
49 49 else:
50 50 heapsign = +1 # min heap
51 51
52 52 # load input revs lazily to heap so earlier revisions can be yielded
53 53 # without fully computing the input revs
54 54 revs.sort(reverse)
55 55 irevs = iter(revs)
56 56 pendingheap = [] # [(heapsign * rev, depth), ...] (i.e. lower depth first)
57 57
58 58 inputrev = next(irevs, None)
59 59 if inputrev is not None:
60 60 heapq.heappush(pendingheap, (heapsign * inputrev, 0))
61 61
62 62 lastrev = None
63 63 while pendingheap:
64 64 currev, curdepth = heapq.heappop(pendingheap)
65 65 currev = heapsign * currev
66 66 if currev == inputrev:
67 67 inputrev = next(irevs, None)
68 68 if inputrev is not None:
69 69 heapq.heappush(pendingheap, (heapsign * inputrev, 0))
70 70 # rescan parents until curdepth >= startdepth because queued entries
71 71 # of the same revision are iterated from the lowest depth
72 72 foundnew = (currev != lastrev)
73 73 if foundnew and curdepth >= startdepth:
74 74 lastrev = currev
75 75 yield currev
76 76 pdepth = curdepth + 1
77 77 if foundnew and pdepth < stopdepth:
78 78 for prev in pfunc(currev):
79 79 if prev != node.nullrev:
80 80 heapq.heappush(pendingheap, (heapsign * prev, pdepth))
81 81
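
The depth-limited heap walk above is easiest to see on a toy graph. Below is a standalone sketch of the reverse=True case (plain dicts instead of a changelog; `walk` and the parent map are hypothetical, not Mercurial's API), showing why entries for one rev pop consecutively with the lowest depth first and why parents are rescanned until curdepth >= startdepth:

    import heapq

    # Toy parent map for a linear DAG: 3 -> 2 -> 1 -> 0
    parents = {3: [2], 2: [1], 1: [0], 0: []}

    def walk(pfunc, startrevs, startdepth, stopdepth):
        # mirrors _walkrevtree(reverse=True): heap entries are (-rev, depth),
        # so entries for one rev surface together, shallowest first
        heap = [(-r, 0) for r in startrevs]
        heapq.heapify(heap)
        lastrev = None
        while heap:
            negrev, depth = heapq.heappop(heap)
            rev = -negrev
            foundnew = rev != lastrev
            if foundnew and depth >= startdepth:
                lastrev = rev
                yield rev
            if foundnew and depth + 1 < stopdepth:
                for p in pfunc(rev):
                    heapq.heappush(heap, (-p, depth + 1))

    print(list(walk(parents.get, [3], startdepth=1, stopdepth=3)))  # [2, 1]
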
82 82 def filectxancestors(fctxs, followfirst=False):
83 83 """Like filectx.ancestors(), but can walk from multiple files/revisions,
84 84 and includes the given fctxs themselves
85 85
86 86 Yields (rev, {fctx, ...}) pairs in descending order.
87 87 """
88 88 visit = {}
89 89 visitheap = []
90 90 def addvisit(fctx):
91 91 rev = fctx.rev()
92 92 if rev not in visit:
93 93 visit[rev] = set()
94 94 heapq.heappush(visitheap, -rev) # max heap
95 95 visit[rev].add(fctx)
96 96
97 97 if followfirst:
98 98 cut = 1
99 99 else:
100 100 cut = None
101 101
102 102 for c in fctxs:
103 103 addvisit(c)
104 104 while visit:
105 105 currev = -heapq.heappop(visitheap)
106 106 curfctxs = visit.pop(currev)
107 107 yield currev, curfctxs
108 108 for c in curfctxs:
109 109 for parent in c.parents()[:cut]:
110 110 addvisit(parent)
111 111 assert not visitheap
112 112
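
Since filectxancestors() only calls rev() and parents() on its inputs, a minimal stand-in object is enough to exercise it. A hedged illustration (FakeFctx is made up; assumes a Mercurial installation provides this module as mercurial.dagop):

    from mercurial import dagop

    class FakeFctx(object):
        # hypothetical stand-in exposing only what filectxancestors uses
        def __init__(self, rev, parents=()):
            self._rev, self._parents = rev, list(parents)
        def rev(self):
            return self._rev
        def parents(self):
            return self._parents

    root = FakeFctx(0)
    left, right = FakeFctx(2, [root]), FakeFctx(5, [root])
    for rev, fctxs in dagop.filectxancestors([left, right]):
        print('%d %s' % (rev, sorted(f.rev() for f in fctxs)))
    # 5 [5]
    # 2 [2]
    # 0 [0]  <- reached from both sides, but yielded once
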
113 113 def filerevancestors(fctxs, followfirst=False):
114 114 """Like filectx.ancestors(), but can walk from multiple files/revisions,
115 115 and includes the given fctxs themselves
116 116
117 117 Returns a smartset.
118 118 """
119 119 gen = (rev for rev, _cs in filectxancestors(fctxs, followfirst))
120 120 return generatorset(gen, iterasc=False)
121 121
122 122 def _genrevancestors(repo, revs, followfirst, startdepth, stopdepth, cutfunc):
123 123 if followfirst:
124 124 cut = 1
125 125 else:
126 126 cut = None
127 127 cl = repo.changelog
128 128 def plainpfunc(rev):
129 129 try:
130 130 return cl.parentrevs(rev)[:cut]
131 131 except error.WdirUnsupported:
132 132 return (pctx.rev() for pctx in repo[rev].parents()[:cut])
133 133 if cutfunc is None:
134 134 pfunc = plainpfunc
135 135 else:
136 136 pfunc = lambda rev: [r for r in plainpfunc(rev) if not cutfunc(r)]
137 137 revs = revs.filter(lambda rev: not cutfunc(rev))
138 138 return _walkrevtree(pfunc, revs, startdepth, stopdepth, reverse=True)
139 139
140 140 def revancestors(repo, revs, followfirst=False, startdepth=None,
141 141 stopdepth=None, cutfunc=None):
142 142 """Like revlog.ancestors(), but supports additional options, includes
143 143 the given revs themselves, and returns a smartset
144 144
145 145 Scan ends at the stopdepth (exclusive) if specified. Revisions found
146 146 earlier than the startdepth are omitted.
147 147
148 148 If cutfunc is provided, it will be used to cut the traversal of the DAG.
149 149 When cutfunc(X) returns True, the DAG traversal stops - revision X and
150 150 X's ancestors in the traversal path will be skipped. This could be an
151 151 optimization sometimes.
152 152
153 153 Note: if Y is an ancestor of X, cutfunc(X) returning True does not
154 154 necessarily mean Y will also be cut. Usually cutfunc(Y) also wants to
155 155 return True in this case. For example,
156 156
157 157 D # revancestors(repo, D, cutfunc=lambda rev: rev == B)
158 158 |\ # will include "A", because the path D -> C -> A was not cut.
159 159 B C # If "B" gets cut, "A" might want to be cut too.
160 160 |/
161 161 A
162 162 """
163 163 gen = _genrevancestors(repo, revs, followfirst, startdepth, stopdepth,
164 164 cutfunc)
165 165 return generatorset(gen, iterasc=False)
166 166
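
To make the cutfunc mechanics concrete, here is a standalone sketch of the pfunc composition done in _genrevancestors above (plain dicts, hypothetical names):

    # 4 merges 3 and 2; cutting rev 2 prunes that leg of the traversal
    plainparents = {4: [3, 2], 3: [1], 2: [1], 1: [0], 0: []}.get
    cutfunc = lambda rev: rev == 2
    pfunc = lambda rev: [r for r in plainparents(rev) if not cutfunc(r)]
    print(pfunc(4))  # [3] -- rev 2 is never queued; its ancestor 1 is
                     # still reached via 3, matching the docstring's caveat
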
167 167 def _genrevdescendants(repo, revs, followfirst):
168 168 if followfirst:
169 169 cut = 1
170 170 else:
171 171 cut = None
172 172
173 173 cl = repo.changelog
174 174 first = revs.min()
175 175 nullrev = node.nullrev
176 176 if first == nullrev:
177 177 # Are there nodes with a null first parent and a non-null
178 178 # second one? Maybe. Do we care? Probably not.
179 179 yield first
180 180 for i in cl:
181 181 yield i
182 182 else:
183 183 seen = set(revs)
184 184 for i in cl.revs(first):
185 185 if i in seen:
186 186 yield i
187 187 continue
188 188 for x in cl.parentrevs(i)[:cut]:
189 189 if x != nullrev and x in seen:
190 190 seen.add(i)
191 191 yield i
192 192 break
193 193
194 194 def _builddescendantsmap(repo, startrev, followfirst):
195 195 """Build map of 'rev -> child revs', offset from startrev"""
196 196 cl = repo.changelog
197 197 nullrev = node.nullrev
198 198 descmap = [[] for _rev in xrange(startrev, len(cl))]
199 199 for currev in cl.revs(startrev + 1):
200 200 p1rev, p2rev = cl.parentrevs(currev)
201 201 if p1rev >= startrev:
202 202 descmap[p1rev - startrev].append(currev)
203 203 if not followfirst and p2rev != nullrev and p2rev >= startrev:
204 204 descmap[p2rev - startrev].append(currev)
205 205 return descmap
206 206
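
The only subtlety in _builddescendantsmap() is the offset indexing; a tiny standalone sketch with toy numbers:

    startrev = 5
    descmap = [[] for _ in range(startrev, 8)]  # slots for revs 5, 6, 7
    for child, p1 in [(6, 5), (7, 6)]:          # toy parent edges
        descmap[p1 - startrev].append(child)    # children of rev r live at index r - startrev
    print(descmap)  # [[6], [7], []]
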
207 207 def _genrevdescendantsofdepth(repo, revs, followfirst, startdepth, stopdepth):
208 208 startrev = revs.min()
209 209 descmap = _builddescendantsmap(repo, startrev, followfirst)
210 210 def pfunc(rev):
211 211 return descmap[rev - startrev]
212 212 return _walkrevtree(pfunc, revs, startdepth, stopdepth, reverse=False)
213 213
214 214 def revdescendants(repo, revs, followfirst, startdepth=None, stopdepth=None):
215 215 """Like revlog.descendants() but supports additional options, includes
216 216 the given revs themselves, and returns a smartset
217 217
218 218 Scan ends at the stopdepth (exclusive) if specified. Revisions found
219 219 earlier than the startdepth are omitted.
220 220 """
221 221 if startdepth is None and stopdepth is None:
222 222 gen = _genrevdescendants(repo, revs, followfirst)
223 223 else:
224 224 gen = _genrevdescendantsofdepth(repo, revs, followfirst,
225 225 startdepth, stopdepth)
226 226 return generatorset(gen, iterasc=True)
227 227
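
The unbounded case relies on the seen-set propagation in _genrevdescendants(): a rev is emitted iff it is an input or one of its (followed) parents was already emitted. A standalone sketch with a toy parent map:

    parentrevs = {0: [], 1: [0], 2: [1], 3: [0]}
    seen = {1}                    # as if revs == {1}
    out = []
    for i in sorted(parentrevs):  # scan in numeric (topological) order
        if i in seen:
            out.append(i)
            continue
        if any(p in seen for p in parentrevs[i]):
            seen.add(i)
            out.append(i)
    print(out)  # [1, 2] -- rev 3 descends only from 0, so it is skipped
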
228 228 def _reachablerootspure(repo, minroot, roots, heads, includepath):
229 229 """return (heads(::<roots> and ::<heads>))
230 230
231 231 If includepath is True, return (<roots>::<heads>)."""
232 232 if not roots:
233 233 return []
234 234 parentrevs = repo.changelog.parentrevs
235 235 roots = set(roots)
236 236 visit = list(heads)
237 237 reachable = set()
238 238 seen = {}
239 239 # prefetch all the things! (because python is slow)
240 240 reached = reachable.add
241 241 dovisit = visit.append
242 242 nextvisit = visit.pop
243 243 # open-code the post-order traversal due to the tiny size of
244 244 # sys.getrecursionlimit()
245 245 while visit:
246 246 rev = nextvisit()
247 247 if rev in roots:
248 248 reached(rev)
249 249 if not includepath:
250 250 continue
251 251 parents = parentrevs(rev)
252 252 seen[rev] = parents
253 253 for parent in parents:
254 254 if parent >= minroot and parent not in seen:
255 255 dovisit(parent)
256 256 if not reachable:
257 257 return baseset()
258 258 if not includepath:
259 259 return reachable
260 260 for rev in sorted(seen):
261 261 for parent in seen[rev]:
262 262 if parent in reachable:
263 263 reached(rev)
264 264 return reachable
265 265
266 266 def reachableroots(repo, roots, heads, includepath=False):
267 267 """return (heads(::<roots> and ::<heads>))
268 268
269 269 If includepath is True, return (<roots>::<heads>)."""
270 270 if not roots:
271 271 return baseset()
272 272 minroot = roots.min()
273 273 roots = list(roots)
274 274 heads = list(heads)
275 275 try:
276 276 revs = repo.changelog.reachableroots(minroot, heads, roots, includepath)
277 277 except AttributeError:
278 278 revs = _reachablerootspure(repo, minroot, roots, heads, includepath)
279 279 revs = baseset(revs)
280 280 revs.sort()
281 281 return revs
282 282
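
The includepath flag is the part worth pinning down. A condensed standalone version of the pure algorithm on a toy graph (the minroot fast-path is omitted; all names hypothetical):

    parentrevs = {0: [], 1: [0], 2: [1], 3: [0]}.__getitem__

    def reach(roots, heads, includepath):
        roots, reachable, seen, visit = set(roots), set(), {}, list(heads)
        while visit:                   # iterative DFS from the heads
            rev = visit.pop()
            if rev in roots:
                reachable.add(rev)
                if not includepath:
                    continue           # heads(::roots and ::heads) only
            seen[rev] = parentrevs(rev)
            for p in seen[rev]:
                if p not in seen:
                    visit.append(p)
        if includepath:                # second pass grows roots::heads upward
            for rev in sorted(seen):
                if any(p in reachable for p in seen[rev]):
                    reachable.add(rev)
        return reachable

    print(sorted(reach({0}, {2}, False)))  # [0]
    print(sorted(reach({0}, {2}, True)))   # [0, 1, 2]
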
283 283 def _changesrange(fctx1, fctx2, linerange2, diffopts):
284 284 """Return `(diffinrange, linerange1)` where `diffinrange` is True
285 285 if diff from fctx2 to fctx1 has changes in linerange2 and
286 286 `linerange1` is the new line range for fctx1.
287 287 """
288 288 blocks = mdiff.allblocks(fctx1.data(), fctx2.data(), diffopts)
289 289 filteredblocks, linerange1 = mdiff.blocksinrange(blocks, linerange2)
290 290 diffinrange = any(stype == '!' for _, stype in filteredblocks)
291 291 return diffinrange, linerange1
292 292
293 293 def blockancestors(fctx, fromline, toline, followfirst=False):
294 294 """Yield ancestors of `fctx` with respect to the block of lines within
295 295 `fromline`-`toline` range.
296 296 """
297 297 diffopts = patch.diffopts(fctx._repo.ui)
298 298 fctx = fctx.introfilectx()
299 299 visit = {(fctx.linkrev(), fctx.filenode()): (fctx, (fromline, toline))}
300 300 while visit:
301 301 c, linerange2 = visit.pop(max(visit))
302 302 pl = c.parents()
303 303 if followfirst:
304 304 pl = pl[:1]
305 305 if not pl:
306 306 # The block originates from the initial revision.
307 307 yield c, linerange2
308 308 continue
309 309 inrange = False
310 310 for p in pl:
311 311 inrangep, linerange1 = _changesrange(p, c, linerange2, diffopts)
312 312 inrange = inrange or inrangep
313 313 if linerange1[0] == linerange1[1]:
314 314 # Parent's linerange is empty, meaning that the block got
315 315 # introduced in this revision; no need to go further in this
316 316 # branch.
317 317 continue
318 318 # Set _descendantrev with 'c' (a known descendant) so that, when
319 319 # _adjustlinkrev is called for 'p', it receives this descendant
320 320 # (as srcrev) instead of the possibly-topmost introrev.
321 321 p._descendantrev = c.rev()
322 322 visit[p.linkrev(), p.filenode()] = p, linerange1
323 323 if inrange:
324 324 yield c, linerange2
325 325
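
One detail that is easy to miss in blockancestors(): visit is a plain dict keyed by (linkrev, filenode), and popping max(visit) turns it into a priority queue that always advances from the most recent revision. A two-line demonstration with toy keys:

    visit = {(10, 'n1'): 'state-a', (7, 'n2'): 'state-b'}
    print(visit.pop(max(visit)))  # 'state-a' -- the highest linkrev wins
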
326 326 def blockdescendants(fctx, fromline, toline):
327 327 """Yield descendants of `fctx` with respect to the block of lines within
328 328 `fromline`-`toline` range.
329 329 """
330 330 # First possibly yield 'fctx' if it has changes in range with respect to
331 331 # its parents.
332 332 try:
333 333 c, linerange1 = next(blockancestors(fctx, fromline, toline))
334 334 except StopIteration:
335 335 pass
336 336 else:
337 337 if c == fctx:
338 338 yield c, linerange1
339 339
340 340 diffopts = patch.diffopts(fctx._repo.ui)
341 341 fl = fctx.filelog()
342 342 seen = {fctx.filerev(): (fctx, (fromline, toline))}
343 343 for i in fl.descendants([fctx.filerev()]):
344 344 c = fctx.filectx(i)
345 345 inrange = False
346 346 for x in fl.parentrevs(i):
347 347 try:
348 348 p, linerange2 = seen[x]
349 349 except KeyError:
350 350 # nullrev or other branch
351 351 continue
352 352 inrangep, linerange1 = _changesrange(c, p, linerange2, diffopts)
353 353 inrange = inrange or inrangep
354 354 # If revision 'i' has been seen (it's a merge) and the line range
355 355 # previously computed differs from the one we just got, we take the
356 356 # surrounding interval. This is conservative but avoids losing
357 357 # information.
358 358 if i in seen and seen[i][1] != linerange1:
359 359 lbs, ubs = zip(linerange1, seen[i][1])
360 360 linerange1 = min(lbs), max(ubs)
361 361 seen[i] = c, linerange1
362 362 if inrange:
363 363 yield c, linerange1
364 364
365 365 @attr.s(slots=True, frozen=True)
366 366 class annotateline(object):
367 367 fctx = attr.ib()
368 368 lineno = attr.ib(default=False)
369 369 # Whether this annotation was the result of a skip-annotate.
370 370 skip = attr.ib(default=False)
371 371
372 def _countlines(text):
373 if text.endswith("\n"):
374 return text.count("\n")
375 return text.count("\n") + int(bool(text))
376
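
The extracted helper (the subject of this changeset) counts logical lines: a trailing newline does not open another line, while a non-empty unterminated tail still counts as one. A quick check, assuming this revision of the module is importable:

    from mercurial.dagop import _countlines

    assert _countlines("a\nb\n") == 2  # terminated text: just count the "\n"s
    assert _countlines("a\nb") == 2    # unterminated tail counts as a line
    assert _countlines("") == 0        # empty text has no lines
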
372 377 def _annotatepair(parents, childfctx, child, skipchild, diffopts):
373 378 r'''
374 379 Given parent and child fctxes and annotate data for parents, for all lines
375 380 in either parent that match the child, annotate the child with the parent's
376 381 data.
377 382
378 383 Additionally, if `skipchild` is True, replace all other lines with parent
379 384 annotate data as well such that child is never blamed for any lines.
380 385
381 386 See test-annotate.py for unit tests.
382 387 '''
383 388 pblocks = [(parent, mdiff.allblocks(parent[1], child[1], opts=diffopts))
384 389 for parent in parents]
385 390
386 391 if skipchild:
387 392 # Need to iterate over the blocks twice -- make it a list
388 393 pblocks = [(p, list(blocks)) for (p, blocks) in pblocks]
389 394 # Mercurial currently prefers p2 over p1 for annotate.
390 395 # TODO: change this?
391 396 for parent, blocks in pblocks:
392 397 for (a1, a2, b1, b2), t in blocks:
393 398 # Changed blocks ('!') or blocks made only of blank lines ('~')
394 399 # belong to the child.
395 400 if t == '=':
396 401 child[0][b1:b2] = parent[0][a1:a2]
397 402
398 403 if skipchild:
399 404 # Now try and match up anything that couldn't be matched.
400 405 # Reversing pblocks maintains bias towards p2, matching above
401 406 # behavior.
402 407 pblocks.reverse()
403 408
404 409 # The heuristics are:
405 410 # * Work on blocks of changed lines (effectively diff hunks with -U0).
406 411 # This could potentially be smarter but works well enough.
407 412 # * For a non-matching section, do a best-effort fit. Match lines in
408 413 # diff hunks 1:1, dropping lines as necessary.
409 414 # * Repeat the last line as a last resort.
410 415
411 416 # First, replace as much as possible without repeating the last line.
412 417 remaining = [(parent, []) for parent, _blocks in pblocks]
413 418 for idx, (parent, blocks) in enumerate(pblocks):
414 419 for (a1, a2, b1, b2), _t in blocks:
415 420 if a2 - a1 >= b2 - b1:
416 421 for bk in xrange(b1, b2):
417 422 if child[0][bk].fctx == childfctx:
418 423 ak = min(a1 + (bk - b1), a2 - 1)
419 424 child[0][bk] = attr.evolve(parent[0][ak], skip=True)
420 425 else:
421 426 remaining[idx][1].append((a1, a2, b1, b2))
422 427
423 428 # Then, look at anything left, which might involve repeating the last
424 429 # line.
425 430 for parent, blocks in remaining:
426 431 for a1, a2, b1, b2 in blocks:
427 432 for bk in xrange(b1, b2):
428 433 if child[0][bk].fctx == childfctx:
429 434 ak = min(a1 + (bk - b1), a2 - 1)
430 435 child[0][bk] = attr.evolve(parent[0][ak], skip=True)
431 436 return child
432 437
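
The core of the '=' pass above is a plain slice assignment: for an equal block, child lines b1:b2 inherit the parent's annotations a1:a2 wholesale. A standalone sketch with strings standing in for annotateline objects:

    parent_ann = ['p', 'p', 'p']
    child_ann = ['c', 'c', 'c', 'c']
    a1, a2, b1, b2 = 0, 2, 1, 3           # an '=' block as mdiff reports it
    child_ann[b1:b2] = parent_ann[a1:a2]  # cf. child[0][b1:b2] = parent[0][a1:a2]
    print(child_ann)  # ['c', 'p', 'p', 'c'] -- unmatched lines stay the child's
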
433 438 def annotate(base, parents, linenumber=False, skiprevs=None, diffopts=None):
434 439 """Core algorithm for filectx.annotate()
435 440
436 441 `parents(fctx)` is a function returning a list of parent filectxs.
437 442 """
438 443
439 def lines(text):
440 if text.endswith("\n"):
441 return text.count("\n")
442 return text.count("\n") + int(bool(text))
443
444 444 if linenumber:
445 445 def decorate(text, rev):
446 446 return ([annotateline(fctx=rev, lineno=i)
447 for i in xrange(1, lines(text) + 1)], text)
447 for i in xrange(1, _countlines(text) + 1)], text)
448 448 else:
449 449 def decorate(text, rev):
450 return ([annotateline(fctx=rev)] * lines(text), text)
450 return ([annotateline(fctx=rev)] * _countlines(text), text)
451 451
452 452 # This algorithm would prefer to be recursive, but Python is a
453 453 # bit recursion-hostile. Instead we do an iterative
454 454 # depth-first search.
455 455
456 456 # 1st DFS pre-calculates pcache and needed
457 457 visit = [base]
458 458 pcache = {}
459 459 needed = {base: 1}
460 460 while visit:
461 461 f = visit.pop()
462 462 if f in pcache:
463 463 continue
464 464 pl = parents(f)
465 465 pcache[f] = pl
466 466 for p in pl:
467 467 needed[p] = needed.get(p, 0) + 1
468 468 if p not in pcache:
469 469 visit.append(p)
470 470
471 471 # 2nd DFS does the actual annotate
472 472 visit[:] = [base]
473 473 hist = {}
474 474 while visit:
475 475 f = visit[-1]
476 476 if f in hist:
477 477 visit.pop()
478 478 continue
479 479
480 480 ready = True
481 481 pl = pcache[f]
482 482 for p in pl:
483 483 if p not in hist:
484 484 ready = False
485 485 visit.append(p)
486 486 if ready:
487 487 visit.pop()
488 488 curr = decorate(f.data(), f)
489 489 skipchild = False
490 490 if skiprevs is not None:
491 491 skipchild = f._changeid in skiprevs
492 492 curr = _annotatepair([hist[p] for p in pl], f, curr, skipchild,
493 493 diffopts)
494 494 for p in pl:
495 495 if needed[p] == 1:
496 496 del hist[p]
497 497 del needed[p]
498 498 else:
499 499 needed[p] -= 1
500 500
501 501 hist[f] = curr
502 502 del pcache[f]
503 503
504 504 lineattrs, text = hist[base]
505 505 return pycompat.ziplist(lineattrs, mdiff.splitnewlines(text))
506 506
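
The needed/hist bookkeeping in annotate() is a manual reference count that lets the second DFS free a parent's full annotation as soon as its last child has consumed it. A standalone sketch of just that release step (toy names):

    needed = {'p': 2}                  # parent 'p' feeds two children
    hist = {'p': 'parent annotation'}
    for child in ('c1', 'c2'):
        if needed['p'] == 1:           # last consumer: drop the cached result
            del hist['p']
            del needed['p']
        else:
            needed['p'] -= 1
    print((hist, needed))  # ({}, {}) -- freed right after the second child
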
507 507 def toposort(revs, parentsfunc, firstbranch=()):
508 508 """Yield revisions from heads to roots one (topo) branch at a time.
509 509
510 510 This function aims to be used by a graph generator that wishes to minimize
511 511 the number of parallel branches and their interleaving.
512 512
513 513 Example iteration order (numbers show the "true" order in a changelog):
514 514
515 515 o 4
516 516 |
517 517 o 1
518 518 |
519 519 | o 3
520 520 | |
521 521 | o 2
522 522 |/
523 523 o 0
524 524
525 525 Note that the ancestors of merges are understood by the current
526 526 algorithm to be on the same branch. This means no reordering will
527 527 occur behind a merge.
528 528 """
529 529
530 530 ### Quick summary of the algorithm
531 531 #
532 532 # This function is based around a "retention" principle. We keep revisions
533 533 # in memory until we are ready to emit a whole branch that immediately
534 534 # "merges" into an existing one. This reduces the number of parallel
535 535 # branches with interleaved revisions.
536 536 #
537 537 # During iteration revs are split into two groups:
538 538 # A) revisions already emitted
539 539 # B) revisions in "retention", stored as distinct subgroups.
540 540 #
541 541 # for each REV, we do the following logic:
542 542 #
543 543 # 1) if REV is a parent of (A), we will emit it. If there is a
544 544 # retention group ((B) above) that is blocked on REV being
545 545 # available, we emit all the revisions out of that retention
546 546 # group first.
547 547 #
548 548 # 2) else, we search for a subgroup in (B) waiting for REV to become
549 549 # available; if such a subgroup exists, we add REV to it and the
550 550 # subgroup then waits for REV.parents() to become available.
551 551 #
552 552 # 3) finally, if no such subgroup exists in (B), we create a new one.
553 553 #
554 554 #
555 555 # To bootstrap the algorithm, we emit the tipmost revision (which
556 556 # puts it in group (A) from above).
557 557
558 558 revs.sort(reverse=True)
559 559
560 560 # Set of parents of revisions that have been emitted. They can be considered
561 561 # unblocked as the graph generator is already aware of them so there is no
562 562 # need to delay the revisions that reference them.
563 563 #
564 564 # If someone wants to prioritize a branch over the others, pre-filling this
565 565 # set will force all other branches to wait until this branch is ready to be
566 566 # emitted.
567 567 unblocked = set(firstbranch)
568 568
569 569 # list of groups waiting to be displayed, each group is defined by:
570 570 #
571 571 # (revs: lists of revs waiting to be displayed,
572 572 # blocked: set of revs that cannot be displayed before those in 'revs')
573 573 #
574 574 # The second value ('blocked') corresponds to parents of any revision in the
575 575 # group ('revs') that is not itself contained in the group. The main idea
576 576 # of this algorithm is to delay as much as possible the emission of any
577 577 # revision. This means waiting for the moment we are about to display
578 578 # these parents to display the revs in a group.
579 579 #
580 580 # This first implementation is smart until it encounters a merge: it will
581 581 # emit revs as soon as any parent is about to be emitted and can grow an
582 582 # arbitrary number of revs in 'blocked'. In practice this means we properly
583 583 # retain new branches but give up on any special ordering for ancestors
584 584 # of merges. The implementation can be improved to handle this better.
585 585 #
586 586 # The first subgroup is special. It corresponds to all the revisions that
587 587 # were already emitted. The 'revs' list is expected to be empty and the
588 588 # 'blocked' set contains the parent revisions of already emitted revisions.
589 589 #
590 590 # You could pre-seed the <parents> set of groups[0] with specific
591 591 # changesets to select what the first emitted branch should be.
592 592 groups = [([], unblocked)]
593 593 pendingheap = []
594 594 pendingset = set()
595 595
596 596 heapq.heapify(pendingheap)
597 597 heappop = heapq.heappop
598 598 heappush = heapq.heappush
599 599 for currentrev in revs:
600 600 # The heap pops the smallest element; we want the highest, so we invert
601 601 if currentrev not in pendingset:
602 602 heappush(pendingheap, -currentrev)
603 603 pendingset.add(currentrev)
604 604 # iterate on pending revs until the current rev has been
605 605 # processed.
606 606 rev = None
607 607 while rev != currentrev:
608 608 rev = -heappop(pendingheap)
609 609 pendingset.remove(rev)
610 610
611 611 # Look for a blocked subgroup waiting for the current revision.
612 612 matching = [i for i, g in enumerate(groups) if rev in g[1]]
613 613
614 614 if matching:
615 615 # The main idea is to gather together all sets that are blocked
616 616 # on the same revision.
617 617 #
618 618 # Groups are merged when a common blocking ancestor is
619 619 # observed. For example, given two groups:
620 620 #
621 621 # revs [5, 4] waiting for 1
622 622 # revs [3, 2] waiting for 1
623 623 #
624 624 # These two groups will be merged when we process
625 625 # 1. In theory, we could have merged the groups when
626 626 # we added 2 to the group it is now in (we could have
627 627 # noticed the groups were both blocked on 1 then), but
628 628 # the way it works now makes the algorithm simpler.
629 629 #
630 630 # We also always keep the oldest subgroup first. We can
631 631 # probably improve the behavior by having the longest set
632 632 # first. That way, graph algorithms could minimise the length
633 633 # of parallel lines in their drawing. This is currently not done.
634 634 targetidx = matching.pop(0)
635 635 trevs, tparents = groups[targetidx]
636 636 for i in matching:
637 637 gr = groups[i]
638 638 trevs.extend(gr[0])
639 639 tparents |= gr[1]
640 640 # delete all merged subgroups (except the one we kept)
641 641 # (starting from the last subgroup for performance and
642 642 # sanity reasons)
643 643 for i in reversed(matching):
644 644 del groups[i]
645 645 else:
646 646 # This is a new head. We create a new subgroup for it.
647 647 targetidx = len(groups)
648 648 groups.append(([], {rev}))
649 649
650 650 gr = groups[targetidx]
651 651
652 652 # We now add the current nodes to this subgroup. This is done
653 653 # after the subgroup merging because all elements from a subgroup
654 654 # that relied on this rev must precede it.
655 655 #
656 656 # we also update the <parents> set to include the parents of the
657 657 # new nodes.
658 658 if rev == currentrev: # only display stuff in rev
659 659 gr[0].append(rev)
660 660 gr[1].remove(rev)
661 661 parents = [p for p in parentsfunc(rev) if p > node.nullrev]
662 662 gr[1].update(parents)
663 663 for p in parents:
664 664 if p not in pendingset:
665 665 pendingset.add(p)
666 666 heappush(pendingheap, -p)
667 667
668 668 # Look for a subgroup to display
669 669 #
670 670 # When unblocked is empty (if clause), we were not waiting for any
671 671 # revisions during the first iteration (if no priority was given) or
672 672 # if we emitted a whole disconnected set of the graph (reached a
673 673 # root). In that case we arbitrarily take the oldest known
674 674 # subgroup. The heuristic could probably be better.
675 675 #
676 676 # Otherwise (elif clause) if the subgroup is blocked on
677 677 # a revision we just emitted, we can safely emit it as
678 678 # well.
679 679 if not unblocked:
680 680 if len(groups) > 1: # display other subset
681 681 targetidx = 1
682 682 gr = groups[1]
683 683 elif not gr[1] & unblocked:
684 684 gr = None
685 685
686 686 if gr is not None:
687 687 # update the set of awaited revisions with the one from the
688 688 # subgroup
689 689 unblocked |= gr[1]
690 690 # output all revisions in the subgroup
691 691 for r in gr[0]:
692 692 yield r
693 693 # delete the subgroup we just output,
694 694 # unless it is groups[0], in which case we just empty it.
695 695 if targetidx:
696 696 del groups[targetidx]
697 697 else:
698 698 gr[0][:] = []
699 699 # Check if we have some subgroup waiting for revisions we are not going to
700 700 # iterate over
701 701 for g in groups:
702 702 for r in g[0]:
703 703 yield r
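
The docstring graph can be reproduced directly, since toposort() only needs a list supporting sort() and a parent-lookup callable (assumes a Mercurial installation provides this module; nullrev is -1):

    from mercurial import dagop

    # parent map for the docstring graph: 0 is the root, 4-1 and 3-2 are
    # the two branches, and -1 (nullrev) marks the root's missing parent
    parents = {0: [-1], 1: [0], 2: [0], 3: [2], 4: [1]}
    print(list(dagop.toposort([0, 1, 2, 3, 4], parents.__getitem__)))
    # [4, 1, 3, 2, 0] -- one whole branch at a time, matching the drawing
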