##// END OF EJS Templates
groupbranchiter: indent most of the inner code...
Pierre-Yves David -
r23566:fee7a30c default
parent child Browse files
Show More
@@ -1,532 +1,534
1 1 # Revision graph generator for Mercurial
2 2 #
3 3 # Copyright 2008 Dirkjan Ochtman <dirkjan@ochtman.nl>
4 4 # Copyright 2007 Joel Rosdahl <joel@rosdahl.net>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 """supports walking the history as DAGs suitable for graphical output
10 10
11 11 The most basic format we use is that of::
12 12
13 13 (id, type, data, [parentids])
14 14
15 15 The node and parent ids are arbitrary integers which identify a node in the
16 16 context of the graph returned. Type is a constant specifying the node type.
17 17 Data depends on type.
18 18 """
19 19
20 20 from mercurial.node import nullrev
21 21 import util
22 22
23 23 CHANGESET = 'C'
24 24
def groupbranchiter(revs, parentsfunc):
    """yield revision from heads to roots one (topo) branch after the other.

    This function aims to be used by a graph generator that wishes to minimize
    the amount of parallel branches and their interleaving.

    Example iteration order:

      o  4
      |
      o  1
      |
      | o  3
      | |
      | o  2
      |/
      o  0

    Currently does not handle non-contiguous <revs> input.

    Currently consider every changeset under a merge to be on the same branch
    using revision number to sort them.

    Could be easily extend to give priority to an initial branch.
    """
    ### Quick summary of the algorithm
    #
    # This function is based around a "retention" principle. We keep revisions
    # in memory until we are ready to emit a whole branch that immediately
    # "merges" into an existing one. This reduces the number of parallel
    # branches "ongoing" at the same time.
    #
    # During iteration revs are split into two groups:
    # A) revisions already emitted
    # B) revisions in "retention". They are stored as different subgroups.
    #
    # for each REV, we do the following logic:
    #
    #   a) if REV is a parent of (A), we will emit it. But before emitting it,
    #      we'll "free" all the revs from subgroups in (B) that were waiting
    #      for REV to be available. So we emit all revisions of such subgroups
    #      before emitting REV.
    #
    #   b) else, we'll search for a subgroup in (B) awaiting REV; if such a
    #      subgroup exists, we add REV to it and the subgroup is now awaiting
    #      REV.parents() to be available.
    #
    #   c) finally, if no such group existed in (B), we create a new subgroup.
    #
    # To bootstrap the algorithm, we emit the tipmost revision.

    revs.sort(reverse=True)

    # Set of parents of revisions that have been yielded. They can be
    # considered unblocked as the graph generator is already aware of them so
    # there is no need to delay the ones that reference them.
    unblocked = set()

    # list of groups waiting to be displayed, each group is defined by:
    #
    #   (revs:    list of revs waiting to be displayed,
    #    blocked: set of revs that cannot be displayed before those in 'revs')
    #
    # The second value ('blocked') corresponds to parents of any revision in
    # the group ('revs') that is not itself contained in the group. The main
    # idea of this algorithm is to delay as much as possible the emission of
    # any revision. This means waiting for the moment we are about to display
    # these parents to display the revs in a group.
    #
    # This first implementation is smart until it meets a merge: it will emit
    # revs as soon as any parent is about to be emitted and can grow an
    # arbitrary number of revs in 'blocked'. In practice this means we
    # properly retain new branches but give up on any special ordering for
    # ancestors of merges. The implementation can be improved to handle this
    # better.
    #
    # The first subgroup is special. It corresponds to all the revisions that
    # were already emitted. The 'revs' list is expected to be empty and the
    # 'blocked' set contains the parents of already emitted revisions (it is
    # the same object as 'unblocked' above).
    #
    # You could pre-seed the <parents> set of groups[0] to specific
    # changesets to select what the first emitted branch should be.
    #
    # We do not support revisions with holes yet, but adding such support
    # would be easy. The iteration would have to be done using both input
    # revisions and parents (see cl.ancestors function + a few tweaks) but
    # only revisions part of the initial set should be emitted.
    groups = [([], unblocked)]
    for current in revs:
        # Seek for a subgroup blocked, waiting for the current revision.
        matching = [i for i, g in enumerate(groups) if current in g[1]]

        if matching:
            # The main idea is to gather together all sets that await on the
            # same revision.
            #
            # This merging is done at the time we are about to add this
            # common awaited to the subgroup for simplicity purposes. Such a
            # merge could happen sooner when we update the "blocked" set of
            # revisions.
            #
            # We also always keep the oldest subgroup first. We could
            # probably improve the behavior by having the longest set first;
            # that way, graph algorithms could minimise the length of
            # parallel lines they draw. This is currently not done.
            targetidx = matching.pop(0)
            trevs, tparents = groups[targetidx]
            for i in matching:
                gr = groups[i]
                trevs.extend(gr[0])
                tparents |= gr[1]
            # delete all merged subgroups (but the one we keep), starting
            # from the last subgroup for performance and sanity reasons
            for i in reversed(matching):
                del groups[i]
        else:
            # This is a new head. We create a new subgroup for it.
            targetidx = len(groups)
            groups.append(([], set([current])))

        gr = groups[targetidx]

        # We now add the current node to this subgroup. This is done after
        # the subgroup merging because all elements from a subgroup that
        # relied on this rev must precede it.
        #
        # We also update the <parents> set to include the parents of the
        # new node.
        gr[0].append(current)
        gr[1].remove(current)
        gr[1].update([p for p in parentsfunc(current) if p > nullrev])

        # Look for a subgroup to display
        #
        # When unblocked is empty (if clause), we are not waiting on any
        # revision during the first iteration (if no priority was given) or
        # if we outputted a whole disconnected set of the graph (reached a
        # root). In that case we arbitrarily take the oldest known subgroup.
        # The heuristic could probably be better.
        #
        # Otherwise (elif clause) this means we have some emitted revisions.
        # If the subgroup awaits the same revisions as the outputted ones,
        # we can safely output it.
        if not unblocked:
            if len(groups) > 1:  # display other subset
                targetidx = 1
                gr = groups[1]
        elif not gr[1] & unblocked:
            gr = None

        if gr is not None:
            # update the set of awaited revisions with the ones from the
            # subgroup
            unblocked |= gr[1]
            # output all revisions in the subgroup
            for r in gr[0]:
                yield r
            # delete the subgroup that you just output
            # unless it is groups[0] in which case you just empty it.
            if targetidx:
                del groups[targetidx]
            else:
                gr[0][:] = []
186 188
def dagwalker(repo, revs):
    """cset DAG generator yielding (id, CHANGESET, ctx, [parentids]) tuples

    This generator function walks through revisions (which should be ordered
    from bigger to lower). It returns a tuple for each node. The node and
    parent ids are arbitrary integers which identify a node in the context of
    the graph returned.
    """
    if not revs:
        return

    changelog = repo.changelog
    lowestrev = revs.min()  # taken before any reordering below
    grandparentcache = {}

    # optionally emit the revisions one topological branch at a time
    if repo.ui.configbool('experimental', 'graph-topological', False):
        revs = list(groupbranchiter(revs, repo.changelog.parentrevs))

    for rev in revs:
        ctx = repo[rev]
        prevs = [p.rev() for p in ctx.parents()]
        # direct parents that are part of the displayed set
        parents = sorted(set(p for p in prevs if p in revs))
        # "missing" parents: real parents filtered out of the displayed set
        mpars = [p for p in prevs if p != nullrev and p not in parents]

        for mpar in mpars:
            # walk up from the missing parent to the closest displayed
            # ancestors, caching the (possibly expensive) result per parent
            if mpar not in grandparentcache:
                grandparentcache[mpar] = grandparent(changelog, lowestrev,
                                                     revs, mpar)
            gp = grandparentcache[mpar]
            if not gp:
                parents.append(mpar)
            else:
                for g in gp:
                    if g not in parents:
                        parents.append(g)

        yield (ctx.rev(), CHANGESET, ctx, parents)
222 224
def nodes(repo, nodes):
    """cset DAG generator yielding (id, CHANGESET, ctx, [parentids]) tuples

    This generator function walks the given nodes. It only returns parents
    that are in nodes, too.
    """
    wanted = set(nodes)
    for node in nodes:
        ctx = repo[node]
        # keep only parents that belong to the requested node set
        parentrevs = sorted(set(p.rev() for p in ctx.parents()
                                if p.node() in wanted))
        yield (ctx.rev(), CHANGESET, ctx, parentrevs)
234 236
def colored(dag, repo):
    """annotates a DAG with colored edge information

    For each DAG node this function emits tuples::

      (id, type, data, (col, color), [(col, nextcol, color)])

    with the following new elements:

      - Tuple (col, color) with column and color index for the current node
      - A list of tuples indicating the edges between the current node and its
        parents.
    """
    row = []           # ongoing edges, one revision per column
    colormap = {}      # pending revision -> color index
    nextcolor = 1
    branchconfig = {}

    # read per-branch width/color settings from the [graph] config section
    for key, val in repo.ui.configitems('graph'):
        if '.' not in key:
            continue
        branch, setting = key.rsplit('.', 1)
        # Validation
        if setting == "width" and val.isdigit():
            branchconfig.setdefault(branch, {})[setting] = int(val)
        elif setting == "color" and val.isalnum():
            branchconfig.setdefault(branch, {})[setting] = val

    if branchconfig:
        getconf = util.lrucachefunc(
            lambda rev: branchconfig.get(repo[rev].branch(), {}))
    else:
        getconf = lambda rev: {}

    for (cur, type, data, parents) in dag:

        # Compute seen and next
        if cur not in row:
            row.append(cur)  # new head
            colormap[cur] = nextcolor
            nextcolor += 1

        col = row.index(cur)
        color = colormap.pop(cur)
        next = row[:]

        # Add parents to next
        addparents = [p for p in parents if p not in next]
        next[col:col + 1] = addparents

        # First new parent inherits the node's color; the others get a
        # fresh color each
        for i, p in enumerate(addparents):
            if i == 0:
                colormap[p] = color
            else:
                colormap[p] = nextcolor
                nextcolor += 1

        # Add edges to the graph
        edges = []
        for ecol, eid in enumerate(row):
            if eid in next:
                bconf = getconf(eid)
                edges.append((
                    ecol, next.index(eid), colormap[eid],
                    bconf.get('width', -1),
                    bconf.get('color', '')))
            elif eid == cur:
                for p in parents:
                    bconf = getconf(p)
                    edges.append((
                        ecol, next.index(p), color,
                        bconf.get('width', -1),
                        bconf.get('color', '')))

        # Yield and move on
        yield (cur, type, data, (col, color), edges)
        row = next
312 314
def grandparent(cl, lowestrev, roots, head):
    """Return all ancestors of head in roots which revision is
    greater or equal to lowestrev.
    """
    todo = set([head])
    visited = set()
    found = set()
    # never walk below lowestrev (and never below the null revision)
    floor = max(nullrev, lowestrev)
    while todo:
        rev = todo.pop()
        if rev < floor or rev in visited:
            continue
        visited.add(rev)
        if rev in roots:
            # a displayed ancestor: keep it, do not walk past it
            found.add(rev)
        else:
            todo.update(cl.parentrevs(rev))
    return sorted(found)
330 332
331 333 def asciiedges(type, char, lines, seen, rev, parents):
332 334 """adds edge info to changelog DAG walk suitable for ascii()"""
333 335 if rev not in seen:
334 336 seen.append(rev)
335 337 nodeidx = seen.index(rev)
336 338
337 339 knownparents = []
338 340 newparents = []
339 341 for parent in parents:
340 342 if parent in seen:
341 343 knownparents.append(parent)
342 344 else:
343 345 newparents.append(parent)
344 346
345 347 ncols = len(seen)
346 348 nextseen = seen[:]
347 349 nextseen[nodeidx:nodeidx + 1] = newparents
348 350 edges = [(nodeidx, nextseen.index(p)) for p in knownparents if p != nullrev]
349 351
350 352 while len(newparents) > 2:
351 353 # ascii() only knows how to add or remove a single column between two
352 354 # calls. Nodes with more than two parents break this constraint so we
353 355 # introduce intermediate expansion lines to grow the active node list
354 356 # slowly.
355 357 edges.append((nodeidx, nodeidx))
356 358 edges.append((nodeidx, nodeidx + 1))
357 359 nmorecols = 1
358 360 yield (type, char, lines, (nodeidx, edges, ncols, nmorecols))
359 361 char = '\\'
360 362 lines = []
361 363 nodeidx += 1
362 364 ncols += 1
363 365 edges = []
364 366 del newparents[0]
365 367
366 368 if len(newparents) > 0:
367 369 edges.append((nodeidx, nodeidx))
368 370 if len(newparents) > 1:
369 371 edges.append((nodeidx, nodeidx + 1))
370 372 nmorecols = len(nextseen) - ncols
371 373 seen[:] = nextseen
372 374 yield (type, char, lines, (nodeidx, edges, ncols, nmorecols))
373 375
374 376 def _fixlongrightedges(edges):
375 377 for (i, (start, end)) in enumerate(edges):
376 378 if end > start:
377 379 edges[i] = (start, end + 1)
378 380
379 381 def _getnodelineedgestail(
380 382 node_index, p_node_index, n_columns, n_columns_diff, p_diff, fix_tail):
381 383 if fix_tail and n_columns_diff == p_diff and n_columns_diff != 0:
382 384 # Still going in the same non-vertical direction.
383 385 if n_columns_diff == -1:
384 386 start = max(node_index + 1, p_node_index)
385 387 tail = ["|", " "] * (start - node_index - 1)
386 388 tail.extend(["/", " "] * (n_columns - start))
387 389 return tail
388 390 else:
389 391 return ["\\", " "] * (n_columns - node_index - 1)
390 392 else:
391 393 return ["|", " "] * (n_columns - node_index - 1)
392 394
393 395 def _drawedges(edges, nodeline, interline):
394 396 for (start, end) in edges:
395 397 if start == end + 1:
396 398 interline[2 * end + 1] = "/"
397 399 elif start == end - 1:
398 400 interline[2 * start + 1] = "\\"
399 401 elif start == end:
400 402 interline[2 * start] = "|"
401 403 else:
402 404 if 2 * end >= len(nodeline):
403 405 continue
404 406 nodeline[2 * end] = "+"
405 407 if start > end:
406 408 (start, end) = (end, start)
407 409 for i in range(2 * start + 1, 2 * end):
408 410 if nodeline[i] != "+":
409 411 nodeline[i] = "-"
410 412
411 413 def _getpaddingline(ni, n_columns, edges):
412 414 line = []
413 415 line.extend(["|", " "] * ni)
414 416 if (ni, ni - 1) in edges or (ni, ni) in edges:
415 417 # (ni, ni - 1) (ni, ni)
416 418 # | | | | | | | |
417 419 # +---o | | o---+
418 420 # | | c | | c | |
419 421 # | |/ / | |/ /
420 422 # | | | | | |
421 423 c = "|"
422 424 else:
423 425 c = " "
424 426 line.extend([c, " "])
425 427 line.extend(["|", " "] * (n_columns - ni - 1))
426 428 return line
427 429
def asciistate():
    """returns the initial value for the "state" argument to ascii()"""
    # [previous coldiff, previous node index] — updated by ascii() itself
    state = [0, 0]
    return state
431 433
def ascii(ui, state, type, char, text, coldata):
    """prints an ASCII graph of the DAG

    takes the following arguments (one call per node in the graph):

      - ui to write to
      - Somewhere to keep the needed state in (init to asciistate())
      - Column of the current node in the set of ongoing edges.
      - Type indicator of node data, usually 'C' for changesets.
      - Payload: (char, lines):
        - Character to use as node's symbol.
        - List of lines to display as the node's text.
      - Edges; a list of (col, next_col) indicating the edges between
        the current node and its parents.
      - Number of columns (ongoing edges) in the current revision.
      - The difference between the number of columns (ongoing edges)
        in the next revision and the number of columns (ongoing edges)
        in the current revision. That is: -1 means one column removed;
        0 means no columns added or removed; 1 means one column added.
    """

    # state holds [coldiff, idx] from the previous call (see the
    # assignments at the end of this function and asciistate()).
    idx, edges, ncols, coldiff = coldata
    assert -2 < coldiff < 2
    if coldiff == -1:
        # Transform
        #
        #     | | |        | | |
        #     o | |  into  o---+
        #     |X /         |/ /
        #     | |          | |
        _fixlongrightedges(edges)

    # add_padding_line says whether to rewrite
    #
    #     | | | |        | | | |
    #     | o---+  into  | o---+
    #     |  / /         |   | |  # <--- padding line
    #     o | |          |  / /
    #                    o | |
    add_padding_line = (len(text) > 2 and coldiff == -1 and
                        [x for (x, y) in edges if x + 1 < y])

    # fix_nodeline_tail says whether to rewrite
    #
    #     | | o | |        | | o | |
    #     | | |/ /         | | |/ /
    #     | o | |    into  | o / /   # <--- fixed nodeline tail
    #     | |/ /           | |/ /
    #     o | |            o | |
    fix_nodeline_tail = len(text) <= 2 and not add_padding_line

    # nodeline is the line containing the node character (typically o)
    nodeline = ["|", " "] * idx
    nodeline.extend([char, " "])

    nodeline.extend(
        _getnodelineedgestail(idx, state[1], ncols, coldiff,
                              state[0], fix_nodeline_tail))

    # shift_interline is the line containing the non-vertical
    # edges between this entry and the next
    shift_interline = ["|", " "] * idx
    # choose spacing and edge character from how the column count changes
    if coldiff == -1:
        n_spaces = 1
        edge_ch = "/"
    elif coldiff == 0:
        n_spaces = 2
        edge_ch = "|"
    else:
        n_spaces = 3
        edge_ch = "\\"
    shift_interline.extend(n_spaces * [" "])
    shift_interline.extend([edge_ch, " "] * (ncols - idx - 1))

    # draw edges from the current node to its parents
    _drawedges(edges, nodeline, shift_interline)

    # lines is the list of all graph lines to print
    lines = [nodeline]
    if add_padding_line:
        lines.append(_getpaddingline(idx, ncols, edges))
    lines.append(shift_interline)

    # make sure that there are as many graph lines as there are
    # log strings (note: this pads the caller's 'text' list in place)
    while len(text) < len(lines):
        text.append("")
    if len(lines) < len(text):
        extra_interline = ["|", " "] * (ncols + coldiff)
        while len(lines) < len(text):
            lines.append(extra_interline)

    # print lines
    indentation_level = max(ncols, ncols + coldiff)
    for (line, logstr) in zip(lines, text):
        ln = "%-*s %s" % (2 * indentation_level, "".join(line), logstr)
        ui.write(ln.rstrip() + '\n')

    # ... and start over
    state[0] = coldiff
    state[1] = idx
General Comments 0
You need to be logged in to leave comments. Login now