##// END OF EJS Templates
groupbranchiter: indent most of the inner code...
Pierre-Yves David -
r23566:fee7a30c default
parent child Browse files
Show More
@@ -1,532 +1,534
1 # Revision graph generator for Mercurial
1 # Revision graph generator for Mercurial
2 #
2 #
3 # Copyright 2008 Dirkjan Ochtman <dirkjan@ochtman.nl>
3 # Copyright 2008 Dirkjan Ochtman <dirkjan@ochtman.nl>
4 # Copyright 2007 Joel Rosdahl <joel@rosdahl.net>
4 # Copyright 2007 Joel Rosdahl <joel@rosdahl.net>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 """supports walking the history as DAGs suitable for graphical output
9 """supports walking the history as DAGs suitable for graphical output
10
10
11 The most basic format we use is that of::
11 The most basic format we use is that of::
12
12
13 (id, type, data, [parentids])
13 (id, type, data, [parentids])
14
14
15 The node and parent ids are arbitrary integers which identify a node in the
15 The node and parent ids are arbitrary integers which identify a node in the
16 context of the graph returned. Type is a constant specifying the node type.
16 context of the graph returned. Type is a constant specifying the node type.
17 Data depends on type.
17 Data depends on type.
18 """
18 """
19
19
20 from mercurial.node import nullrev
20 from mercurial.node import nullrev
21 import util
21 import util
22
22
23 CHANGESET = 'C'
23 CHANGESET = 'C'
24
24
def groupbranchiter(revs, parentsfunc):
    """yield revision from heads to roots one (topo) branch after the other.

    This function aims to be used by a graph generator that wishes to minimize
    the amount of parallel branches and their interleaving.

    Example iteration order:

      o  4
      |
      o  1
      |
      | o  3
      | |
      | o  2
      |/
      o  0

    Currently does not handle non-contiguous <revs> input.

    Currently consider every changeset under a merge to be on the same branch
    using revision number to sort them.

    Could be easily extended to give priority to an initial branch."""
    ### Quick summary of the algorithm
    #
    # This function is based around a "retention" principle. We keep revisions
    # in memory until we are ready to emit a whole branch that immediately
    # "merges" into an existing one. This reduces the number of branches
    # "ongoing" at the same time.
    #
    # During iteration revs are split into two groups:
    # A) revisions already emitted
    # B) revisions in "retention". They are stored as different subgroups.
    #
    # for each REV, we do the following logic:
    #
    #   a) if REV is a parent of (A), we will emit it. But before emitting it,
    #   we'll "free" all the revs from subgroups in (B) that were waiting for
    #   REV to be available. So we emit all revisions of such subgroups before
    #   emitting REV.
    #
    #   b) else, we'll search for a subgroup in (B) awaiting REV to be
    #   available; if such a subgroup exists, we add REV to it and the
    #   subgroup is now awaiting REV.parents() to be available.
    #
    #   c) finally if no such group existed in (B), we create a new subgroup.
    #
    #
    # To bootstrap the algorithm, we emit the tipmost revision.

    revs.sort(reverse=True)

    # Set of parents of revisions that have been yielded. They can be
    # considered unblocked as the graph generator is already aware of them so
    # there is no need to delay the ones that reference them.
    unblocked = set()

    # list of groups waiting to be displayed, each group is defined by:
    #
    #   (revs:    lists of revs waiting to be displayed,
    #    blocked: set of revs that cannot be displayed before those in 'revs')
    #
    # The second value ('blocked') corresponds to parents of any revision in
    # the group ('revs') that is not itself contained in the group. The main
    # idea of this algorithm is to delay as much as possible the emission of
    # any revision.  This means waiting for the moment we are about to display
    # these parents to display the revs in a group.
    #
    # This first implementation is smart until it meets a merge: it will emit
    # revs as soon as any parent is about to be emitted and can grow an
    # arbitrary number of revs in 'blocked'. In practice this means we
    # properly retain new branches but give up on any special ordering for
    # ancestors of merges. The implementation can be improved to handle this
    # better.
    #
    # The first subgroup is special. It corresponds to all the revisions that
    # were already emitted. The 'revs' list is expected to be empty and the
    # 'blocked' set contains the parents of already emitted revisions.
    #
    # You could pre-seed the <parents> set of groups[0] to a specific
    # changeset to select what the first emitted branch should be.
    #
    # We do not support revisions with holes yet, but adding such support
    # would be easy. The iteration will have to be done using both input
    # revisions and parents (see cl.ancestors function + a few tweaks) but
    # only revisions part of the initial set should be emitted.
    groups = [([], unblocked)]
    for current in revs:
        # Note: the upstream changeset wrapped this whole body in a dead
        # 'if True:' block purely to minimize a follow-up diff; the wrapper
        # is removed here since it never affects behavior.

        # Seek for a subgroup blocked, waiting for the current revision.
        matching = [i for i, g in enumerate(groups) if current in g[1]]

        if matching:
            # The main idea is to gather together all sets that await on
            # the same revision.
            #
            # This merging is done at the time we are about to add this
            # common awaited revision to the subgroup for simplicity
            # purposes. Such merge could happen sooner when we update the
            # "blocked" set of revisions.
            #
            # We also always keep the oldest subgroup first. We can
            # probably improve the behavior by having the longest set
            # first. That way, graph algorithms could minimise the length
            # of parallel lines they draw. This is currently not done.
            targetidx = matching.pop(0)
            trevs, tparents = groups[targetidx]
            for i in matching:
                gr = groups[i]
                trevs.extend(gr[0])
                tparents |= gr[1]
            # delete all merged subgroups (but the one we keep) (starting
            # from the last subgroup for performance and sanity reasons)
            for i in reversed(matching):
                del groups[i]
        else:
            # This is a new head. We create a new subgroup for it.
            targetidx = len(groups)
            groups.append(([], set([current])))

        gr = groups[targetidx]

        # We now add the current node to this subgroup. This is done
        # after the subgroup merging because all elements from a subgroup
        # that relied on this rev must precede it.
        #
        # we also update the <parents> set to include the parents of the
        # new node.
        gr[0].append(current)
        gr[1].remove(current)
        gr[1].update([p for p in parentsfunc(current) if p > nullrev])

        # Look for a subgroup to display
        #
        # When unblocked is empty (if clause), we are not waiting on any
        # revision during the first iteration (if no priority was given) or
        # we output a whole disconnected set of the graph (reached a
        # root). In that case we arbitrarily take the oldest known
        # subgroup. The heuristic could probably be better.
        #
        # Otherwise (elif clause) this means we have some emitted revisions.
        # If the subgroup awaits on the same revisions as the emitted
        # ones, we can safely output it.
        if not unblocked:
            if len(groups) > 1: # display other subset
                targetidx = 1
                gr = groups[1]
        elif not gr[1] & unblocked:
            gr = None

        if gr is not None:
            # update the set of awaited revisions with the one from the
            # subgroup
            unblocked |= gr[1]
            # output all revisions in the subgroup
            for r in gr[0]:
                yield r
            # delete the subgroup that you just output
            # unless it is groups[0] in which case you just empty it.
            if targetidx:
                del groups[targetidx]
            else:
                gr[0][:] = []
def dagwalker(repo, revs):
    """cset DAG generator yielding (id, CHANGESET, ctx, [parentids]) tuples

    This generator function walks through revisions (which should be ordered
    from bigger to lower). It returns a tuple for each node. The node and parent
    ids are arbitrary integers which identify a node in the context of the graph
    returned.
    """
    if not revs:
        return

    cl = repo.changelog
    lowestrev = revs.min()
    # cache of grandparent lookups, keyed by the missing parent revision
    gpcache = {}

    # optionally reorder the revisions one topological branch at a time
    if repo.ui.configbool('experimental', 'graph-topological', False):
        revs = list(groupbranchiter(revs, repo.changelog.parentrevs))

    for rev in revs:
        ctx = repo[rev]
        prevs = [p.rev() for p in ctx.parents()]
        # parents that are themselves part of the displayed set
        parents = sorted(set([p for p in prevs if p in revs]))
        # non-null parents outside the set: walk down to their closest
        # ancestors inside the set instead
        missing = [p for p in prevs
                   if p != nullrev and p not in parents]

        for mpar in missing:
            gp = gpcache.get(mpar)
            if gp is None:
                gp = gpcache[mpar] = grandparent(cl, lowestrev, revs, mpar)
            if not gp:
                # no ancestor in the set: keep the edge to the missing parent
                parents.append(mpar)
            else:
                for g in gp:
                    if g not in parents:
                        parents.append(g)

        yield (ctx.rev(), CHANGESET, ctx, parents)
def nodes(repo, nodes):
    """cset DAG generator yielding (id, CHANGESET, ctx, [parentids]) tuples

    This generator function walks the given nodes. It only returns parents
    that are in nodes, too.
    """
    include = set(nodes)
    for node in nodes:
        ctx = repo[node]
        # keep only the parents that belong to the requested node set
        parents = set()
        for p in ctx.parents():
            if p.node() in include:
                parents.add(p.rev())
        yield (ctx.rev(), CHANGESET, ctx, sorted(parents))
def colored(dag, repo):
    """annotates a DAG with colored edge information

    For each DAG node this function emits tuples::

      (id, type, data, (col, color), [(col, nextcol, color)])

    with the following new elements:

      - Tuple (col, color) with column and color index for the current node
      - A list of tuples indicating the edges between the current node and its
        parents.
    """
    cols = []         # revs currently occupying a column, left to right
    colormap = {}     # pending rev -> assigned color index
    freshcolor = 1    # next unused color index
    branchstyle = {}  # branch name -> {'width': int, 'color': str}

    # gather per-branch style overrides from the [graph] config section
    for key, val in repo.ui.configitems('graph'):
        if '.' not in key:
            continue
        branch, setting = key.rsplit('.', 1)
        # Validation
        if setting == "width" and val.isdigit():
            branchstyle.setdefault(branch, {})[setting] = int(val)
        elif setting == "color" and val.isalnum():
            branchstyle.setdefault(branch, {})[setting] = val

    if branchstyle:
        getconf = util.lrucachefunc(
            lambda rev: branchstyle.get(repo[rev].branch(), {}))
    else:
        getconf = lambda rev: {}

    for (cur, type, data, parents) in dag:

        # Compute the current and next column layouts
        if cur not in cols:
            # new head: give it a fresh column and color
            cols.append(cur)
            colormap[cur] = freshcolor
            freshcolor += 1

        curcol = cols.index(cur)
        curcolor = colormap.pop(cur)
        nextcols = cols[:]

        # splice the not-yet-tracked parents in at the current column
        newparents = [p for p in parents if p not in nextcols]
        nextcols[curcol:curcol + 1] = newparents

        # the first new parent continues the current color, the others
        # each get a fresh one
        for i, p in enumerate(newparents):
            if i == 0:
                colormap[p] = curcolor
            else:
                colormap[p] = freshcolor
                freshcolor += 1

        # Build the edge list for this row
        edges = []
        for ecol, erev in enumerate(cols):
            if erev in nextcols:
                bconf = getconf(erev)
                edges.append((
                    ecol, nextcols.index(erev), colormap[erev],
                    bconf.get('width', -1),
                    bconf.get('color', '')))
            elif erev == cur:
                for p in parents:
                    bconf = getconf(p)
                    edges.append((
                        ecol, nextcols.index(p), curcolor,
                        bconf.get('width', -1),
                        bconf.get('color', '')))

        # Yield and move on
        yield (cur, type, data, (curcol, curcolor), edges)
        cols = nextcols
def grandparent(cl, lowestrev, roots, head):
    """Return all ancestors of head in roots which revision is
    greater or equal to lowestrev.
    """
    # never walk below the null revision or the requested floor
    floor = max(nullrev, lowestrev)
    todo = set([head])
    visited = set()
    kept = set()
    while todo:
        rev = todo.pop()
        if rev < floor or rev in visited:
            continue
        visited.add(rev)
        if rev in roots:
            kept.add(rev)
        else:
            # not in the displayed set: keep walking up through its parents
            todo.update(cl.parentrevs(rev))
    return sorted(kept)
def asciiedges(type, char, lines, seen, rev, parents):
    """adds edge info to changelog DAG walk suitable for ascii()"""
    if rev not in seen:
        seen.append(rev)
    nodeidx = seen.index(rev)

    # partition parents into those already assigned a column and new ones
    knownparents = [p for p in parents if p in seen]
    newparents = [p for p in parents if p not in seen]

    ncols = len(seen)
    nextseen = seen[:]
    nextseen[nodeidx:nodeidx + 1] = newparents
    edges = []
    for p in knownparents:
        if p != nullrev:
            edges.append((nodeidx, nextseen.index(p)))

    while len(newparents) > 2:
        # ascii() only knows how to add or remove a single column between two
        # calls. Nodes with more than two parents break this constraint so we
        # introduce intermediate expansion lines to grow the active node list
        # slowly.
        edges.append((nodeidx, nodeidx))
        edges.append((nodeidx, nodeidx + 1))
        yield (type, char, lines, (nodeidx, edges, ncols, 1))
        # subsequent expansion rows carry no text and a diagonal marker
        char = '\\'
        lines = []
        nodeidx += 1
        ncols += 1
        edges = []
        del newparents[0]

    if newparents:
        edges.append((nodeidx, nodeidx))
    if len(newparents) > 1:
        edges.append((nodeidx, nodeidx + 1))
    nmorecols = len(nextseen) - ncols
    # publish the next column layout to the caller through <seen>
    seen[:] = nextseen
    yield (type, char, lines, (nodeidx, edges, ncols, nmorecols))
374 def _fixlongrightedges(edges):
376 def _fixlongrightedges(edges):
375 for (i, (start, end)) in enumerate(edges):
377 for (i, (start, end)) in enumerate(edges):
376 if end > start:
378 if end > start:
377 edges[i] = (start, end + 1)
379 edges[i] = (start, end + 1)
378
380
379 def _getnodelineedgestail(
381 def _getnodelineedgestail(
380 node_index, p_node_index, n_columns, n_columns_diff, p_diff, fix_tail):
382 node_index, p_node_index, n_columns, n_columns_diff, p_diff, fix_tail):
381 if fix_tail and n_columns_diff == p_diff and n_columns_diff != 0:
383 if fix_tail and n_columns_diff == p_diff and n_columns_diff != 0:
382 # Still going in the same non-vertical direction.
384 # Still going in the same non-vertical direction.
383 if n_columns_diff == -1:
385 if n_columns_diff == -1:
384 start = max(node_index + 1, p_node_index)
386 start = max(node_index + 1, p_node_index)
385 tail = ["|", " "] * (start - node_index - 1)
387 tail = ["|", " "] * (start - node_index - 1)
386 tail.extend(["/", " "] * (n_columns - start))
388 tail.extend(["/", " "] * (n_columns - start))
387 return tail
389 return tail
388 else:
390 else:
389 return ["\\", " "] * (n_columns - node_index - 1)
391 return ["\\", " "] * (n_columns - node_index - 1)
390 else:
392 else:
391 return ["|", " "] * (n_columns - node_index - 1)
393 return ["|", " "] * (n_columns - node_index - 1)
392
394
393 def _drawedges(edges, nodeline, interline):
395 def _drawedges(edges, nodeline, interline):
394 for (start, end) in edges:
396 for (start, end) in edges:
395 if start == end + 1:
397 if start == end + 1:
396 interline[2 * end + 1] = "/"
398 interline[2 * end + 1] = "/"
397 elif start == end - 1:
399 elif start == end - 1:
398 interline[2 * start + 1] = "\\"
400 interline[2 * start + 1] = "\\"
399 elif start == end:
401 elif start == end:
400 interline[2 * start] = "|"
402 interline[2 * start] = "|"
401 else:
403 else:
402 if 2 * end >= len(nodeline):
404 if 2 * end >= len(nodeline):
403 continue
405 continue
404 nodeline[2 * end] = "+"
406 nodeline[2 * end] = "+"
405 if start > end:
407 if start > end:
406 (start, end) = (end, start)
408 (start, end) = (end, start)
407 for i in range(2 * start + 1, 2 * end):
409 for i in range(2 * start + 1, 2 * end):
408 if nodeline[i] != "+":
410 if nodeline[i] != "+":
409 nodeline[i] = "-"
411 nodeline[i] = "-"
410
412
411 def _getpaddingline(ni, n_columns, edges):
413 def _getpaddingline(ni, n_columns, edges):
412 line = []
414 line = []
413 line.extend(["|", " "] * ni)
415 line.extend(["|", " "] * ni)
414 if (ni, ni - 1) in edges or (ni, ni) in edges:
416 if (ni, ni - 1) in edges or (ni, ni) in edges:
415 # (ni, ni - 1) (ni, ni)
417 # (ni, ni - 1) (ni, ni)
416 # | | | | | | | |
418 # | | | | | | | |
417 # +---o | | o---+
419 # +---o | | o---+
418 # | | c | | c | |
420 # | | c | | c | |
419 # | |/ / | |/ /
421 # | |/ / | |/ /
420 # | | | | | |
422 # | | | | | |
421 c = "|"
423 c = "|"
422 else:
424 else:
423 c = " "
425 c = " "
424 line.extend([c, " "])
426 line.extend([c, " "])
425 line.extend(["|", " "] * (n_columns - ni - 1))
427 line.extend(["|", " "] * (n_columns - ni - 1))
426 return line
428 return line
427
429
def asciistate():
    """returns the initial value for the "state" argument to ascii()"""
    # state[0]: column-count difference from the previous node
    # state[1]: column index of the previous node
    initial = [0, 0]
    return initial
def ascii(ui, state, type, char, text, coldata):
    """prints an ASCII graph of the DAG

    takes the following arguments (one call per node in the graph):

      - ui to write to
      - Somewhere to keep the needed state in (init to asciistate())
      - Column of the current node in the set of ongoing edges.
      - Type indicator of node data, usually 'C' for changesets.
      - Payload: (char, lines):
        - Character to use as node's symbol.
        - List of lines to display as the node's text.
      - Edges; a list of (col, next_col) indicating the edges between
        the current node and its parents.
      - Number of columns (ongoing edges) in the current revision.
      - The difference between the number of columns (ongoing edges)
        in the next revision and the number of columns (ongoing edges)
        in the current revision. That is: -1 means one column removed;
        0 means no columns added or removed; 1 means one column added.
    """

    idx, edges, ncols, coldiff = coldata
    # asciiedges() splits >2-parent nodes into single-column steps, so the
    # column count can only change by at most one per call
    assert -2 < coldiff < 2
    if coldiff == -1:
        # Transform
        #
        #     | | |        | | |
        #     o | |  into  o---+
        #     |X /         |/ /
        #     | |          | |
        _fixlongrightedges(edges)

    # add_padding_line says whether to rewrite
    #
    #     | | | |        | | | |
    #     | o---+  into  | o---+
    #     |  / /         |   | |  # <--- padding line
    #     o | |          |  / /
    #                    o | |
    # (truthy only when the text is tall enough, a column is being removed,
    # and some edge spans more than one column to the right)
    add_padding_line = (len(text) > 2 and coldiff == -1 and
                        [x for (x, y) in edges if x + 1 < y])

    # fix_nodeline_tail says whether to rewrite
    #
    #     | | o | |        | | o | |
    #     | | |/ /         | | |/ /
    #     | o | |    into  | o / /   # <--- fixed nodeline tail
    #     | |/ /           | |/ /
    #     o | |            o | |
    fix_nodeline_tail = len(text) <= 2 and not add_padding_line

    # nodeline is the line containing the node character (typically o)
    nodeline = ["|", " "] * idx
    nodeline.extend([char, " "])

    # state[1]/state[0] are the previous call's node index and coldiff
    # (see asciistate()); they drive the diagonal-continuation rewrite
    nodeline.extend(
        _getnodelineedgestail(idx, state[1], ncols, coldiff,
                              state[0], fix_nodeline_tail))

    # shift_interline is the line containing the non-vertical
    # edges between this entry and the next
    shift_interline = ["|", " "] * idx
    if coldiff == -1:
        n_spaces = 1
        edge_ch = "/"
    elif coldiff == 0:
        n_spaces = 2
        edge_ch = "|"
    else:
        n_spaces = 3
        edge_ch = "\\"
    shift_interline.extend(n_spaces * [" "])
    shift_interline.extend([edge_ch, " "] * (ncols - idx - 1))

    # draw edges from the current node to its parents
    _drawedges(edges, nodeline, shift_interline)

    # lines is the list of all graph lines to print
    lines = [nodeline]
    if add_padding_line:
        lines.append(_getpaddingline(idx, ncols, edges))
    lines.append(shift_interline)

    # make sure that there are as many graph lines as there are
    # log strings
    while len(text) < len(lines):
        text.append("")
    if len(lines) < len(text):
        # NOTE: the same extra_interline list object is appended several
        # times; safe because these filler lines are only read, never
        # mutated, below
        extra_interline = ["|", " "] * (ncols + coldiff)
        while len(lines) < len(text):
            lines.append(extra_interline)

    # print lines
    indentation_level = max(ncols, ncols + coldiff)
    for (line, logstr) in zip(lines, text):
        # left-pad the graph columns to a fixed width, then the log text
        ln = "%-*s %s" % (2 * indentation_level, "".join(line), logstr)
        ui.write(ln.rstrip() + '\n')

    # ... and start over: remember this node's coldiff and column for the
    # next call
    state[0] = coldiff
    state[1] = idx
General Comments 0
You need to be logged in to leave comments. Login now