##// END OF EJS Templates
graphmod: augment the graph to include more information about the edges...
Martijn Pieters -
r28376:fa2cd0c9 default
parent child Browse files
Show More
@@ -1,567 +1,581 b''
1 # Revision graph generator for Mercurial
1 # Revision graph generator for Mercurial
2 #
2 #
3 # Copyright 2008 Dirkjan Ochtman <dirkjan@ochtman.nl>
3 # Copyright 2008 Dirkjan Ochtman <dirkjan@ochtman.nl>
4 # Copyright 2007 Joel Rosdahl <joel@rosdahl.net>
4 # Copyright 2007 Joel Rosdahl <joel@rosdahl.net>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 """supports walking the history as DAGs suitable for graphical output
9 """supports walking the history as DAGs suitable for graphical output
10
10
11 The most basic format we use is that of::
11 The most basic format we use is that of::
12
12
13 (id, type, data, [parentids])
13 (id, type, data, [parentids])
14
14
15 The node and parent ids are arbitrary integers which identify a node in the
15 The node and parent ids are arbitrary integers which identify a node in the
16 context of the graph returned. Type is a constant specifying the node type.
16 context of the graph returned. Type is a constant specifying the node type.
17 Data depends on type.
17 Data depends on type.
18 """
18 """
19
19
20 from __future__ import absolute_import
20 from __future__ import absolute_import
21
21
22 import heapq
22 import heapq
23
23
24 from .node import nullrev
24 from .node import nullrev
25 from . import (
25 from . import (
26 revset,
26 revset,
27 util,
27 util,
28 )
28 )
29
29
# Node and edge type markers used in the (id, type, data, [parentinfo])
# tuples yielded by the DAG walkers below.
CHANGESET = 'C'      # node is a changeset
PARENT = 'P'         # edge to a direct parent inside the displayed set
GRANDPARENT = 'G'    # edge to a closest displayed ancestor (parent elided)
MISSINGPARENT = 'M'  # edge to a parent with no displayed ancestor at all
def groupbranchiter(revs, parentsfunc, firstbranch=()):
    """Yield revisions from heads to roots one (topo) branch at a time.

    This function aims to be used by a graph generator that wishes to minimize
    the number of parallel branches and their interleaving.

    Example iteration order (numbers show the "true" order in a changelog):

      o  4
      |
      o  1
      |
      | o  3
      | |
      | o  2
      |/
      o  0

    Note that the ancestors of merges are understood by the current
    algorithm to be on the same branch. This means no reordering will
    occur behind a merge.
    """

    ### Quick summary of the algorithm
    #
    # This function is based around a "retention" principle. We keep revisions
    # in memory until we are ready to emit a whole branch that immediately
    # "merges" into an existing one. This reduces the number of parallel
    # branches with interleaved revisions.
    #
    # During iteration revs are split into two groups:
    # A) revision already emitted
    # B) revision in "retention". They are stored as different subgroups.
    #
    # for each REV, we do the following logic:
    #
    #   1) if REV is a parent of (A), we will emit it. If there is a
    #   retention group ((B) above) that is blocked on REV being
    #   available, we emit all the revisions out of that retention
    #   group first.
    #
    #   2) else, we'll search for a subgroup in (B) awaiting for REV to be
    #   available, if such subgroup exist, we add REV to it and the subgroup is
    #   now awaiting for REV.parents() to be available.
    #
    #   3) finally if no such group existed in (B), we create a new subgroup.
    #
    # To bootstrap the algorithm, we emit the tipmost revision (which
    # puts it in group (A) from above).

    revs.sort(reverse=True)

    # Set of parents of revisions that have been emitted; they can be
    # considered unblocked as the graph generator is already aware of them.
    # Pre-seeding this set (via firstbranch) forces all other branches to
    # wait until that branch is ready to be emitted.
    unblocked = set(firstbranch)

    # 'groups' is the list of subgroups waiting to be displayed; each entry
    # is a pair (revs-to-display, blocking-parents).  The blocking set holds
    # parents of revs in the group that are not themselves in the group, i.e.
    # everything that must be displayed before the group can be emitted.
    #
    # groups[0] is special: its revs list stays empty and its blocking set is
    # the 'unblocked' set above (parents of already-emitted revisions).
    groups = [([], unblocked)]
    pendingheap = []
    pendingset = set()

    heapq.heapify(pendingheap)
    heappop = heapq.heappop
    heappush = heapq.heappush
    for currentrev in revs:
        # Heap works with smallest element, we want highest so we invert
        if currentrev not in pendingset:
            heappush(pendingheap, -currentrev)
            pendingset.add(currentrev)
        # iterate on pending revs until the current rev has been processed
        rev = None
        while rev != currentrev:
            rev = -heappop(pendingheap)
            pendingset.remove(rev)

            # Seek subgroups blocked, waiting for the current revision.
            matching = [i for i, g in enumerate(groups) if rev in g[1]]

            if matching:
                # Merge every subgroup blocked on this same revision into the
                # oldest one; keeping the oldest first keeps the algorithm
                # simple (a longest-first policy could shorten parallel lines
                # in the drawing, but that is not done here).
                targetidx = matching.pop(0)
                targetrevs, targetparents = groups[targetidx]
                for i in matching:
                    other = groups[i]
                    targetrevs.extend(other[0])
                    targetparents |= other[1]
                # delete merged subgroups, from the end so indices stay valid
                for i in reversed(matching):
                    del groups[i]
            else:
                # This is a new head. We create a new subgroup for it.
                targetidx = len(groups)
                groups.append(([], set([rev])))

            group = groups[targetidx]

            # Add the current node to the chosen subgroup (done after the
            # merge above so revs that relied on it precede it) and update
            # the blocking set with the node's parents.
            if rev == currentrev: # only display stuff in rev
                group[0].append(rev)
            group[1].remove(rev)
            parents = [p for p in parentsfunc(rev) if p > nullrev]
            group[1].update(parents)
            for p in parents:
                if p not in pendingset:
                    pendingset.add(p)
                    heappush(pendingheap, -p)

            # Look for a subgroup to display.
            #
            # When 'unblocked' is empty we were not waiting for anything
            # (first iteration with no priority, or we just finished a
            # disconnected part of the graph): arbitrarily take the oldest
            # known subgroup.  Otherwise only emit the subgroup if it is
            # blocked on a revision we just emitted.
            if not unblocked:
                if len(groups) > 1: # display other subset
                    targetidx = 1
                    group = groups[1]
            elif not group[1] & unblocked:
                group = None

            if group is not None:
                # the subgroup's parents become awaited revisions
                unblocked |= group[1]
                # output all revisions in the subgroup
                for r in group[0]:
                    yield r
                # drop the emitted subgroup (groups[0] is only emptied)
                if targetidx:
                    del groups[targetidx]
                else:
                    group[0][:] = []
    # Emit subgroups still waiting for revisions we are not iterating over.
    for g in groups:
        for r in g[0]:
            yield r
229
232
def dagwalker(repo, revs):
    """cset DAG generator yielding (id, CHANGESET, ctx, [parentinfo]) tuples

    This generator function walks through revisions (which should be ordered
    from bigger to lower). It returns a tuple for each node.

    Each parentinfo entry is a tuple with (edgetype, parentid), where edgetype
    is one of PARENT, GRANDPARENT or MISSINGPARENT. The node and parent ids
    are arbitrary integers which identify a node in the context of the graph
    returned.
    """
    if not revs:
        return

    gpcache = {}

    if repo.ui.configbool('experimental', 'graph-group-branches', False):
        firstbranch = ()
        firstbranchrevset = repo.ui.config(
            'experimental', 'graph-group-branches.firstbranch', '')
        if firstbranchrevset:
            firstbranch = repo.revs(firstbranchrevset)
        parentrevs = repo.changelog.parentrevs
        revs = groupbranchiter(revs, parentrevs, firstbranch)
        revs = revset.baseset(revs)

    for rev in revs:
        ctx = repo[rev]
        # Partition the parents: those inside the displayed set get PARENT
        # edges; the rest are resolved below to closest displayed ancestors
        # (GRANDPARENT) or flagged MISSINGPARENT, so the drawing code knows
        # what kind of edge to draw between nodes.
        inset = set(p.rev() for p in ctx.parents() if p.rev() in revs)
        outside = [p.rev() for p in ctx.parents()
                   if p.rev() != nullrev and p.rev() not in inset]
        parents = [(PARENT, p) for p in sorted(inset)]

        for mpar in outside:
            roots = gpcache.get(mpar)
            if roots is None:
                # precompute slow query as we know reachableroots() goes
                # through all revs (issue4782)
                if not isinstance(revs, revset.baseset):
                    revs = revset.baseset(revs)
                roots = gpcache[mpar] = sorted(set(revset.reachableroots(
                    repo, revs, [mpar])))
            if not roots:
                # no displayed ancestor at all for this parent
                parents.append((MISSINGPARENT, mpar))
                inset.add(mpar)
            else:
                parents.extend((GRANDPARENT, g) for g in roots
                               if g not in inset)
                inset.update(roots)

        yield (ctx.rev(), CHANGESET, ctx, parents)
274
287
def nodes(repo, nodes):
    """cset DAG generator yielding (id, CHANGESET, ctx, [parentids]) tuples

    This generator function walks the given nodes. It only returns parents
    that are in nodes, too.
    """
    include = set(nodes)
    for node in nodes:
        ctx = repo[node]
        # collect PARENT edges only for parents that are part of the walk
        parentedges = set()
        for p in ctx.parents():
            if p.node() in include:
                parentedges.add((PARENT, p.rev()))
        yield (ctx.rev(), CHANGESET, ctx, sorted(parentedges))
286
300
def colored(dag, repo):
    """annotates a DAG with colored edge information

    For each DAG node this function emits tuples::

      (id, type, data, (col, color), [(col, nextcol, color, width, bcolor)])

    with the following new elements:

      - Tuple (col, color) with column and color index for the current node
      - A list of tuples describing the edges between the current node and
        its parents: start column, end column, color index, plus the
        branch-specific width and color overrides from the [graph] config
        section (-1 and '' when unset).
    """
    seen = []
    colors = {}
    newcolor = 1
    config = {}

    # Read per-branch edge styling from the [graph] config section
    # (keys look like "<branch>.width" / "<branch>.color").
    for key, val in repo.ui.configitems('graph'):
        if '.' in key:
            branch, setting = key.rsplit('.', 1)
            # Validation: only keep well-formed values
            if setting == "width" and val.isdigit():
                config.setdefault(branch, {})[setting] = int(val)
            elif setting == "color" and val.isalnum():
                config.setdefault(branch, {})[setting] = val

    if config:
        getconf = util.lrucachefunc(
            lambda rev: config.get(repo[rev].branch(), {}))
    else:
        getconf = lambda rev: {}

    # NOTE: locals renamed from 'type'/'next' to avoid shadowing builtins.
    for (cur, ntype, data, parents) in dag:

        # Compute seen and nextseen: an unseen node starts a new column
        # (new head) and gets a fresh color.
        if cur not in seen:
            seen.append(cur) # new head
            colors[cur] = newcolor
            newcolor += 1

        col = seen.index(cur)
        color = colors.pop(cur)
        nextseen = seen[:]

        # Splice parents not already tracked into the current column.
        addparents = [p for pt, p in parents if p not in nextseen]
        nextseen[col:col + 1] = addparents

        # First new parent inherits the current color; the others get new
        # colors of their own.
        for i, p in enumerate(addparents):
            if not i:
                colors[p] = color
            else:
                colors[p] = newcolor
                newcolor += 1

        # Add edges to the graph
        edges = []
        for ecol, eid in enumerate(seen):
            if eid in nextseen:
                bconf = getconf(eid)
                edges.append((
                    ecol, nextseen.index(eid), colors[eid],
                    bconf.get('width', -1),
                    bconf.get('color', '')))
            elif eid == cur:
                for ptype, p in parents:
                    bconf = getconf(p)
                    edges.append((
                        ecol, nextseen.index(p), color,
                        bconf.get('width', -1),
                        bconf.get('color', '')))

        # Yield and move on
        yield (cur, ntype, data, (col, color), edges)
        seen = nextseen
364
378
365 def asciiedges(type, char, lines, state, rev, parents):
379 def asciiedges(type, char, lines, state, rev, parents):
366 """adds edge info to changelog DAG walk suitable for ascii()"""
380 """adds edge info to changelog DAG walk suitable for ascii()"""
367 seen = state['seen']
381 seen = state['seen']
368 if rev not in seen:
382 if rev not in seen:
369 seen.append(rev)
383 seen.append(rev)
370 nodeidx = seen.index(rev)
384 nodeidx = seen.index(rev)
371
385
372 knownparents = []
386 knownparents = []
373 newparents = []
387 newparents = []
374 for parent in parents:
388 for ptype, parent in parents:
375 if parent in seen:
389 if parent in seen:
376 knownparents.append(parent)
390 knownparents.append(parent)
377 else:
391 else:
378 newparents.append(parent)
392 newparents.append(parent)
379
393
380 ncols = len(seen)
394 ncols = len(seen)
381 nextseen = seen[:]
395 nextseen = seen[:]
382 nextseen[nodeidx:nodeidx + 1] = newparents
396 nextseen[nodeidx:nodeidx + 1] = newparents
383 edges = [(nodeidx, nextseen.index(p)) for p in knownparents if p != nullrev]
397 edges = [(nodeidx, nextseen.index(p)) for p in knownparents if p != nullrev]
384
398
385 while len(newparents) > 2:
399 while len(newparents) > 2:
386 # ascii() only knows how to add or remove a single column between two
400 # ascii() only knows how to add or remove a single column between two
387 # calls. Nodes with more than two parents break this constraint so we
401 # calls. Nodes with more than two parents break this constraint so we
388 # introduce intermediate expansion lines to grow the active node list
402 # introduce intermediate expansion lines to grow the active node list
389 # slowly.
403 # slowly.
390 edges.append((nodeidx, nodeidx))
404 edges.append((nodeidx, nodeidx))
391 edges.append((nodeidx, nodeidx + 1))
405 edges.append((nodeidx, nodeidx + 1))
392 nmorecols = 1
406 nmorecols = 1
393 yield (type, char, lines, (nodeidx, edges, ncols, nmorecols))
407 yield (type, char, lines, (nodeidx, edges, ncols, nmorecols))
394 char = '\\'
408 char = '\\'
395 lines = []
409 lines = []
396 nodeidx += 1
410 nodeidx += 1
397 ncols += 1
411 ncols += 1
398 edges = []
412 edges = []
399 del newparents[0]
413 del newparents[0]
400
414
401 if len(newparents) > 0:
415 if len(newparents) > 0:
402 edges.append((nodeidx, nodeidx))
416 edges.append((nodeidx, nodeidx))
403 if len(newparents) > 1:
417 if len(newparents) > 1:
404 edges.append((nodeidx, nodeidx + 1))
418 edges.append((nodeidx, nodeidx + 1))
405 nmorecols = len(nextseen) - ncols
419 nmorecols = len(nextseen) - ncols
406 seen[:] = nextseen
420 seen[:] = nextseen
407 yield (type, char, lines, (nodeidx, edges, ncols, nmorecols))
421 yield (type, char, lines, (nodeidx, edges, ncols, nmorecols))
408
422
409 def _fixlongrightedges(edges):
423 def _fixlongrightedges(edges):
410 for (i, (start, end)) in enumerate(edges):
424 for (i, (start, end)) in enumerate(edges):
411 if end > start:
425 if end > start:
412 edges[i] = (start, end + 1)
426 edges[i] = (start, end + 1)
413
427
414 def _getnodelineedgestail(
428 def _getnodelineedgestail(
415 node_index, p_node_index, n_columns, n_columns_diff, p_diff, fix_tail):
429 node_index, p_node_index, n_columns, n_columns_diff, p_diff, fix_tail):
416 if fix_tail and n_columns_diff == p_diff and n_columns_diff != 0:
430 if fix_tail and n_columns_diff == p_diff and n_columns_diff != 0:
417 # Still going in the same non-vertical direction.
431 # Still going in the same non-vertical direction.
418 if n_columns_diff == -1:
432 if n_columns_diff == -1:
419 start = max(node_index + 1, p_node_index)
433 start = max(node_index + 1, p_node_index)
420 tail = ["|", " "] * (start - node_index - 1)
434 tail = ["|", " "] * (start - node_index - 1)
421 tail.extend(["/", " "] * (n_columns - start))
435 tail.extend(["/", " "] * (n_columns - start))
422 return tail
436 return tail
423 else:
437 else:
424 return ["\\", " "] * (n_columns - node_index - 1)
438 return ["\\", " "] * (n_columns - node_index - 1)
425 else:
439 else:
426 return ["|", " "] * (n_columns - node_index - 1)
440 return ["|", " "] * (n_columns - node_index - 1)
427
441
428 def _drawedges(edges, nodeline, interline):
442 def _drawedges(edges, nodeline, interline):
429 for (start, end) in edges:
443 for (start, end) in edges:
430 if start == end + 1:
444 if start == end + 1:
431 interline[2 * end + 1] = "/"
445 interline[2 * end + 1] = "/"
432 elif start == end - 1:
446 elif start == end - 1:
433 interline[2 * start + 1] = "\\"
447 interline[2 * start + 1] = "\\"
434 elif start == end:
448 elif start == end:
435 interline[2 * start] = "|"
449 interline[2 * start] = "|"
436 else:
450 else:
437 if 2 * end >= len(nodeline):
451 if 2 * end >= len(nodeline):
438 continue
452 continue
439 nodeline[2 * end] = "+"
453 nodeline[2 * end] = "+"
440 if start > end:
454 if start > end:
441 (start, end) = (end, start)
455 (start, end) = (end, start)
442 for i in range(2 * start + 1, 2 * end):
456 for i in range(2 * start + 1, 2 * end):
443 if nodeline[i] != "+":
457 if nodeline[i] != "+":
444 nodeline[i] = "-"
458 nodeline[i] = "-"
445
459
446 def _getpaddingline(ni, n_columns, edges):
460 def _getpaddingline(ni, n_columns, edges):
447 line = []
461 line = []
448 line.extend(["|", " "] * ni)
462 line.extend(["|", " "] * ni)
449 if (ni, ni - 1) in edges or (ni, ni) in edges:
463 if (ni, ni - 1) in edges or (ni, ni) in edges:
450 # (ni, ni - 1) (ni, ni)
464 # (ni, ni - 1) (ni, ni)
451 # | | | | | | | |
465 # | | | | | | | |
452 # +---o | | o---+
466 # +---o | | o---+
453 # | | c | | c | |
467 # | | c | | c | |
454 # | |/ / | |/ /
468 # | |/ / | |/ /
455 # | | | | | |
469 # | | | | | |
456 c = "|"
470 c = "|"
457 else:
471 else:
458 c = " "
472 c = " "
459 line.extend([c, " "])
473 line.extend([c, " "])
460 line.extend(["|", " "] * (n_columns - ni - 1))
474 line.extend(["|", " "] * (n_columns - ni - 1))
461 return line
475 return line
462
476
def asciistate():
    """returns the initial value for the "state" argument to ascii()"""
    state = {
        'seen': [],          # active columns (revs with ongoing edges)
        'lastcoldiff': 0,    # column-count delta of the previous row
        'lastindex': 0,      # node column index of the previous row
    }
    return state
466
480
def ascii(ui, state, type, char, text, coldata):
    """prints an ASCII graph of the DAG

    takes the following arguments (one call per node in the graph):

      - ui to write to (anything with a write() method)
      - Somewhere to keep the needed state in (init to asciistate())
      - Column of the current node in the set of ongoing edges.
      - Type indicator of node data, usually 'C' for changesets.
      - Payload: (char, lines):
        - Character to use as node's symbol.
        - List of lines to display as the node's text.
      - Edges; a list of (col, next_col) indicating the edges between
        the current node and its parents.
      - Number of columns (ongoing edges) in the current revision.
      - The difference between the number of columns (ongoing edges)
        in the next revision and the number of columns (ongoing edges)
        in the current revision. That is: -1 means one column removed;
        0 means no columns added or removed; 1 means one column added.

    Note: the last three bullets describe the contents of coldata,
    unpacked below as (idx, edges, ncols, coldiff).  'text' is mutated
    in place (padded with empty strings) and 'state' is updated for the
    next call.  The 'type' argument is not used in this function.
    """

    idx, edges, ncols, coldiff = coldata
    # only a single column may be added or removed per node
    assert -2 < coldiff < 2
    if coldiff == -1:
        # Transform
        #
        #     | | |        | | |
        #     o | |  into  o---+
        #     |X /         |/ /
        #     | |          | |
        _fixlongrightedges(edges)

    # add_padding_line says whether to rewrite
    #
    #     | | | |        | | | |
    #     | o---+  into  | o---+
    #     |  / /         |   | |  # <--- padding line
    #     o | |          |  / /
    #                    o | |
    # (truthy when there is a long horizontal edge and enough text
    # lines to absorb the extra row)
    add_padding_line = (len(text) > 2 and coldiff == -1 and
                        [x for (x, y) in edges if x + 1 < y])

    # fix_nodeline_tail says whether to rewrite
    #
    #     | | o | |        | | o | |
    #     | | |/ /         | | |/ /
    #     | o | |    into  | o / /   # <--- fixed nodeline tail
    #     | |/ /           | |/ /
    #     o | |            o | |
    fix_nodeline_tail = len(text) <= 2 and not add_padding_line

    # nodeline is the line containing the node character (typically o);
    # each column is rendered as two characters, hence the pairs
    nodeline = ["|", " "] * idx
    nodeline.extend([char, " "])

    nodeline.extend(
        _getnodelineedgestail(idx, state['lastindex'], ncols, coldiff,
                              state['lastcoldiff'], fix_nodeline_tail))

    # shift_interline is the line containing the non-vertical
    # edges between this entry and the next
    shift_interline = ["|", " "] * idx
    if coldiff == -1:
        # columns to the right shift left: edges slant with "/"
        n_spaces = 1
        edge_ch = "/"
    elif coldiff == 0:
        # no shift: edges continue straight down
        n_spaces = 2
        edge_ch = "|"
    else:
        # a column was added: edges slant right with "\"
        n_spaces = 3
        edge_ch = "\\"
    shift_interline.extend(n_spaces * [" "])
    shift_interline.extend([edge_ch, " "] * (ncols - idx - 1))

    # draw edges from the current node to its parents (mutates both
    # nodeline and shift_interline in place)
    _drawedges(edges, nodeline, shift_interline)

    # lines is the list of all graph lines to print
    lines = [nodeline]
    if add_padding_line:
        lines.append(_getpaddingline(idx, ncols, edges))
    lines.append(shift_interline)

    # make sure that there are as many graph lines as there are
    # log strings (pads the caller's 'text' list in place)
    while len(text) < len(lines):
        text.append("")
    if len(lines) < len(text):
        # extra rows just carry the next revision's columns straight down
        extra_interline = ["|", " "] * (ncols + coldiff)
        while len(lines) < len(text):
            lines.append(extra_interline)

    # print lines; pad the graph part to twice the wider of the current
    # and next column counts so the log text lines up
    indentation_level = max(ncols, ncols + coldiff)
    for (line, logstr) in zip(lines, text):
        ln = "%-*s %s" % (2 * indentation_level, "".join(line), logstr)
        ui.write(ln.rstrip() + '\n')

    # ... and start over: remember this node's column and column delta
    # for the next call
    state['lastcoldiff'] = coldiff
    state['lastindex'] = idx
General Comments 0
You need to be logged in to leave comments. Login now