##// END OF EJS Templates
graphmod: stop building a list out of branchgroupiter...
Pierre-Yves David -
r26092:014044db default
parent child Browse files
Show More
@@ -1,561 +1,562 b''
1 # Revision graph generator for Mercurial
1 # Revision graph generator for Mercurial
2 #
2 #
3 # Copyright 2008 Dirkjan Ochtman <dirkjan@ochtman.nl>
3 # Copyright 2008 Dirkjan Ochtman <dirkjan@ochtman.nl>
4 # Copyright 2007 Joel Rosdahl <joel@rosdahl.net>
4 # Copyright 2007 Joel Rosdahl <joel@rosdahl.net>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 """supports walking the history as DAGs suitable for graphical output
9 """supports walking the history as DAGs suitable for graphical output
10
10
11 The most basic format we use is that of::
11 The most basic format we use is that of::
12
12
13 (id, type, data, [parentids])
13 (id, type, data, [parentids])
14
14
15 The node and parent ids are arbitrary integers which identify a node in the
15 The node and parent ids are arbitrary integers which identify a node in the
16 context of the graph returned. Type is a constant specifying the node type.
16 context of the graph returned. Type is a constant specifying the node type.
17 Data depends on type.
17 Data depends on type.
18 """
18 """
19
19
20 from __future__ import absolute_import
20 from __future__ import absolute_import
21
21
22 import heapq
22 import heapq
23
23
24 from .node import nullrev
24 from .node import nullrev
25 from . import (
25 from . import (
26 revset,
26 revset,
27 util,
27 util,
28 )
28 )
29
29
30 CHANGESET = 'C'
30 CHANGESET = 'C'
31
31
def groupbranchiter(revs, parentsfunc, firstbranch=()):
    """Yield revisions from heads to roots one (topo) branch at a time.

    This function aims to be used by a graph generator that wishes to minimize
    the number of parallel branches and their interleaving.

    Example iteration order (numbers show the "true" order in a changelog):

      o  4
      |
      o  1
      |
      | o  3
      | |
      | o  2
      |/
      o  0

    Note that the ancestors of merges are understood by the current
    algorithm to be on the same branch. This means no reordering will
    occur behind a merge.

    Arguments:

    - revs: list of revision numbers. NOTE: it is sorted in place
      (descending) by this function, so the caller's list is mutated.
    - parentsfunc: callable taking a rev and returning its parent revs;
      entries <= nullrev are ignored.
    - firstbranch: revs used to pre-seed the 'unblocked' set, which forces
      every other branch to wait until the branch containing them has been
      emitted (see the comment on 'unblocked' below).
    """

    ### Quick summary of the algorithm
    #
    # This function is based around a "retention" principle. We keep revisions
    # in memory until we are ready to emit a whole branch that immediately
    # "merges" into an existing one. This reduces the number of parallel
    # branches with interleaved revisions.
    #
    # During iteration revs are split into two groups:
    # A) revision already emitted
    # B) revision in "retention". They are stored as different subgroups.
    #
    # for each REV, we do the following logic:
    #
    #   1) if REV is a parent of (A), we will emit it. If there is a
    #   retention group ((B) above) that is blocked on REV being
    #   available, we emit all the revisions out of that retention
    #   group first.
    #
    #   2) else, we'll search for a subgroup in (B) awaiting for REV to be
    #   available, if such subgroup exist, we add REV to it and the subgroup is
    #   now awaiting for REV.parents() to be available.
    #
    #   3) finally if no such group existed in (B), we create a new subgroup.
    #
    #
    # To bootstrap the algorithm, we emit the tipmost revision (which
    # puts it in group (A) from above).

    # NOTE: in-place sort; mutates the caller's list.
    revs.sort(reverse=True)

    # Set of parents of revision that have been emitted. They can be considered
    # unblocked as the graph generator is already aware of them so there is no
    # need to delay the revisions that reference them.
    #
    # If someone wants to prioritize a branch over the others, pre-filling this
    # set will force all other branches to wait until this branch is ready to be
    # emitted.
    unblocked = set(firstbranch)

    # list of groups waiting to be displayed, each group is defined by:
    #
    #   (revs:    lists of revs waiting to be displayed,
    #    blocked: set of that cannot be displayed before those in 'revs')
    #
    # The second value ('blocked') correspond to parents of any revision in the
    # group ('revs') that is not itself contained in the group. The main idea
    # of this algorithm is to delay as much as possible the emission of any
    # revision.  This means waiting for the moment we are about to display
    # these parents to display the revs in a group.
    #
    # This first implementation is smart until it encounters a merge: it will
    # emit revs as soon as any parent is about to be emitted and can grow an
    # arbitrary number of revs in 'blocked'. In practice this mean we properly
    # retains new branches but gives up on any special ordering for ancestors
    # of merges. The implementation can be improved to handle this better.
    #
    # The first subgroup is special. It corresponds to all the revision that
    # were already emitted. The 'revs' lists is expected to be empty and the
    # 'blocked' set contains the parents revisions of already emitted revision.
    # NOTE: groups[0][1] is the *same set object* as 'unblocked', so mutating
    # one mutates the other; the algorithm relies on this aliasing.
    #
    # You could pre-seed the <parents> set of groups[0] to a specific
    # changesets to select what the first emitted branch should be.
    groups = [([], unblocked)]
    pendingheap = []
    pendingset = set()

    heapq.heapify(pendingheap)
    heappop = heapq.heappop
    heappush = heapq.heappush
    for currentrev in revs:
        # Heap works with smallest element, we want highest so we invert
        if currentrev not in pendingset:
            heappush(pendingheap, -currentrev)
            pendingset.add(currentrev)
        # iterates on pending rev until after the current rev have been
        # processed.
        rev = None
        while rev != currentrev:
            rev = -heappop(pendingheap)
            pendingset.remove(rev)

            # Seek for a subgroup blocked, waiting for the current revision.
            matching = [i for i, g in enumerate(groups) if rev in g[1]]

            if matching:
                # The main idea is to gather together all sets that are blocked
                # on the same revision.
                #
                # Groups are merged when a common blocking ancestor is
                # observed. For example, given two groups:
                #
                # revs [5, 4] waiting for 1
                # revs [3, 2] waiting for 1
                #
                # These two groups will be merged when we process
                # 1. In theory, we could have merged the groups when
                # we added 2 to the group it is now in (we could have
                # noticed the groups were both blocked on 1 then), but
                # the way it works now makes the algorithm simpler.
                #
                # We also always keep the oldest subgroup first. We can
                # probably improve the behavior by having the longest set
                # first. That way, graph algorithms could minimise the length
                # of parallel lines their drawing. This is currently not done.
                targetidx = matching.pop(0)
                trevs, tparents = groups[targetidx]
                for i in matching:
                    gr = groups[i]
                    trevs.extend(gr[0])
                    tparents |= gr[1]
                # delete all merged subgroups (except the one we kept)
                # (starting from the last subgroup for performance and
                # sanity reasons)
                for i in reversed(matching):
                    del groups[i]
            else:
                # This is a new head. We create a new subgroup for it.
                targetidx = len(groups)
                groups.append(([], set([rev])))

            gr = groups[targetidx]

            # We now add the current nodes to this subgroups. This is done
            # after the subgroup merging because all elements from a subgroup
            # that relied on this rev must precede it.
            #
            # we also update the <parents> set to include the parents of the
            # new nodes.
            if rev == currentrev: # only display stuff in rev
                gr[0].append(rev)
            gr[1].remove(rev)
            parents = [p for p in parentsfunc(rev) if p > nullrev]
            gr[1].update(parents)
            for p in parents:
                if p not in pendingset:
                    pendingset.add(p)
                    heappush(pendingheap, -p)

            # Look for a subgroup to display
            #
            # When unblocked is empty (if clause), we were not waiting for any
            # revisions during the first iteration (if no priority was given) or
            # if we emitted a whole disconnected set of the graph (reached a
            # root). In that case we arbitrarily take the oldest known
            # subgroup. The heuristic could probably be better.
            #
            # Otherwise (elif clause) if the subgroup is blocked on
            # a revision we just emitted, we can safely emit it as
            # well.
            if not unblocked:
                if len(groups) > 1:  # display other subset
                    targetidx = 1
                    gr = groups[1]
            elif not gr[1] & unblocked:
                gr = None

            if gr is not None:
                # update the set of awaited revisions with the one from the
                # subgroup
                unblocked |= gr[1]
                # output all revisions in the subgroup
                for r in gr[0]:
                    yield r
                # delete the subgroup that you just output
                # unless it is groups[0] in which case you just empty it.
                if targetidx:
                    del groups[targetidx]
                else:
                    gr[0][:] = []
    # Check if we have some subgroup waiting for revisions we are not going to
    # iterate over
    for g in groups:
        for r in g[0]:
            yield r
229
229
def dagwalker(repo, revs):
    """cset DAG generator yielding (id, CHANGESET, ctx, [parentids]) tuples

    This generator function walks through revisions (which should be ordered
    from bigger to lower). It returns a tuple for each node. The node and parent
    ids are arbitrary integers which identify a node in the context of the graph
    returned.
    """
    if not revs:
        return

    # Cache of "grandparent" computations: missing parent rev -> result of
    # revset.reachableroots for that rev (may be shared by several nodes).
    gpcache = {}

    if repo.ui.configbool('experimental', 'graph-group-branches', False):
        firstbranch = ()
        firstbranchrevset = repo.ui.config(
            'experimental', 'graph-group-branches.firstbranch', '')
        if firstbranchrevset:
            firstbranch = repo.revs(firstbranchrevset)
        parentrevs = repo.changelog.parentrevs
        revs = groupbranchiter(revs, parentrevs, firstbranch)
        revs = revset.baseset(revs)

    for rev in revs:
        ctx = repo[rev]
        # Hoisted: the original walked ctx.parents() twice and called p.rev()
        # up to three times per parent; compute the parent revs once instead.
        prevs = [p.rev() for p in ctx.parents()]
        # parents that are themselves part of the displayed set
        parents = sorted(set(p for p in prevs if p in revs))
        # "missing" parents: real parents that fall outside the displayed set
        mpars = [p for p in prevs if p != nullrev and p not in parents]

        for mpar in mpars:
            gp = gpcache.get(mpar)
            if gp is None:
                # NOTE(review): presumably the members of 'revs' reachable
                # from mpar, substituted as graph ancestors -- see
                # revset.reachableroots for the exact contract.
                gp = gpcache[mpar] = revset.reachableroots(repo, revs, [mpar])
            if not gp:
                # no displayed ancestor: keep the missing parent itself
                parents.append(mpar)
            else:
                parents.extend(g for g in gp if g not in parents)

        yield (ctx.rev(), CHANGESET, ctx, parents)
269
270
def nodes(repo, nodes):
    """cset DAG generator yielding (id, CHANGESET, ctx, [parentids]) tuples

    This generator function walks the given nodes. It only returns parents
    that are in nodes, too.
    """
    # membership filter: parents outside the requested nodes are dropped
    wanted = set(nodes)
    for n in nodes:
        ctx = repo[n]
        prevs = set(p.rev() for p in ctx.parents() if p.node() in wanted)
        yield (ctx.rev(), CHANGESET, ctx, sorted(prevs))
281
282
def colored(dag, repo):
    """annotates a DAG with colored edge information

    For each DAG node this function emits tuples::

      (id, type, data, (col, color), [(col, nextcol, color, width, bcolor)])

    with the following new elements:

      - Tuple (col, color) with column and color index for the current node
      - A list of tuples indicating the edges between the current node and its
        parents; each edge also carries the branch-specific line width and
        color name read from the [graph] config section (-1 and '' when
        unconfigured).
    """
    seen = []      # ids whose columns are currently open, left to right
    colors = {}    # pending id -> color index to use when it is drawn
    newcolor = 1   # next unused color index
    config = {}    # branch name -> {'width': int, 'color': str}

    # Read per-branch edge styling from the [graph] config section; keys have
    # the form <branch>.width / <branch>.color.
    for key, val in repo.ui.configitems('graph'):
        if '.' in key:
            branch, setting = key.rsplit('.', 1)
            # Validation
            if setting == "width" and val.isdigit():
                config.setdefault(branch, {})[setting] = int(val)
            elif setting == "color" and val.isalnum():
                config.setdefault(branch, {})[setting] = val

    if config:
        getconf = util.lrucachefunc(
            lambda rev: config.get(repo[rev].branch(), {}))
    else:
        getconf = lambda rev: {}

    for (cur, type, data, parents) in dag:

        # Compute seen and next
        if cur not in seen:
            seen.append(cur) # new head
            colors[cur] = newcolor
            newcolor += 1

        col = seen.index(cur)
        color = colors.pop(cur)
        next = seen[:]

        # Add parents to next
        addparents = [p for p in parents if p not in next]
        next[col:col + 1] = addparents

        # Set colors for the parents
        for i, p in enumerate(addparents):
            if not i:
                # the first parent inherits the current node's color
                colors[p] = color
            else:
                colors[p] = newcolor
                newcolor += 1

        # Add edges to the graph
        edges = []
        for ecol, eid in enumerate(seen):
            if eid in next:
                # ongoing column: edge continues (possibly shifted) downward
                bconf = getconf(eid)
                edges.append((
                    ecol, next.index(eid), colors[eid],
                    bconf.get('width', -1),
                    bconf.get('color', '')))
            elif eid == cur:
                # the current node's column: one edge per parent
                for p in parents:
                    bconf = getconf(p)
                    edges.append((
                        ecol, next.index(p), color,
                        bconf.get('width', -1),
                        bconf.get('color', '')))

        # Yield and move on
        yield (cur, type, data, (col, color), edges)
        seen = next
359
360
def asciiedges(type, char, lines, seen, rev, parents):
    """adds edge info to changelog DAG walk suitable for ascii()"""
    # Make sure the current rev owns a column, then locate it.
    if rev not in seen:
        seen.append(rev)
    col = seen.index(rev)

    # Split parents into those already occupying a column ('known') and
    # brand new ones ('fresh'), preserving their order.
    known = [p for p in parents if p in seen]
    fresh = [p for p in parents if p not in seen]

    width = len(seen)
    upcoming = seen[:]
    upcoming[col:col + 1] = fresh
    edges = [(col, upcoming.index(p)) for p in known if p != nullrev]

    # ascii() can only add or remove a single column between two calls.
    # Nodes with more than two new parents break this constraint, so emit
    # intermediate expansion rows that grow the active column list slowly.
    while len(fresh) > 2:
        edges.append((col, col))
        edges.append((col, col + 1))
        yield (type, char, lines, (col, edges, width, 1))
        char = '\\'
        lines = []
        col += 1
        width += 1
        edges = []
        fresh.pop(0)

    if fresh:
        edges.append((col, col))
    if len(fresh) > 1:
        edges.append((col, col + 1))
    seen[:] = upcoming
    yield (type, char, lines, (col, edges, width, len(upcoming) - width))
402
403
403 def _fixlongrightedges(edges):
404 def _fixlongrightedges(edges):
404 for (i, (start, end)) in enumerate(edges):
405 for (i, (start, end)) in enumerate(edges):
405 if end > start:
406 if end > start:
406 edges[i] = (start, end + 1)
407 edges[i] = (start, end + 1)
407
408
408 def _getnodelineedgestail(
409 def _getnodelineedgestail(
409 node_index, p_node_index, n_columns, n_columns_diff, p_diff, fix_tail):
410 node_index, p_node_index, n_columns, n_columns_diff, p_diff, fix_tail):
410 if fix_tail and n_columns_diff == p_diff and n_columns_diff != 0:
411 if fix_tail and n_columns_diff == p_diff and n_columns_diff != 0:
411 # Still going in the same non-vertical direction.
412 # Still going in the same non-vertical direction.
412 if n_columns_diff == -1:
413 if n_columns_diff == -1:
413 start = max(node_index + 1, p_node_index)
414 start = max(node_index + 1, p_node_index)
414 tail = ["|", " "] * (start - node_index - 1)
415 tail = ["|", " "] * (start - node_index - 1)
415 tail.extend(["/", " "] * (n_columns - start))
416 tail.extend(["/", " "] * (n_columns - start))
416 return tail
417 return tail
417 else:
418 else:
418 return ["\\", " "] * (n_columns - node_index - 1)
419 return ["\\", " "] * (n_columns - node_index - 1)
419 else:
420 else:
420 return ["|", " "] * (n_columns - node_index - 1)
421 return ["|", " "] * (n_columns - node_index - 1)
421
422
422 def _drawedges(edges, nodeline, interline):
423 def _drawedges(edges, nodeline, interline):
423 for (start, end) in edges:
424 for (start, end) in edges:
424 if start == end + 1:
425 if start == end + 1:
425 interline[2 * end + 1] = "/"
426 interline[2 * end + 1] = "/"
426 elif start == end - 1:
427 elif start == end - 1:
427 interline[2 * start + 1] = "\\"
428 interline[2 * start + 1] = "\\"
428 elif start == end:
429 elif start == end:
429 interline[2 * start] = "|"
430 interline[2 * start] = "|"
430 else:
431 else:
431 if 2 * end >= len(nodeline):
432 if 2 * end >= len(nodeline):
432 continue
433 continue
433 nodeline[2 * end] = "+"
434 nodeline[2 * end] = "+"
434 if start > end:
435 if start > end:
435 (start, end) = (end, start)
436 (start, end) = (end, start)
436 for i in range(2 * start + 1, 2 * end):
437 for i in range(2 * start + 1, 2 * end):
437 if nodeline[i] != "+":
438 if nodeline[i] != "+":
438 nodeline[i] = "-"
439 nodeline[i] = "-"
439
440
440 def _getpaddingline(ni, n_columns, edges):
441 def _getpaddingline(ni, n_columns, edges):
441 line = []
442 line = []
442 line.extend(["|", " "] * ni)
443 line.extend(["|", " "] * ni)
443 if (ni, ni - 1) in edges or (ni, ni) in edges:
444 if (ni, ni - 1) in edges or (ni, ni) in edges:
444 # (ni, ni - 1) (ni, ni)
445 # (ni, ni - 1) (ni, ni)
445 # | | | | | | | |
446 # | | | | | | | |
446 # +---o | | o---+
447 # +---o | | o---+
447 # | | c | | c | |
448 # | | c | | c | |
448 # | |/ / | |/ /
449 # | |/ / | |/ /
449 # | | | | | |
450 # | | | | | |
450 c = "|"
451 c = "|"
451 else:
452 else:
452 c = " "
453 c = " "
453 line.extend([c, " "])
454 line.extend([c, " "])
454 line.extend(["|", " "] * (n_columns - ni - 1))
455 line.extend(["|", " "] * (n_columns - ni - 1))
455 return line
456 return line
456
457
def asciistate():
    """Return a fresh two-element state list for ascii().

    state[0] holds the previous column-count diff and state[1] the previous
    node index; both start at zero.
    """
    return [0] * 2
460
461
def ascii(ui, state, type, char, text, coldata):
    """prints an ASCII graph of the DAG

    takes the following arguments (one call per node in the graph):

      - ui to write to
      - Somewhere to keep the needed state in (init to asciistate())
      - Column of the current node in the set of ongoing edges.
      - Type indicator of node data, usually 'C' for changesets.
      - Payload: (char, lines):
        - Character to use as node's symbol.
        - List of lines to display as the node's text.
      - Edges; a list of (col, next_col) indicating the edges between
        the current node and its parents.
      - Number of columns (ongoing edges) in the current revision.
      - The difference between the number of columns (ongoing edges)
        in the next revision and the number of columns (ongoing edges)
        in the current revision. That is: -1 means one column removed;
        0 means no columns added or removed; 1 means one column added.

    Side effects: writes the rendered lines to ``ui``, may append empty
    strings to ``text`` (to pad it up to the number of graph lines), and
    overwrites ``state[0]``/``state[1]`` so the next call knows the
    previous node's coldiff and column.  ``type`` is part of the public
    interface but is not used by the rendering below.
    """

    # Unpack this node's layout: its column, its parent edges, the
    # current column count, and the column-count delta to the next row.
    idx, edges, ncols, coldiff = coldata
    # Only single-column growth/shrink per node is supported.
    assert -2 < coldiff < 2
    if coldiff == -1:
        # Transform
        #
        #     | | |        | | |
        #     o | |  into  o---+
        #     |X /         |/ /
        #     | |          | |
        _fixlongrightedges(edges)

    # add_padding_line says whether to rewrite
    #
    #     | | | |        | | | |
    #     | o---+  into  | o---+
    #     |  / /         |   | |  # <--- padding line
    #     o | |          |  / /
    #                    o | |
    # (truthy when the text is tall, a column is being removed, and some
    # edge jumps more than one column to the right)
    add_padding_line = (len(text) > 2 and coldiff == -1 and
                        [x for (x, y) in edges if x + 1 < y])

    # fix_nodeline_tail says whether to rewrite
    #
    #     | | o | |        | | o | |
    #     | | |/ /         | | |/ /
    #     | o | |    into  | o / /   # <--- fixed nodeline tail
    #     | |/ /           | |/ /
    #     o | |            o | |
    fix_nodeline_tail = len(text) <= 2 and not add_padding_line

    # nodeline is the line containing the node character (typically o)
    nodeline = ["|", " "] * idx
    nodeline.extend([char, " "])

    # state[0]/state[1] hold the previous node's coldiff and column,
    # which determine how the edge tail to the right of the node bends.
    nodeline.extend(
        _getnodelineedgestail(idx, state[1], ncols, coldiff,
                              state[0], fix_nodeline_tail))

    # shift_interline is the line containing the non-vertical
    # edges between this entry and the next
    shift_interline = ["|", " "] * idx
    if coldiff == -1:
        # columns to the right of the node shift left: draw "/"
        n_spaces = 1
        edge_ch = "/"
    elif coldiff == 0:
        # column count unchanged: edges continue straight down as "|"
        n_spaces = 2
        edge_ch = "|"
    else:
        # a column was added: edges shift right, drawn as "\"
        n_spaces = 3
        edge_ch = "\\"
    shift_interline.extend(n_spaces * [" "])
    shift_interline.extend([edge_ch, " "] * (ncols - idx - 1))

    # draw edges from the current node to its parents
    _drawedges(edges, nodeline, shift_interline)

    # lines is the list of all graph lines to print
    lines = [nodeline]
    if add_padding_line:
        lines.append(_getpaddingline(idx, ncols, edges))
    lines.append(shift_interline)

    # make sure that there are as many graph lines as there are
    # log strings
    while len(text) < len(lines):
        text.append("")
    if len(lines) < len(text):
        # Pad with plain "|" interlines so every text line has a graph
        # column prefix.  The same list object is appended repeatedly;
        # that is safe because the lines are only joined below, never
        # mutated afterwards.
        extra_interline = ["|", " "] * (ncols + coldiff)
        while len(lines) < len(text):
            lines.append(extra_interline)

    # print lines
    # Pad the graph part to the widest of the current/next column counts
    # so the log text starts at a consistent column within this node.
    indentation_level = max(ncols, ncols + coldiff)
    for (line, logstr) in zip(lines, text):
        ln = "%-*s %s" % (2 * indentation_level, "".join(line), logstr)
        ui.write(ln.rstrip() + '\n')

    # ... and start over
    state[0] = coldiff
    state[1] = idx
General Comments 0
You need to be logged in to leave comments. Login now