@@ -110,79 +110,81 @@ def groupbranchiter(revs, parentsfunc):
     # parts of the initial set should be emitted.
     groups = [([], unblocked)]
     for current in revs:
-        # Look for a subgroup blocked, waiting for the current revision.
-        matching = [i for i, g in enumerate(groups) if current in g[1]]
+        if True:
+            # Seek for a subgroup blocked, waiting for the current revision.
+            matching = [i for i, g in enumerate(groups) if current in g[1]]

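The state this hunk manipulates is easy to lose track of in diff form. As a reading aid, here is a minimal sketch of the bookkeeping with made-up toy values (it is not the Mercurial code itself): each subgroup is a pair of a list of revisions already assigned to it and a set of revisions it still waits on, and matching collects the indices of every subgroup blocked on the revision being processed.

    # Each subgroup is a pair (revs, blocked):
    #   revs    - revisions already assigned to the subgroup, in emission order
    #   blocked - revisions the subgroup still waits on (parents of its members
    #             that have not been reached yet)
    # groups[0] is the subgroup currently being emitted, and "unblocked" is an
    # alias for groups[0][1].
    groups = [([12, 11], {9, 8}),   # toy subgroup waiting on revisions 9 and 8
              ([10], {9})]          # another toy subgroup, also waiting on 9

    current = 9
    matching = [i for i, g in enumerate(groups) if current in g[1]]
    assert matching == [0, 1]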
|
-        if matching:
-            # The main idea is to gather together all sets that await on the
-            # same revision.
-            #
-            # This merging is done at the time we are about to add this common
-            # awaited to the subgroup for simplicity purpose. Such merge could
-            # happen sooner when we update the "blocked" set of revision.
-            #
-            # We also always keep the oldest subgroup first. We can probably
-            # improve the behavior by having the longuest set first. That way,
-            # graph algorythms could minimise the length of parallele lines
-            # their draw. This is currently not done.
-            targetidx = matching.pop(0)
-            trevs, tparents = groups[targetidx]
-            for i in matching:
-                gr = groups[i]
-                trevs.extend(gr[0])
-                tparents |= gr[1]
-            # delete all merged subgroups (but the one we keep)
-            # (starting from the last subgroup for performance and sanity reason)
-            for i in reversed(matching):
-                del groups[i]
-        else:
-            # This is a new head. We create a new subgroup for it.
-            targetidx = len(groups)
-            groups.append(([], set([current])))
+            if matching:
+                # The main idea is to gather together all sets that await on
+                # the same revision.
+                #
+                # This merging is done at the time we are about to add this
+                # common awaited to the subgroup for simplicity purpose. Such
+                # merge could happen sooner when we update the "blocked" set of
+                # revision.
+                #
+                # We also always keep the oldest subgroup first. We can
+                # probably improve the behavior by having the longuest set
+                # first. That way, graph algorythms could minimise the length
+                # of parallele lines their draw. This is currently not done.
+                targetidx = matching.pop(0)
+                trevs, tparents = groups[targetidx]
+                for i in matching:
+                    gr = groups[i]
+                    trevs.extend(gr[0])
+                    tparents |= gr[1]
+                # delete all merged subgroups (but the one we keep) (starting
+                # from the last subgroup for performance and sanity reason)
+                for i in reversed(matching):
+                    del groups[i]
+            else:
+                # This is a new head. We create a new subgroup for it.
+                targetidx = len(groups)
+                groups.append(([], set([current])))

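To see the merge branch above in action, the same toy values can be pushed through it in isolation. This is a sketch with invented data, not a test from the patch; it only illustrates why the merged-away subgroups are deleted from the highest index down.

    groups = [([12, 11], {9, 8}),
              ([10], {9})]
    current = 9
    matching = [i for i, g in enumerate(groups) if current in g[1]]

    if matching:
        # Fold every other matching subgroup into the oldest one.
        targetidx = matching.pop(0)
        trevs, tparents = groups[targetidx]
        for i in matching:
            gr = groups[i]
            trevs.extend(gr[0])
            tparents |= gr[1]
        # Delete from the last index first so the remaining indices stay valid.
        for i in reversed(matching):
            del groups[i]
    else:
        # A new head gets a subgroup of its own.
        targetidx = len(groups)
        groups.append(([], {current}))

    assert groups == [([12, 11, 10], {8, 9})] and targetidx == 0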
|
-        gr = groups[targetidx]
+            gr = groups[targetidx]

-        # We now adds the current nodes to this subgroups. This is done after
-        # the subgroup merging because all elements from a subgroup that relied
-        # on this rev must preceed it.
-        #
-        # we also update the <parents> set to includes the parents on the
-        # new nodes.
-        gr[0].append(current)
-        gr[1].remove(current)
-        gr[1].update([p for p in parentsfunc(current) if p > nullrev])
+            # We now adds the current nodes to this subgroups. This is done
+            # after the subgroup merging because all elements from a subgroup
+            # that relied on this rev must preceed it.
+            #
+            # we also update the <parents> set to includes the parents on the
+            # new nodes.
+            gr[0].append(current)
+            gr[1].remove(current)
+            gr[1].update([p for p in parentsfunc(current) if p > nullrev])

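Continuing the same toy state, the block above records current in its subgroup and swaps it in the blocked set for its own parents. The parentsfunc below and nullrev = -1 are stand-ins for the real arguments; this is only a sketch.

    nullrev = -1

    def parentsfunc(rev):
        # Hypothetical parent table for the toy graph used above.
        return {9: (7, nullrev)}.get(rev, (nullrev, nullrev))

    groups = [([12, 11, 10], {9, 8})]
    targetidx, current = 0, 9

    gr = groups[targetidx]
    gr[0].append(current)                # current is now part of the subgroup
    gr[1].remove(current)                # ... and no longer awaited
    gr[1].update(p for p in parentsfunc(current) if p > nullrev)

    assert groups == [([12, 11, 10, 9], {8, 7})]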
|
-        # Look for a subgroup to display
-        #
-        # When unblocked is empty (if clause), We are not waiting over any
-        # revision during the first iteration (if no priority was given) or if
-        # we outputed a whole disconnected sets of the graph (reached a root).
-        # In that case we arbitrarily takes the oldest known subgroup. The
-        # heuristique could probably be better.
-        #
-        # Otherwise (elif clause) this mean we have some emitted revision. if
-        # the subgroup awaits on the same revision that the outputed ones, we
-        # can safely output it.
-        if not unblocked:
-            if len(groups) > 1:  # display other subset
-                targetidx = 1
-                gr = groups[1]
-        elif not gr[1] & unblocked:
-            gr = None
+            # Look for a subgroup to display
+            #
+            # When unblocked is empty (if clause), We are not waiting over any
+            # revision during the first iteration (if no priority was given) or
+            # if we outputed a whole disconnected sets of the graph (reached a
+            # root). In that case we arbitrarily takes the oldest known
+            # subgroup. The heuristique could probably be better.
+            #
+            # Otherwise (elif clause) this mean we have some emitted revision.
+            # if the subgroup awaits on the same revision that the outputed
+            # ones, we can safely output it.
+            if not unblocked:
+                if len(groups) > 1:  # display other subset
+                    targetidx = 1
+                    gr = groups[1]
+            elif not gr[1] & unblocked:
+                gr = None

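The decision above has two cases: when nothing already emitted is waiting on anything, a whole connected component has been flushed (or nothing has been emitted yet) and the oldest other subgroup is picked arbitrarily; otherwise the subgroup that just received current is emitted only if it awaits a revision the emitted side also awaits. A small helper restating that choice on its own, as a sketch rather than the patched code:

    def pick_for_display(groups, unblocked, targetidx, gr):
        """Return (targetidx, gr) to emit now, or (targetidx, None) to wait."""
        if not unblocked:
            # Nothing emitted is blocked: arbitrarily take the oldest other
            # subgroup, if there is one.
            if len(groups) > 1:
                targetidx, gr = 1, groups[1]
        elif not gr[1] & unblocked:
            # No shared awaited revision with the emitted side: hold it back.
            gr = None
        return targetidx, gr

    # With the toy state from before, revision 9 landed in groups[0], whose
    # blocked set is "unblocked" itself, so it is emitted immediately.
    groups = [([12, 11, 10, 9], {8, 7})]
    unblocked = groups[0][1]
    targetidx, gr = pick_for_display(groups, unblocked, 0, groups[0])
    assert gr is groups[0]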
|
-        if gr is not None:
-            # update the set of awaited revisions with the one from the
-            # subgroup
-            unblocked |= gr[1]
-            # output all revisions in the subgroup
-            for r in gr[0]:
-                yield r
-            # delete the subgroup that you just output
-            # unless it is groups[0] in which case you just empty it.
-            if targetidx:
-                del groups[targetidx]
-            else:
-                gr[0][:] = []
+            if gr is not None:
+                # update the set of awaited revisions with the one from the
+                # subgroup
+                unblocked |= gr[1]
+                # output all revisions in the subgroup
+                for r in gr[0]:
+                    yield r
+                # delete the subgroup that you just output
+                # unless it is groups[0] in which case you just empty it.
+                if targetidx:
+                    del groups[targetidx]
+                else:
+                    gr[0][:] = []

 def dagwalker(repo, revs):
     """cset DAG generator yielding (id, CHANGESET, ctx, [parentids]) tuples
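For experimenting with the behaviour outside of Mercurial, the loop body above can be condensed into a self-contained sketch. The function name, the empty initial unblocked set, the default nullrev, the tail flush after the loop, and the toy DAG are all assumptions of this example; in the real function, unblocked is initialised before the hunk starts.

    def grouped_toposort(revs, parentsfunc, nullrev=-1):
        """Yield revs (given highest-first) so that revisions of a branch
        come out grouped together.  Simplified sketch of the diffed code."""
        unblocked = set()
        groups = [([], unblocked)]
        for current in revs:
            # Subgroups already waiting on this revision.
            matching = [i for i, g in enumerate(groups) if current in g[1]]
            if matching:
                # Merge all of them into the oldest one.
                targetidx = matching.pop(0)
                trevs, tparents = groups[targetidx]
                for i in matching:
                    trevs.extend(groups[i][0])
                    tparents |= groups[i][1]
                for i in reversed(matching):
                    del groups[i]
            else:
                # New head: open a fresh subgroup.
                targetidx = len(groups)
                groups.append(([], {current}))

            # Record the revision and wait on its parents instead.
            gr = groups[targetidx]
            gr[0].append(current)
            gr[1].remove(current)
            gr[1].update(p for p in parentsfunc(current) if p > nullrev)

            # Can a subgroup be flushed now?
            if not unblocked:
                if len(groups) > 1:
                    targetidx, gr = 1, groups[1]
            elif not gr[1] & unblocked:
                gr = None

            if gr is not None:
                unblocked |= gr[1]
                for r in gr[0]:
                    yield r
                if targetidx:
                    del groups[targetidx]
                else:
                    gr[0][:] = []

        # Tail flush (an assumption: the hunk ends before the function does).
        for grevs, _blocked in groups:
            for r in grevs:
                yield r

    # Toy DAG: 0 is the root, 0-1-2-5 is one branch, 0-3-4 another.
    toy = {0: (-1,), 1: (0,), 2: (1,), 3: (0,), 4: (3,), 5: (2,)}
    order = list(grouped_toposort(sorted(toy, reverse=True), toy.get))
    assert order == [5, 2, 1, 4, 3, 0]   # each branch comes out as one run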