##// END OF EJS Templates
obsolete: small doc update for 'successorssets'...
Boris Feld -
r33272:df90f4d6 default
parent child Browse files
Show More
@@ -1,517 +1,518
1 # obsutil.py - utility functions for obsolescence
1 # obsutil.py - utility functions for obsolescence
2 #
2 #
3 # Copyright 2017 Boris Feld <boris.feld@octobus.net>
3 # Copyright 2017 Boris Feld <boris.feld@octobus.net>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 from . import (
10 from . import (
11 phases,
11 phases,
12 )
12 )
13
13
class marker(object):
    """Lightweight wrapper around raw obsolescence marker data.

    The raw data is a tuple whose slots are read by the accessors below:
    0: precursor node, 1: successor nodes, 2: flags, 3: encoded metadata,
    4: (unixtime, offset) date, 5: parent nodes (or None if not recorded).
    """

    def __init__(self, repo, data):
        # the repo argument will be used to create changectx in later version
        self._repo = repo
        self._data = data
        self._decodedmeta = None

    def __hash__(self):
        return hash(self._data)

    def __eq__(self, other):
        # markers compare equal iff they wrap identical raw data
        return type(other) == type(self) and self._data == other._data

    def precnode(self):
        """Precursor changeset node identifier"""
        return self._data[0]

    def succnodes(self):
        """List of successor changesets node identifiers"""
        return self._data[1]

    def parentnodes(self):
        """Parents of the precursors (None if not recorded)"""
        return self._data[5]

    def metadata(self):
        """Decoded metadata dictionary"""
        return dict(self._data[3])

    def date(self):
        """Creation date as (unixtime, offset)"""
        return self._data[4]

    def flags(self):
        """The flags field of the marker"""
        return self._data[2]
54
54
def getmarkers(repo, nodes=None, exclusive=False):
    """Yield ``marker`` objects for obsolescence markers known in a repository.

    Without <nodes>, every marker in the obsstore is yielded. If <nodes> is
    specified, only markers "relevant" to those nodes are returned; when
    ``exclusive`` is also true, the selection narrows further to markers
    exclusive to those nodes (see ``exclusivemarkers``).
    """
    if nodes is not None:
        if exclusive:
            rawmarkers = exclusivemarkers(repo, nodes)
        else:
            rawmarkers = repo.obsstore.relevantmarkers(nodes)
    else:
        rawmarkers = repo.obsstore

    for rawdata in rawmarkers:
        yield marker(repo, rawdata)
69
69
def closestpredecessors(repo, nodeid):
    """yield the list of next predecessors pointing on visible changectx nodes

    This function respect the repoview filtering, filtered revision will be
    considered missing.
    """
    precursors = repo.obsstore.precursors
    pending = [nodeid]
    visited = set(pending)

    while pending:
        node = pending.pop()

        for mark in precursors.get(node, ()):
            candidate = mark[0]

            # basic cycle protection
            if candidate in visited:
                continue
            visited.add(candidate)

            if candidate in repo:
                # visible in the (possibly filtered) repo: report it
                yield candidate
            else:
                # unknown or filtered locally: keep walking further back
                pending.append(candidate)
97
97
def allprecursors(obsstore, nodes, ignoreflags=0):
    """Yield the node of every precursor of <nodes>.

    Some precursors may be unknown locally.

    This is a linear yield unsuited to detecting folded changesets. It
    includes the initial nodes too.
    """
    pending = set(nodes)
    emitted = set(pending)
    while pending:
        node = pending.pop()
        yield node
        for mark in obsstore.precursors.get(node, ()):
            # skip markers carrying any of the ignored flags
            if mark[2] & ignoreflags:
                continue
            prec = mark[0]
            if prec not in emitted:
                emitted.add(prec)
                pending.add(prec)
119
119
def allsuccessors(obsstore, nodes, ignoreflags=0):
    """Yield the node of every successor of <nodes>.

    Some successors may be unknown locally.

    This is a linear yield unsuited to detecting split changesets. It
    includes the initial nodes too.
    """
    pending = set(nodes)
    emitted = set(pending)
    while pending:
        node = pending.pop()
        yield node
        for mark in obsstore.successors.get(node, ()):
            # skip markers carrying any of the ignored flags
            if mark[2] & ignoreflags:
                continue
            for succ in mark[1]:
                if succ not in emitted:
                    emitted.add(succ)
                    pending.add(succ)
140
140
141 def _filterprunes(markers):
141 def _filterprunes(markers):
142 """return a set with no prune markers"""
142 """return a set with no prune markers"""
143 return set(m for m in markers if m[1])
143 return set(m for m in markers if m[1])
144
144
def exclusivemarkers(repo, nodes):
    """set of markers relevant to "nodes" but no other locally-known nodes

    This function compute the set of markers "exclusive" to a locally-known
    node. This means we walk the markers starting from <nodes> until we reach a
    locally-known precursors outside of <nodes>. Element of <nodes> with
    locally-known successors outside of <nodes> are ignored (since their
    precursors markers are also relevant to these successors).

    For example:

        # (A0 rewritten as A1)
        #
        # A0 <-1- A1 # Marker "1" is exclusive to A1

    or

        # (A0 rewritten as AX; AX rewritten as A1; AX is unknown locally)
        #
        # <-1- A0 <-2- AX <-3- A1 # Marker "2,3" are exclusive to A1

    or

        # (A0 has unknown precursors, A0 rewritten as A1 and A2 (divergence))
        #
        #          <-2- A1 # Marker "2" is exclusive to A0,A1
        #        /
        # <-1- A0
        #        \
        #         <-3- A2 # Marker "3" is exclusive to A0,A2
        #
        # in addition:
        #
        #  Markers "2,3" are exclusive to A1,A2
        #  Markers "1,2,3" are exclusive to A0,A1,A2

    See test/test-obsolete-bundle-strip.t for more examples.

    An example usage is strip. When stripping a changeset, we also want to
    strip the markers exclusive to this changeset. Otherwise we would have
    "dangling" obsolescence markers from its precursors: Obsolescence markers
    marking a node as obsolete without any successors available locally.

    As for relevant markers, the prune markers for children will be followed.
    Of course, they will only be followed if the pruned children is
    locally-known. Since the prune markers are relevant to the pruned node.
    However, while prune markers are considered relevant to the parent of the
    pruned changesets, prune markers for locally-known changeset (with no
    successors) are considered exclusive to the pruned nodes. This allows
    to strip the prune markers (with the rest of the exclusive chain) alongside
    the pruned changesets.
    """
    # running on a filtered repository would be dangerous as markers could be
    # reported as exclusive when they are relevant for other filtered nodes.
    unfi = repo.unfiltered()

    # shortcut to various useful item
    nm = unfi.changelog.nodemap
    precursorsmarkers = unfi.obsstore.precursors
    successormarkers = unfi.obsstore.successors
    childrenmarkers = unfi.obsstore.children

    # exclusive markers (return of the function)
    exclmarkers = set()
    # we need fast membership testing
    nodes = set(nodes)
    # looking for head in the obshistory
    #
    # XXX we are ignoring all issues in regard with cycle for now.
    stack = [n for n in nodes if not _filterprunes(successormarkers.get(n, ()))]
    stack.sort()
    # nodes already stacked
    seennodes = set(stack)
    while stack:
        current = stack.pop()
        # fetch precursors markers
        markers = list(precursorsmarkers.get(current, ()))
        # extend the list with prune markers
        for mark in successormarkers.get(current, ()):
            if not mark[1]:
                markers.append(mark)
        # and markers from children (looking for prune)
        for mark in childrenmarkers.get(current, ()):
            if not mark[1]:
                markers.append(mark)
        # traverse the markers
        for mark in markers:
            if mark in exclmarkers:
                # markers already selected
                continue

            # If the markers is about the current node, select it
            #
            # (this delay the addition of markers from children)
            if mark[1] or mark[0] == current:
                exclmarkers.add(mark)

            # should we keep traversing through the precursors?
            prec = mark[0]

            # nodes in the stack or already processed
            if prec in seennodes:
                continue

            # is this a locally known node ?
            known = prec in nm
            # if locally-known and not in the <nodes> set the traversal
            # stop here.
            if known and prec not in nodes:
                continue

            # do not keep going if there are unselected markers pointing to this
            # nodes. If we end up traversing these unselected markers later the
            # node will be taken care of at that point.
            #
            # NOTE(review): successormarkers.get(prec) has no default here; it
            # appears every reachable `prec` has at least the current marker
            # recorded as its successor-marker — confirm before relying on it.
            precmarkers = _filterprunes(successormarkers.get(prec))
            if precmarkers.issubset(exclmarkers):
                seennodes.add(prec)
                stack.append(prec)

    return exclmarkers
265
265
def foreground(repo, nodes):
    """return all nodes in the "foreground" of other node

    The foreground of a revision is anything reachable using parent -> children
    or precursor -> successor relation. It is very similar to "descendant" but
    augmented with obsolescence information.

    Beware that possible obsolescence cycle may result if complex situation.
    """
    repo = repo.unfiltered()
    foreground = set(repo.set('%ln::', nodes))
    if repo.obsstore:
        # We only need this complicated logic if there is obsolescence
        # XXX will probably deserve an optimised revset.
        nm = repo.changelog.nodemap
        prevsize = -1
        # iterate until the descendants+successors closure stops growing
        while len(foreground) != prevsize:
            prevsize = len(foreground)
            candidates = set(ctx.node() for ctx in foreground)
            mutable = [ctx.node() for ctx in foreground if ctx.mutable()]
            candidates.update(allsuccessors(repo.obsstore, mutable))
            # restrict to nodes known in the local changelog before the
            # revset query
            locallyknown = (n for n in candidates if n in nm)
            foreground = set(repo.set('%ln::', locallyknown))
    return set(ctx.node() for ctx in foreground)
291
291
def getobsoleted(repo, tr):
    """return the set of pre-existing revisions obsoleted by a transaction"""
    torev = repo.unfiltered().changelog.nodemap.get
    phase = repo._phasecache.phase
    succsmarkers = repo.obsstore.successors.get
    public = phases.public
    addedmarkers = tr.changes.get('obsmarkers')
    addedrevs = tr.changes.get('revs')
    seenrevs = set(addedrevs)
    obsoleted = set()
    for mark in addedmarkers:
        node = mark[0]
        rev = torev(node)
        # skip nodes unknown locally and revisions created by this very
        # transaction (they are not "pre-existing")
        if rev is None or rev in seenrevs:
            continue
        seenrevs.add(rev)
        # public changesets cannot become obsolete
        if phase(repo, rev) == public:
            continue
        # only report the node when all of its successor markers were added
        # by this transaction
        if set(succsmarkers(node)).issubset(addedmarkers):
            obsoleted.add(rev)
    return obsoleted
313
313
def successorssets(repo, initialnode, cache=None):
    """Return set of all latest successors of initial nodes

    The successors set of a changeset A are the group of revisions that succeed
    A. It succeeds A as a consistent whole, each revision being only a partial
    replacement. The successors set contains non-obsolete changesets only.

    This function returns the full list of successor sets which is why it
    returns a list of tuples and not just a single tuple. Each tuple is a valid
    successors set. Note that (A,) may be a valid successors set for changeset A
    (see below).

    In most cases, a changeset A will have a single element (e.g. the changeset
    A is replaced by A') in its successors set. Though, it is also common for a
    changeset A to have no elements in its successor set (e.g. the changeset
    has been pruned). Therefore, the returned list of successors sets will be
    [(A',)] or [], respectively.

    When a changeset A is split into A' and B', however, it will result in a
    successors set containing more than a single element, i.e. [(A',B')].
    Divergent changesets will result in multiple successors sets, i.e. [(A',),
    (A'')].

    If a changeset A is not obsolete, then it will conceptually have no
    successors set. To distinguish this from a pruned changeset, the successor
    set will contain itself only, i.e. [(A,)].

    Finally, final successors unknown locally are considered to be pruned
    (pruned: obsoleted without any successors). (Final: successors not affected
    by markers).

    The optional `cache` parameter is a dictionary that may contain precomputed
    successors sets. It is meant to reuse the computation of a previous call to
    `successorssets` when multiple calls are made at the same time. The cache
    dictionary is updated in place. The caller is responsible for its life
    span. Code that makes multiple calls to `successorssets` *should* use this
    cache mechanism or risk a performance hit.
    """

    succmarkers = repo.obsstore.successors

    # Stack of nodes we search successors sets for
    toproceed = [initialnode]
    # set version of above list for fast loop detection
    # element added to "toproceed" must be added here
    stackedset = set(toproceed)
    if cache is None:
        cache = {}

    # This while loop is the flattened version of a recursive search for
    # successors sets
    #
    # def successorssets(x):
    #    successors = directsuccessors(x)
    #    ss = [[]]
    #    for succ in directsuccessors(x):
    #        # product as in itertools cartesian product
    #        ss = product(ss, successorssets(succ))
    #    return ss
    #
    # But we can not use plain recursive calls here:
    # - that would blow the python call stack
    # - obsolescence markers may have cycles, we need to handle them.
    #
    # The `toproceed` list act as our call stack. Every node we search
    # successors set for are stacked there.
    #
    # The `stackedset` is set version of this stack used to check if a node is
    # already stacked. This check is used to detect cycles and prevent infinite
    # loop.
    #
    # successors set of all nodes are stored in the `cache` dictionary.
    #
    # After this while loop ends we use the cache to return the successors sets
    # for the node requested by the caller.
    while toproceed:
        # Every iteration tries to compute the successors sets of the topmost
        # node of the stack: CURRENT.
        #
        # There are four possible outcomes:
        #
        # 1) We already know the successors sets of CURRENT:
        #    -> mission accomplished, pop it from the stack.
        # 2) Node is not obsolete:
        #    -> the node is its own successors sets. Add it to the cache.
        # 3) We do not know successors set of direct successors of CURRENT:
        #    -> We add those successors to the stack.
        # 4) We know successors sets of all direct successors of CURRENT:
        #    -> We can compute CURRENT successors set and add it to the
        #       cache.
        #
        current = toproceed[-1]
        if current in cache:
            # case (1): We already know the successors sets
            stackedset.remove(toproceed.pop())
        elif current not in succmarkers:
            # case (2): The node is not obsolete.
            if current in repo:
                # We have a valid last successors.
                cache[current] = [(current,)]
            else:
                # Final obsolete version is unknown locally.
                # Do not count that as a valid successors
                cache[current] = []
        else:
            # cases (3) and (4)
            #
            # We proceed in two phases. Phase 1 aims to distinguish case (3)
            # from case (4):
            #
            #     For each direct successors of CURRENT, we check whether its
            #     successors sets are known. If they are not, we stack the
            #     unknown node and proceed to the next iteration of the while
            #     loop. (case 3)
            #
            #     During this step, we may detect obsolescence cycles: a node
            #     with unknown successors sets but already in the call stack.
            #     In such a situation, we arbitrary set the successors sets of
            #     the node to nothing (node pruned) to break the cycle.
            #
            #     If no break was encountered we proceed to phase 2.
            #
            # Phase 2 computes successors sets of CURRENT (case 4); see details
            # in phase 2 itself.
            #
            # Note the two levels of iteration in each phase.
            # - The first one handles obsolescence markers using CURRENT as
            #   precursor (successors markers of CURRENT).
            #
            #   Having multiple entry here means divergence.
            #
            # - The second one handles successors defined in each marker.
            #
            #   Having none means pruned node, multiple successors means split,
            #   single successors are standard replacement.
            #
            for mark in sorted(succmarkers[current]):
                for suc in mark[1]:
                    if suc not in cache:
                        if suc in stackedset:
                            # cycle breaking
                            cache[suc] = []
                        else:
                            # case (3) If we have not computed successors sets
                            # of one of those successors we add it to the
                            # `toproceed` stack and stop all work for this
                            # iteration.
                            toproceed.append(suc)
                            stackedset.add(suc)
                            break
                else:
                    continue
                break
            else:
                # case (4): we know all successors sets of all direct
                # successors
                #
                # Successors set contributed by each marker depends on the
                # successors sets of all its "successors" node.
                #
                # Each different marker is a divergence in the obsolescence
                # history. It contributes successors sets distinct from other
                # markers.
                #
                # Within a marker, a successor may have divergent successors
                # sets. In such a case, the marker will contribute multiple
                # divergent successors sets. If multiple successors have
                # divergent successors sets, a Cartesian product is used.
                #
                # At the end we post-process successors sets to remove
                # duplicated entry and successors set that are strict subset of
                # another one.
                succssets = []
                for mark in sorted(succmarkers[current]):
                    # successors sets contributed by this marker
                    markss = [[]]
                    for suc in mark[1]:
                        # cardinal product with previous successors
                        productresult = []
                        for prefix in markss:
                            for suffix in cache[suc]:
                                newss = list(prefix)
                                for part in suffix:
                                    # do not duplicated entry in successors set
                                    # first entry wins.
                                    if part not in newss:
                                        newss.append(part)
                                productresult.append(newss)
                        markss = productresult
                    succssets.extend(markss)
                # remove duplicated and subset
                seen = []
                final = []
                candidate = sorted(((set(s), s) for s in succssets if s),
                                   key=lambda x: len(x[1]), reverse=True)
                for setversion, listversion in candidate:
                    for seenset in seen:
                        if setversion.issubset(seenset):
                            break
                    else:
                        final.append(listversion)
                        seen.append(setversion)
                final.reverse()  # put small successors set first
                cache[current] = final
    return cache[initialnode]
General Comments 0
You need to be logged in to leave comments. Login now