obsutil: defend against succsmarkers() returning None...
Augie Fackler
r33679:888f2481 default
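The one-line change below guards the call to succsmarkers(), which is bound to repo.obsstore.successors.get and therefore returns None for a node that has no entry in the successors index. A minimal, self-contained sketch of the failure mode and of the fix, using plain dicts and a made-up node id in place of the real obsstore index:

# succsmarkers is a dict .get in getobsoleted(), so a node without an
# entry in the successors index yields None instead of a list of markers.
successors = {}                  # stand-in for repo.obsstore.successors
succsmarkers = successors.get

node = b'deadbeef'               # hypothetical node with no successor markers
addedmarkers = set()

# Before the change: set(None) raises TypeError.
try:
    set(succsmarkers(node)).issubset(addedmarkers)
except TypeError:
    pass

# After the change: 'or []' maps the missing entry to an empty set,
# and an empty set is a subset of anything.
assert set(succsmarkers(node) or []).issubset(addedmarkers)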
@@ -1,537 +1,537 @@
# obsutil.py - utility functions for obsolescence
#
# Copyright 2017 Boris Feld <boris.feld@octobus.net>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

from . import (
    phases,
)

class marker(object):
    """Wrap obsolete marker raw data"""

    def __init__(self, repo, data):
        # the repo argument will be used to create changectx in later version
        self._repo = repo
        self._data = data
        self._decodedmeta = None

    def __hash__(self):
        return hash(self._data)

    def __eq__(self, other):
        if type(other) != type(self):
            return False
        return self._data == other._data

    def precnode(self):
        """Precursor changeset node identifier"""
        return self._data[0]

    def succnodes(self):
        """List of successor changesets node identifiers"""
        return self._data[1]

    def parentnodes(self):
        """Parents of the precursors (None if not recorded)"""
        return self._data[5]

    def metadata(self):
        """Decoded metadata dictionary"""
        return dict(self._data[3])

    def date(self):
        """Creation date as (unixtime, offset)"""
        return self._data[4]

    def flags(self):
        """The flags field of the marker"""
        return self._data[2]

def getmarkers(repo, nodes=None, exclusive=False):
    """returns markers known in a repository

    If <nodes> is specified, only markers "relevant" to those nodes are
    returned"""
    if nodes is None:
        rawmarkers = repo.obsstore
    elif exclusive:
        rawmarkers = exclusivemarkers(repo, nodes)
    else:
        rawmarkers = repo.obsstore.relevantmarkers(nodes)

    for markerdata in rawmarkers:
        yield marker(repo, markerdata)

def closestpredecessors(repo, nodeid):
    """yield the list of next predecessors pointing on visible changectx nodes

    This function respects the repoview filtering; filtered revisions will be
    considered missing.
    """

    precursors = repo.obsstore.precursors
    stack = [nodeid]
    seen = set(stack)

    while stack:
        current = stack.pop()
        currentpreccs = precursors.get(current, ())

        for prec in currentpreccs:
            precnodeid = prec[0]

            # Basic cycle protection
            if precnodeid in seen:
                continue
            seen.add(precnodeid)

            if precnodeid in repo:
                yield precnodeid
            else:
                stack.append(precnodeid)

def allprecursors(obsstore, nodes, ignoreflags=0):
    """Yield node for every precursor of <nodes>.

    Some precursors may be unknown locally.

    This is a linear yield unsuited to detecting folded changesets. It includes
    initial nodes too."""

    remaining = set(nodes)
    seen = set(remaining)
    while remaining:
        current = remaining.pop()
        yield current
        for mark in obsstore.precursors.get(current, ()):
            # ignore marker flagged with specified flag
            if mark[2] & ignoreflags:
                continue
            suc = mark[0]
            if suc not in seen:
                seen.add(suc)
                remaining.add(suc)

def allsuccessors(obsstore, nodes, ignoreflags=0):
    """Yield node for every successor of <nodes>.

    Some successors may be unknown locally.

    This is a linear yield unsuited to detecting split changesets. It includes
    initial nodes too."""
    remaining = set(nodes)
    seen = set(remaining)
    while remaining:
        current = remaining.pop()
        yield current
        for mark in obsstore.successors.get(current, ()):
            # ignore marker flagged with specified flag
            if mark[2] & ignoreflags:
                continue
            for suc in mark[1]:
                if suc not in seen:
                    seen.add(suc)
                    remaining.add(suc)

def _filterprunes(markers):
    """return a set with no prune markers"""
    return set(m for m in markers if m[1])

def exclusivemarkers(repo, nodes):
    """set of markers relevant to "nodes" but no other locally-known nodes

    This function computes the set of markers "exclusive" to a locally-known
    node. This means we walk the markers starting from <nodes> until we reach a
    locally-known precursor outside of <nodes>. Elements of <nodes> with
    locally-known successors outside of <nodes> are ignored (since their
    precursors markers are also relevant to these successors).

    For example:

    # (A0 rewritten as A1)
    #
    # A0 <-1- A1 # Marker "1" is exclusive to A1

    or

    # (A0 rewritten as AX; AX rewritten as A1; AX is unknown locally)
    #
    # <-1- A0 <-2- AX <-3- A1 # Marker "2,3" are exclusive to A1

    or

    # (A0 has unknown precursors, A0 rewritten as A1 and A2 (divergence))
    #
    #        <-2- A1 # Marker "2" is exclusive to A0,A1
    #       /
    # <-1- A0
    #       \
    #        <-3- A2 # Marker "3" is exclusive to A0,A2
    #
    # in addition:
    #
    #  Markers "2,3" are exclusive to A1,A2
    #  Markers "1,2,3" are exclusive to A0,A1,A2

    See test/test-obsolete-bundle-strip.t for more examples.

    An example usage is strip. When stripping a changeset, we also want to
    strip the markers exclusive to this changeset. Otherwise we would have
    "dangling" obsolescence markers from its precursors: obsolescence markers
    marking a node as obsolete without any successors available locally.

    As for relevant markers, the prune markers for children will be followed.
    Of course, they will only be followed if the pruned children are
    locally-known, since the prune markers are relevant to the pruned node.
    However, while prune markers are considered relevant to the parent of the
    pruned changesets, prune markers for locally-known changesets (with no
    successors) are considered exclusive to the pruned nodes. This allows
    us to strip the prune markers (with the rest of the exclusive chain)
    alongside the pruned changesets.
    """
    # running on a filtered repository would be dangerous as markers could be
    # reported as exclusive when they are relevant for other filtered nodes.
    unfi = repo.unfiltered()

    # shortcuts to various useful items
    nm = unfi.changelog.nodemap
    precursorsmarkers = unfi.obsstore.precursors
    successormarkers = unfi.obsstore.successors
    childrenmarkers = unfi.obsstore.children

    # exclusive markers (return of the function)
    exclmarkers = set()
    # we need fast membership testing
    nodes = set(nodes)
    # looking for heads in the obshistory
    #
    # XXX we are ignoring all issues in regard to cycles for now.
    stack = [n for n in nodes if not _filterprunes(successormarkers.get(n, ()))]
    stack.sort()
    # nodes already stacked
    seennodes = set(stack)
    while stack:
        current = stack.pop()
        # fetch precursors markers
        markers = list(precursorsmarkers.get(current, ()))
        # extend the list with prune markers
        for mark in successormarkers.get(current, ()):
            if not mark[1]:
                markers.append(mark)
        # and markers from children (looking for prune)
        for mark in childrenmarkers.get(current, ()):
            if not mark[1]:
                markers.append(mark)
        # traverse the markers
        for mark in markers:
            if mark in exclmarkers:
                # marker already selected
                continue

            # If the marker is about the current node, select it
            #
            # (this delays the addition of markers from children)
            if mark[1] or mark[0] == current:
                exclmarkers.add(mark)

            # should we keep traversing through the precursors?
            prec = mark[0]

            # nodes in the stack or already processed
            if prec in seennodes:
                continue

            # is this a locally known node?
            known = prec in nm
            # if locally-known and not in the <nodes> set, the traversal
            # stops here.
            if known and prec not in nodes:
                continue

            # do not keep going if there are unselected markers pointing to this
            # node. If we end up traversing these unselected markers later the
            # node will be taken care of at that point.
            precmarkers = _filterprunes(successormarkers.get(prec))
            if precmarkers.issubset(exclmarkers):
                seennodes.add(prec)
                stack.append(prec)

    return exclmarkers

def foreground(repo, nodes):
    """return all nodes in the "foreground" of other nodes

    The foreground of a revision is anything reachable using parent -> children
    or precursor -> successor relation. It is very similar to "descendant" but
    augmented with obsolescence information.

    Beware that obsolescence cycles may appear in complex situations.
    """
    repo = repo.unfiltered()
    foreground = set(repo.set('%ln::', nodes))
    if repo.obsstore:
        # We only need this complicated logic if there is obsolescence
        # XXX will probably deserve an optimised revset.
        nm = repo.changelog.nodemap
        plen = -1
        # compute the whole set of successors or descendants
        while len(foreground) != plen:
            plen = len(foreground)
            succs = set(c.node() for c in foreground)
            mutable = [c.node() for c in foreground if c.mutable()]
            succs.update(allsuccessors(repo.obsstore, mutable))
            known = (n for n in succs if n in nm)
            foreground = set(repo.set('%ln::', known))
    return set(c.node() for c in foreground)

def getobsoleted(repo, tr):
    """return the set of pre-existing revisions obsoleted by a transaction"""
    torev = repo.unfiltered().changelog.nodemap.get
    phase = repo._phasecache.phase
    succsmarkers = repo.obsstore.successors.get
    public = phases.public
    addedmarkers = tr.changes.get('obsmarkers')
    addedrevs = tr.changes.get('revs')
    seenrevs = set(addedrevs)
    obsoleted = set()
    for mark in addedmarkers:
        node = mark[0]
        rev = torev(node)
        if rev is None or rev in seenrevs:
            continue
        seenrevs.add(rev)
        if phase(repo, rev) == public:
            continue
-       if set(succsmarkers(node)).issubset(addedmarkers):
+       if set(succsmarkers(node) or []).issubset(addedmarkers):
            obsoleted.add(rev)
    return obsoleted

def successorssets(repo, initialnode, closest=False, cache=None):
    """Return set of all latest successors of initial nodes

    The successors set of a changeset A are the group of revisions that succeed
    A. It succeeds A as a consistent whole, each revision being only a partial
    replacement. By default, the successors set contains non-obsolete
    changesets only, walking the obsolescence graph until reaching a leaf. If
    'closest' is set to True, closest successors-sets are returned (the
    obsolescence walk stops on known changesets).

    This function returns the full list of successor sets which is why it
    returns a list of tuples and not just a single tuple. Each tuple is a valid
    successors set. Note that (A,) may be a valid successors set for changeset A
    (see below).

    In most cases, a changeset A will have a single element (e.g. the changeset
    A is replaced by A') in its successors set. Though, it is also common for a
    changeset A to have no elements in its successor set (e.g. the changeset
    has been pruned). Therefore, the returned list of successors sets will be
    [(A',)] or [], respectively.

    When a changeset A is split into A' and B', however, it will result in a
    successors set containing more than a single element, i.e. [(A',B')].
    Divergent changesets will result in multiple successors sets, i.e. [(A',),
    (A'')].

    If a changeset A is not obsolete, then it will conceptually have no
    successors set. To distinguish this from a pruned changeset, the successor
    set will contain itself only, i.e. [(A,)].

    Finally, final successors unknown locally are considered to be pruned
    (pruned: obsoleted without any successors). (Final: successors not affected
    by markers).

    The 'closest' mode respects the repoview filtering. For example, without a
    filter it will stop at the first locally-known changeset; with the
    'visible' filter it will stop on visible changesets.

    The optional `cache` parameter is a dictionary that may contain
    precomputed successors sets. It is meant to reuse the computation of a
    previous call to `successorssets` when multiple calls are made at the same
    time. The cache dictionary is updated in place. The caller is responsible
    for its life span. Code that makes multiple calls to `successorssets`
    *should* use this cache mechanism or risk a performance hit.

    Since results differ depending on the 'closest' mode, the same cache
    cannot be reused for both modes.
    """

    succmarkers = repo.obsstore.successors

    # Stack of nodes we search successors sets for
    toproceed = [initialnode]
    # set version of above list for fast loop detection
    # element added to "toproceed" must be added here
    stackedset = set(toproceed)
    if cache is None:
        cache = {}

    # This while loop is the flattened version of a recursive search for
    # successors sets
    #
    # def successorssets(x):
    #     successors = directsuccessors(x)
    #     ss = [[]]
    #     for succ in directsuccessors(x):
    #         # product as in itertools cartesian product
    #         ss = product(ss, successorssets(succ))
    #     return ss
    #
    # But we cannot use plain recursive calls here:
    # - that would blow the python call stack
    # - obsolescence markers may have cycles, we need to handle them.
    #
    # The `toproceed` list acts as our call stack. Every node we search
    # successors sets for is stacked there.
    #
    # The `stackedset` is a set version of this stack used to check if a node is
    # already stacked. This check is used to detect cycles and prevent infinite
    # loops.
    #
    # Successors sets of all nodes are stored in the `cache` dictionary.
    #
    # After this while loop ends we use the cache to return the successors sets
    # for the node requested by the caller.
    while toproceed:
        # Every iteration tries to compute the successors sets of the topmost
        # node of the stack: CURRENT.
        #
        # There are four possible outcomes:
        #
        # 1) We already know the successors sets of CURRENT:
        #    -> mission accomplished, pop it from the stack.
        # 2) Stop the walk:
        #    default case: Node is not obsolete
        #    closest case: Node is known at this repo filter level
        #      -> the node is its own successors sets. Add it to the cache.
        # 3) We do not know the successors sets of direct successors of CURRENT:
        #    -> We add those successors to the stack.
        # 4) We know the successors sets of all direct successors of CURRENT:
        #    -> We can compute CURRENT successors set and add it to the
        #       cache.
        #
        current = toproceed[-1]

        # case 2 condition is a bit hairy because of closest,
        # we compute it on its own
        case2condition = ((current not in succmarkers)
                          or (closest and current != initialnode
                              and current in repo))

        if current in cache:
            # case (1): We already know the successors sets
            stackedset.remove(toproceed.pop())
        elif case2condition:
            # case (2): end of walk.
            if current in repo:
                # We have a valid successor.
                cache[current] = [(current,)]
            else:
                # Final obsolete version is unknown locally.
                # Do not count that as a valid successor.
                cache[current] = []
        else:
            # cases (3) and (4)
            #
            # We proceed in two phases. Phase 1 aims to distinguish case (3)
            # from case (4):
            #
            #     For each direct successor of CURRENT, we check whether its
            #     successors sets are known. If they are not, we stack the
            #     unknown node and proceed to the next iteration of the while
            #     loop. (case 3)
            #
            #     During this step, we may detect obsolescence cycles: a node
            #     with unknown successors sets but already in the call stack.
            #     In such a situation, we arbitrarily set the successors sets of
            #     the node to nothing (node pruned) to break the cycle.
            #
            #     If no break was encountered we proceed to phase 2.
            #
            # Phase 2 computes successors sets of CURRENT (case 4); see details
            # in phase 2 itself.
            #
            # Note the two levels of iteration in each phase.
            # - The first one handles obsolescence markers using CURRENT as
            #   precursor (successors markers of CURRENT).
            #
            #   Having multiple entries here means divergence.
            #
            # - The second one handles successors defined in each marker.
            #
            #   Having none means pruned node, multiple successors means split,
            #   single successors are standard replacement.
            #
            for mark in sorted(succmarkers[current]):
                for suc in mark[1]:
                    if suc not in cache:
                        if suc in stackedset:
                            # cycle breaking
                            cache[suc] = []
                        else:
                            # case (3) If we have not computed successors sets
                            # of one of those successors we add it to the
                            # `toproceed` stack and stop all work for this
                            # iteration.
                            toproceed.append(suc)
                            stackedset.add(suc)
                            break
                else:
                    continue
                break
            else:
                # case (4): we know all successors sets of all direct
                # successors
                #
                # Successors set contributed by each marker depends on the
                # successors sets of all its "successors" nodes.
                #
                # Each different marker is a divergence in the obsolescence
                # history. It contributes successors sets distinct from other
                # markers.
                #
                # Within a marker, a successor may have divergent successors
                # sets. In such a case, the marker will contribute multiple
                # divergent successors sets. If multiple successors have
                # divergent successors sets, a Cartesian product is used.
                #
                # At the end we post-process successors sets to remove
                # duplicated entries and successors sets that are strict
                # subsets of another one.
                succssets = []
                for mark in sorted(succmarkers[current]):
                    # successors sets contributed by this marker
                    markss = [[]]
                    for suc in mark[1]:
                        # cartesian product with previous successors
                        productresult = []
                        for prefix in markss:
                            for suffix in cache[suc]:
                                newss = list(prefix)
                                for part in suffix:
                                    # do not duplicate entries in a successors
                                    # set; the first entry wins.
                                    if part not in newss:
                                        newss.append(part)
                                productresult.append(newss)
                        markss = productresult
                    succssets.extend(markss)
                # remove duplicates and subsets
                seen = []
                final = []
                candidate = sorted(((set(s), s) for s in succssets if s),
                                   key=lambda x: len(x[1]), reverse=True)
                for setversion, listversion in candidate:
                    for seenset in seen:
                        if setversion.issubset(seenset):
                            break
                    else:
                        final.append(listversion)
                        seen.append(setversion)
                final.reverse() # put small successors sets first
                cache[current] = final
    return cache[initialnode]
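The successorssets() docstring above asks callers that look up several nodes to share a single cache dictionary. A rough usage sketch under that contract (repo and nodes are assumed to already exist, and process() is a hypothetical caller-side handler; only the documented signature is used):

from mercurial import obsutil

cache = {}                # reuse across calls; keep one cache per 'closest' mode
for node in nodes:        # assumed iterable of binary node ids
    for succset in obsutil.successorssets(repo, node, cache=cache):
        # each succset is a tuple of successor nodes; a pruned changeset
        # contributes no sets at all (the returned list is empty)
        process(succset)  # hypothetical handler, not part of obsutil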