obsolete: fix old typo...
Boris Feld
r33944:54c21114 default
@@ -1,592 +1,592 @@
# obsutil.py - utility functions for obsolescence
#
# Copyright 2017 Boris Feld <boris.feld@octobus.net>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

from . import (
    phases,
    util
)

class marker(object):
    """Wrap obsolete marker raw data"""

    def __init__(self, repo, data):
        # the repo argument will be used to create changectx in a later version
        self._repo = repo
        self._data = data
        self._decodedmeta = None

    def __hash__(self):
        return hash(self._data)

    def __eq__(self, other):
        if type(other) != type(self):
            return False
        return self._data == other._data

    def precnode(self):
        msg = ("'marker.precnode' is deprecated, "
               "use 'marker.prednode'")
        util.nouideprecwarn(msg, '4.4')
        return self.prednode()

    def prednode(self):
        """Predecessor changeset node identifier"""
        return self._data[0]

    def succnodes(self):
        """List of successor changeset node identifiers"""
        return self._data[1]

    def parentnodes(self):
        """Parents of the predecessors (None if not recorded)"""
        return self._data[5]

    def metadata(self):
        """Decoded metadata dictionary"""
        return dict(self._data[3])

    def date(self):
        """Creation date as (unixtime, offset)"""
        return self._data[4]

    def flags(self):
        """The flags field of the marker"""
        return self._data[2]

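# Illustrative note (hypothetical sketch, not from this patch): the accessors
# above simply index into the raw marker tuple kept in the obsstore. Assuming
# `data` is one such raw entry:
#
#     m = marker(repo, data)
#     m.prednode()     # data[0], the rewritten node
#     m.succnodes()    # data[1], tuple of replacement nodes (empty if pruned)
#     m.flags()        # data[2]
#     m.metadata()     # dict(data[3])
#     m.date()         # data[4], (unixtime, offset)
#     m.parentnodes()  # data[5], parents of the predecessor, or None
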
def getmarkers(repo, nodes=None, exclusive=False):
    """returns markers known in a repository

    If <nodes> is specified, only markers "relevant" to those nodes are
    returned"""
    if nodes is None:
        rawmarkers = repo.obsstore
    elif exclusive:
        rawmarkers = exclusivemarkers(repo, nodes)
    else:
        rawmarkers = repo.obsstore.relevantmarkers(nodes)

    for markerdata in rawmarkers:
        yield marker(repo, markerdata)

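# Usage sketch (hypothetical, not part of this patch): iterate the markers
# relevant to the working-directory parent. `repo` is assumed to be a local
# repository object with obsolescence markers enabled.
#
#     node = repo['.'].node()
#     for m in getmarkers(repo, nodes=[node]):
#         print(m.prednode(), m.succnodes())
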
def closestpredecessors(repo, nodeid):
    """yield the list of next predecessors pointing on visible changectx nodes

    This function respects the repoview filtering; filtered revisions will be
    considered missing.
    """

    precursors = repo.obsstore.predecessors
    stack = [nodeid]
    seen = set(stack)

    while stack:
        current = stack.pop()
        currentpreccs = precursors.get(current, ())

        for prec in currentpreccs:
            precnodeid = prec[0]

            # Basic cycle protection
            if precnodeid in seen:
                continue
            seen.add(precnodeid)

            if precnodeid in repo:
                yield precnodeid
            else:
                stack.append(precnodeid)

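# Illustrative behaviour (derived from the docstring above, hypothetical
# nodes): if B was rewritten from A, and A is filtered or unknown while A's
# own predecessor P is visible, the walk skips past A and yields P, the first
# predecessor that resolves to a visible changectx.
#
#     visible_preds = list(closestpredecessors(repo, ctx.node()))
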
def allprecursors(*args, **kwargs):
    """ (DEPRECATED)
    """
    msg = ("'obsutil.allprecursors' is deprecated, "
           "use 'obsutil.allpredecessors'")
    util.nouideprecwarn(msg, '4.4')

    return allpredecessors(*args, **kwargs)

def allpredecessors(obsstore, nodes, ignoreflags=0):
    """Yield node for every precursor of <nodes>.

    Some precursors may be unknown locally.

    This is a linear yield unsuited to detecting folded changesets. It includes
    initial nodes too."""

    remaining = set(nodes)
    seen = set(remaining)
    while remaining:
        current = remaining.pop()
        yield current
        for mark in obsstore.predecessors.get(current, ()):
            # ignore markers flagged with the specified flags
            if mark[2] & ignoreflags:
                continue
            suc = mark[0]
            if suc not in seen:
                seen.add(suc)
                remaining.add(suc)

def allsuccessors(obsstore, nodes, ignoreflags=0):
    """Yield node for every successor of <nodes>.

    Some successors may be unknown locally.

    This is a linear yield unsuited to detecting split changesets. It includes
    initial nodes too."""
    remaining = set(nodes)
    seen = set(remaining)
    while remaining:
        current = remaining.pop()
        yield current
        for mark in obsstore.successors.get(current, ()):
            # ignore markers flagged with the specified flags
            if mark[2] & ignoreflags:
                continue
            for suc in mark[1]:
                if suc not in seen:
                    seen.add(suc)
                    remaining.add(suc)

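# Illustrative sketch (hypothetical nodes `a`, `b`, `c`, not from this patch):
# because the yield is linear, a split of `a` into `b` and `c` simply produces
# all three nodes with no grouping information; the same applies symmetrically
# to allpredecessors() above.
#
#     list(allsuccessors(repo.obsstore, [a]))   # -> a, b and c, in some order
#
# Use successorssets() further below when the split/divergence structure
# matters.
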
def _filterprunes(markers):
    """return a set with no prune markers"""
    return set(m for m in markers if m[1])

def exclusivemarkers(repo, nodes):
    """set of markers relevant to "nodes" but no other locally-known nodes

    This function computes the set of markers "exclusive" to a locally-known
    node. This means we walk the markers starting from <nodes> until we reach a
    locally-known precursor outside of <nodes>. Elements of <nodes> with
    locally-known successors outside of <nodes> are ignored (since their
    precursor markers are also relevant to these successors).

    For example:

        # (A0 rewritten as A1)
        #
        # A0 <-1- A1   # Marker "1" is exclusive to A1

    or

        # (A0 rewritten as AX; AX rewritten as A1; AX is unknown locally)
        #
        # <-1- A0 <-2- AX <-3- A1   # Markers "2,3" are exclusive to A1

    or

        # (A0 has unknown precursors, A0 rewritten as A1 and A2 (divergence))
        #
        #      <-2- A1   # Marker "2" is exclusive to A0,A1
        #     /
        # <-1- A0
        #     \
        #      <-3- A2   # Marker "3" is exclusive to A0,A2
        #
        # in addition:
        #
        # Markers "2,3" are exclusive to A1,A2
        # Markers "1,2,3" are exclusive to A0,A1,A2

    See test/test-obsolete-bundle-strip.t for more examples.

    An example usage is strip. When stripping a changeset, we also want to
    strip the markers exclusive to this changeset. Otherwise we would have
    "dangling" obsolescence markers from its precursors: obsolescence markers
    marking a node as obsolete without any successors available locally.

    As for relevant markers, the prune markers for children will be followed.
    Of course, they will only be followed if the pruned child is
    locally-known, since the prune markers are relevant to the pruned node.
    However, while prune markers are considered relevant to the parents of the
    pruned changesets, prune markers for locally-known changesets (with no
    successors) are considered exclusive to the pruned nodes. This allows
    stripping the prune markers (with the rest of the exclusive chain)
    alongside the pruned changesets.
    """
    # running on a filtered repository would be dangerous as markers could be
    # reported as exclusive when they are relevant for other filtered nodes.
    unfi = repo.unfiltered()

    # shortcut to various useful items
    nm = unfi.changelog.nodemap
    precursorsmarkers = unfi.obsstore.predecessors
    successormarkers = unfi.obsstore.successors
    childrenmarkers = unfi.obsstore.children

    # exclusive markers (return of the function)
    exclmarkers = set()
    # we need fast membership testing
    nodes = set(nodes)
    # looking for heads in the obshistory
    #
    # XXX we are ignoring all issues related to cycles for now.
    stack = [n for n in nodes if not _filterprunes(successormarkers.get(n, ()))]
    stack.sort()
    # nodes already stacked
    seennodes = set(stack)
    while stack:
        current = stack.pop()
        # fetch precursors markers
        markers = list(precursorsmarkers.get(current, ()))
        # extend the list with prune markers
        for mark in successormarkers.get(current, ()):
            if not mark[1]:
                markers.append(mark)
        # and markers from children (looking for prunes)
        for mark in childrenmarkers.get(current, ()):
            if not mark[1]:
                markers.append(mark)
        # traverse the markers
        for mark in markers:
            if mark in exclmarkers:
                # marker already selected
                continue

            # If the marker is about the current node, select it
            #
            # (this delays the addition of markers from children)
            if mark[1] or mark[0] == current:
                exclmarkers.add(mark)

            # should we keep traversing through the precursors?
            prec = mark[0]

            # nodes in the stack or already processed
            if prec in seennodes:
                continue

            # is this a locally known node?
            known = prec in nm
            # if locally-known and not in the <nodes> set, the traversal
            # stops here.
            if known and prec not in nodes:
                continue

            # do not keep going if there are unselected markers pointing to
            # this node. If we end up traversing these unselected markers
            # later, the node will be taken care of at that point.
            precmarkers = _filterprunes(successormarkers.get(prec))
            if precmarkers.issubset(exclmarkers):
                seennodes.add(prec)
                stack.append(prec)

    return exclmarkers

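# Usage sketch for the strip scenario described in the docstring above
# (hypothetical, not from this patch): when removing `nodes` from the
# repository, the markers that only make sense for those nodes can be dropped
# with them.
#
#     exclusive = exclusivemarkers(repo, nodes)
#     # strip code would then discard `exclusive` alongside the changesets
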
def foreground(repo, nodes):
    """return all nodes in the "foreground" of other nodes

    The foreground of a revision is anything reachable using the
    parent -> children or precursor -> successor relations. It is very similar
    to "descendants" but augmented with obsolescence information.

    Beware that obsolescence cycles may produce unexpected results in complex
    situations.
    """
    repo = repo.unfiltered()
    foreground = set(repo.set('%ln::', nodes))
    if repo.obsstore:
        # We only need this complicated logic if there is obsolescence
        # XXX will probably deserve an optimised revset.
        nm = repo.changelog.nodemap
        plen = -1
        # compute the whole set of successors or descendants
        while len(foreground) != plen:
            plen = len(foreground)
            succs = set(c.node() for c in foreground)
            mutable = [c.node() for c in foreground if c.mutable()]
            succs.update(allsuccessors(repo.obsstore, mutable))
            known = (n for n in succs if n in nm)
            foreground = set(repo.set('%ln::', known))
    return set(c.node() for c in foreground)

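# Illustrative example (hypothetical nodes, not from this patch): if X has a
# child Y, and Y was rewritten as Y', then foreground(repo, [X]) contains X, Y
# and Y' plus any descendants of Y', since the loop above alternates between
# descendants and successors until the set stops growing.
#
#     fg = foreground(repo, [x])
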
def getobsoleted(repo, tr):
    """return the set of pre-existing revisions obsoleted by a transaction"""
    torev = repo.unfiltered().changelog.nodemap.get
    phase = repo._phasecache.phase
    succsmarkers = repo.obsstore.successors.get
    public = phases.public
    addedmarkers = tr.changes.get('obsmarkers')
    addedrevs = tr.changes.get('revs')
    seenrevs = set(addedrevs)
    obsoleted = set()
    for mark in addedmarkers:
        node = mark[0]
        rev = torev(node)
        if rev is None or rev in seenrevs:
            continue
        seenrevs.add(rev)
        if phase(repo, rev) == public:
            continue
        if set(succsmarkers(node) or []).issubset(addedmarkers):
            obsoleted.add(rev)
    return obsoleted

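# Usage sketch (hypothetical, not from this patch): assumes a transaction
# whose `tr.changes` carries the 'obsmarkers' and 'revs' entries read above,
# e.g. from a hook or an extension wrapping the transaction close.
#
#     obsoletedrevs = getobsoleted(repo, tr)
#     repo.ui.note('%d pre-existing revisions obsoleted\n' % len(obsoletedrevs))
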
class _succs(list):
    """small class to represent a successors set with some metadata about it"""

    def __init__(self, *args, **kwargs):
        super(_succs, self).__init__(*args, **kwargs)
        self.markers = set()

    def copy(self):
        new = _succs(self)
        new.markers = self.markers.copy()
        return new

    @util.propertycache
    def _set(self):
        # immutable
        return set(self)

    def canmerge(self, other):
        return self._set.issubset(other._set)

def successorssets(repo, initialnode, closest=False, cache=None):
    """Return set of all latest successors of initial nodes

    The successors set of a changeset A is the group of revisions that succeed
    A. It succeeds A as a consistent whole, each revision being only a partial
    replacement. By default, the successors set contains non-obsolete
    changesets only, walking the obsolescence graph until reaching a leaf. If
    'closest' is set to True, the closest successors sets are returned (the
    obsolescence walk stops on known changesets).

    This function returns the full list of successors sets, which is why it
    returns a list of tuples and not just a single tuple. Each tuple is a valid
    successors set. Note that (A,) may be a valid successors set for changeset A
    (see below).

    In most cases, a changeset A will have a single element (e.g. the changeset
    A is replaced by A') in its successors set. Though, it is also common for a
    changeset A to have no elements in its successors set (e.g. the changeset
    has been pruned). Therefore, the returned list of successors sets will be
    [(A',)] or [], respectively.

    When a changeset A is split into A' and B', however, it will result in a
    successors set containing more than a single element, i.e. [(A',B')].
    Divergent changesets will result in multiple successors sets, i.e. [(A',),
    (A'')].

    If a changeset A is not obsolete, then it will conceptually have no
    successors set. To distinguish this from a pruned changeset, the successors
    set will contain itself only, i.e. [(A,)].

    Finally, final successors unknown locally are considered to be pruned
    (pruned: obsoleted without any successors). (Final: successors not affected
    by markers.)

    The 'closest' mode respects the repoview filtering. For example, without
    any filter it will stop at the first locally-known changeset; with the
    'visible' filter it will stop on visible changesets.

    The optional `cache` parameter is a dictionary that may contain
    precomputed successors sets. It is meant to reuse the computation of a
    previous call to `successorssets` when multiple calls are made at the same
    time. The cache dictionary is updated in place. The caller is responsible
    for its life span. Code that makes multiple calls to `successorssets`
    *should* use this cache mechanism or risk a performance hit.

    Since the results differ depending on the 'closest' mode, the same cache
    cannot be reused for both modes.
    """

    succmarkers = repo.obsstore.successors

    # Stack of nodes we search successors sets for
    toproceed = [initialnode]
    # set version of the above list for fast loop detection
    # elements added to "toproceed" must be added here
    stackedset = set(toproceed)
    if cache is None:
        cache = {}

    # This while loop is the flattened version of a recursive search for
    # successors sets
    #
    # def successorssets(x):
    #     successors = directsuccessors(x)
    #     ss = [[]]
    #     for succ in directsuccessors(x):
    #         # product as in itertools cartesian product
    #         ss = product(ss, successorssets(succ))
    #     return ss
    #
    # But we can not use plain recursive calls here:
    # - that would blow the python call stack
    # - obsolescence markers may have cycles, we need to handle them.
    #
    # The `toproceed` list acts as our call stack. Every node we search
    # successors sets for is stacked there.
    #
    # The `stackedset` is a set version of this stack used to check if a node
    # is already stacked. This check is used to detect cycles and prevent
    # infinite loops.
    #
    # successors sets of all nodes are stored in the `cache` dictionary.
    #
    # After this while loop ends we use the cache to return the successors sets
    # for the node requested by the caller.
    while toproceed:
        # Every iteration tries to compute the successors sets of the topmost
        # node of the stack: CURRENT.
        #
        # There are four possible outcomes:
        #
        # 1) We already know the successors sets of CURRENT:
        #    -> mission accomplished, pop it from the stack.
        # 2) Stop the walk:
        #    default case: Node is not obsolete
        #    closest case: Node is known at this repo filter level
        #      -> the node is its own successors sets. Add it to the cache.
        # 3) We do not know successors set of direct successors of CURRENT:
        #    -> We add those successors to the stack.
        # 4) We know successors sets of all direct successors of CURRENT:
        #    -> We can compute CURRENT successors set and add it to the
        #       cache.
        #
        current = toproceed[-1]

        # case 2 condition is a bit hairy because of closest,
        # we compute it on its own
        case2condition = ((current not in succmarkers)
                          or (closest and current != initialnode
                              and current in repo))

        if current in cache:
            # case (1): We already know the successors sets
            stackedset.remove(toproceed.pop())
        elif case2condition:
            # case (2): end of walk.
            if current in repo:
                # We have a valid successor.
                cache[current] = [_succs((current,))]
            else:
                # Final obsolete version is unknown locally.
                # Do not count that as a valid successor.
                cache[current] = []
        else:
            # cases (3) and (4)
            #
            # We proceed in two phases. Phase 1 aims to distinguish case (3)
            # from case (4):
            #
            #     For each direct successor of CURRENT, we check whether its
            #     successors sets are known. If they are not, we stack the
            #     unknown node and proceed to the next iteration of the while
            #     loop. (case 3)
            #
            #     During this step, we may detect obsolescence cycles: a node
            #     with unknown successors sets but already in the call stack.
            #     In such a situation, we arbitrarily set the successors sets
            #     of the node to nothing (node pruned) to break the cycle.
            #
            #     If no break was encountered we proceed to phase 2.
            #
            # Phase 2 computes the successors sets of CURRENT (case 4); see
            # details in phase 2 itself.
            #
            # Note the two levels of iteration in each phase.
            # - The first one handles obsolescence markers using CURRENT as
            #   precursor (successors markers of CURRENT).
            #
            #   Having multiple entries here means divergence.
            #
            # - The second one handles successors defined in each marker.
            #
            #   Having none means a pruned node; multiple successors mean a
            #   split; a single successor is a standard replacement.
            #
            for mark in sorted(succmarkers[current]):
                for suc in mark[1]:
                    if suc not in cache:
                        if suc in stackedset:
                            # cycle breaking
                            cache[suc] = []
                        else:
                            # case (3) If we have not computed successors sets
                            # of one of those successors, we add it to the
                            # `toproceed` stack and stop all work for this
                            # iteration.
                            toproceed.append(suc)
                            stackedset.add(suc)
                            break
                else:
                    continue
                break
            else:
                # case (4): we know all successors sets of all direct
                # successors
                #
                # The successors set contributed by each marker depends on the
                # successors sets of all its "successors" nodes.
                #
                # Each different marker is a divergence in the obsolescence
                # history. It contributes successors sets distinct from other
                # markers.
                #
                # Within a marker, a successor may have divergent successors
                # sets. In such a case, the marker will contribute multiple
                # divergent successors sets. If multiple successors have
                # divergent successors sets, a Cartesian product is used.
                #
                # At the end we post-process successors sets to remove
                # duplicated entries and successors sets that are strict
                # subsets of another one.
                succssets = []
                for mark in sorted(succmarkers[current]):
                    # successors sets contributed by this marker
                    base = _succs()
                    base.markers.add(mark)
                    markss = [base]
                    for suc in mark[1]:
                        # cardinal product with previous successors
                        productresult = []
                        for prefix in markss:
                            for suffix in cache[suc]:
                                newss = prefix.copy()
                                newss.markers.update(suffix.markers)
                                for part in suffix:
                                    # do not duplicate entries in a successors
                                    # set; first entry wins.
                                    if part not in newss:
                                        newss.append(part)
                                productresult.append(newss)
                        markss = productresult
                    succssets.extend(markss)
                # remove duplicated and subset
                seen = []
                final = []
-                candidate = sorted((s for s in succssets if s),
+                candidates = sorted((s for s in succssets if s),
                                     key=len, reverse=True)
-                for cand in candidate:
+                for cand in candidates:
                    for seensuccs in seen:
                        if cand.canmerge(seensuccs):
                            seensuccs.markers.update(cand.markers)
                            break
                    else:
                        final.append(cand)
                        seen.append(cand)
                final.reverse()  # put small successors sets first
                cache[current] = final
    return cache[initialnode]

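# Illustrative expectations derived from the docstring above (A, A', A'', B'
# are hypothetical nodes, not from this patch):
#
#     successorssets(repo, A)   # A rewritten as A'       -> [(A',)]
#     successorssets(repo, A)   # A pruned                 -> []
#     successorssets(repo, A)   # A split into A' and B'   -> [(A', B')]
#     successorssets(repo, A)   # A not obsolete           -> [(A,)]
#     successorssets(repo, A)   # divergent A' / A''       -> [(A',), (A'',)]
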
def successorsandmarkers(repo, ctx):
    """compute the raw data needed for computing obsfate

    Returns a list of dicts, one dict per successors set.
    """
    if not ctx.obsolete():
        return None

    ssets = successorssets(repo, ctx.node(), closest=True)

    values = []
    for sset in ssets:
        values.append({'successors': sset, 'markers': sset.markers})

    return values
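
# Shape of the returned data (illustrative sketch, not from this patch): one
# dict per successors set of an obsolete changeset, pairing the replacement
# nodes with the markers that explain them.
#
#     for entry in successorsandmarkers(repo, ctx) or []:
#         entry['successors']  # a _succs list of nodes
#         entry['markers']     # the set of markers leading to those nodes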