##// END OF EJS Templates
obsutil: drop deprecated methods (API)...
Matt Harbison -
r35911:78f33ded default
parent child Browse files
Show More
@@ -1,906 +1,891
1 # obsutil.py - utility functions for obsolescence
1 # obsutil.py - utility functions for obsolescence
2 #
2 #
3 # Copyright 2017 Boris Feld <boris.feld@octobus.net>
3 # Copyright 2017 Boris Feld <boris.feld@octobus.net>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import re
10 import re
11
11
12 from .i18n import _
12 from .i18n import _
13 from . import (
13 from . import (
14 node as nodemod,
14 node as nodemod,
15 phases,
15 phases,
16 util,
16 util,
17 )
17 )
18
18
class marker(object):
    """Lightweight wrapper around raw obsolescence marker data.

    The underlying raw tuple layout appears to be:
    (predecessor, successors, flags, metadata, date, parents)
    — inferred from the accessors below.
    """

    def __init__(self, repo, data):
        # the repo argument will be used to create changectx in later version
        self._repo = repo
        self._data = data
        self._decodedmeta = None

    def __hash__(self):
        return hash(self._data)

    def __eq__(self, other):
        # only markers of the same class with identical raw data compare equal
        return type(other) == type(self) and self._data == other._data

    def prednode(self):
        """node identifier of the predecessor changeset"""
        return self._data[0]

    def succnodes(self):
        """list of node identifiers of the successors changesets"""
        return self._data[1]

    def flags(self):
        """raw flags field of the marker"""
        return self._data[2]

    def metadata(self):
        """metadata of the marker, decoded into a dictionary"""
        return dict(self._data[3])

    def date(self):
        """creation date as a (unixtime, offset) pair"""
        return self._data[4]

    def parentnodes(self):
        """parents of the predecessor (None when not recorded)"""
        return self._data[5]
def getmarkers(repo, nodes=None, exclusive=False):
    """Yield ``marker`` objects for obsolescence markers known in a repository.

    Without <nodes>, every marker in the obsstore is yielded. With <nodes>,
    only markers "relevant" to those nodes are yielded; if ``exclusive`` is
    also set, the selection is narrowed to markers exclusive to those nodes.
    """
    if nodes is None:
        rawmarkers = repo.obsstore
    elif exclusive:
        rawmarkers = exclusivemarkers(repo, nodes)
    else:
        rawmarkers = repo.obsstore.relevantmarkers(nodes)

    for rawdata in rawmarkers:
        yield marker(repo, rawdata)
80
74
def closestpredecessors(repo, nodeid):
    """yield the list of next predecessors pointing on visible changectx nodes

    This function respects the repoview filtering: filtered revisions are
    considered missing and the walk continues through them.
    """
    predmarkers = repo.obsstore.predecessors
    pending = [nodeid]
    visited = set(pending)

    while pending:
        node = pending.pop()

        for mark in predmarkers.get(node, ()):
            candidate = mark[0]

            # basic cycle protection
            if candidate in visited:
                continue
            visited.add(candidate)

            if candidate in repo:
                # locally known (and unfiltered): report it and stop this path
                yield candidate
            else:
                # unknown locally: keep walking further back
                pending.append(candidate)
108
102
109 def allprecursors(*args, **kwargs):
110 """ (DEPRECATED)
111 """
112 msg = ("'obsutil.allprecursors' is deprecated, "
113 "use 'obsutil.allpredecessors'")
114 util.nouideprecwarn(msg, '4.4')
115
116 return allpredecessors(*args, **kwargs)
117
def allpredecessors(obsstore, nodes, ignoreflags=0):
    """Yield node for every predecessor of <nodes>.

    Some predecessors may be unknown locally.

    This is a linear yield unsuited to detecting folded changesets. The
    initial nodes are yielded too."""
    pending = set(nodes)
    emitted = set(pending)
    while pending:
        node = pending.pop()
        yield node
        for mark in obsstore.predecessors.get(node, ()):
            # skip markers carrying any of the ignored flags
            if mark[2] & ignoreflags:
                continue
            prec = mark[0]
            if prec not in emitted:
                emitted.add(prec)
                pending.add(prec)
139
124
def allsuccessors(obsstore, nodes, ignoreflags=0):
    """Yield node for every successor of <nodes>.

    Some successors may be unknown locally.

    This is a linear yield unsuited to detecting split changesets. The
    initial nodes are yielded too."""
    pending = set(nodes)
    emitted = set(pending)
    while pending:
        node = pending.pop()
        yield node
        for mark in obsstore.successors.get(node, ()):
            # skip markers carrying any of the ignored flags
            if mark[2] & ignoreflags:
                continue
            for succ in mark[1]:
                if succ not in emitted:
                    emitted.add(succ)
                    pending.add(succ)
160
145
161 def _filterprunes(markers):
146 def _filterprunes(markers):
162 """return a set with no prune markers"""
147 """return a set with no prune markers"""
163 return set(m for m in markers if m[1])
148 return set(m for m in markers if m[1])
164
149
def exclusivemarkers(repo, nodes):
    """set of markers relevant to "nodes" but no other locally-known nodes

    This function computes the set of markers "exclusive" to a locally-known
    node. This means we walk the markers starting from <nodes> until we reach a
    locally-known precursors outside of <nodes>. Element of <nodes> with
    locally-known successors outside of <nodes> are ignored (since their
    precursors markers are also relevant to these successors).

    For example:

        # (A0 rewritten as A1)
        #
        # A0 <-1- A1 # Marker "1" is exclusive to A1

    or

        # (A0 rewritten as AX; AX rewritten as A1; AX is unknown locally)
        #
        # <-1- A0 <-2- AX <-3- A1 # Marker "2,3" are exclusive to A1

    or

        # (A0 has unknown precursors, A0 rewritten as A1 and A2 (divergence))
        #
        #          <-2- A1 # Marker "2" is exclusive to A0,A1
        #        /
        # <-1- A0
        #        \
        #         <-3- A2 # Marker "3" is exclusive to A0,A2
        #
        # in addition:
        #
        #  Markers "2,3" are exclusive to A1,A2
        #  Markers "1,2,3" are exclusive to A0,A1,A2

    See test/test-obsolete-bundle-strip.t for more examples.

    An example usage is strip. When stripping a changeset, we also want to
    strip the markers exclusive to this changeset. Otherwise we would have
    "dangling" obsolescence markers from its precursors: Obsolescence markers
    marking a node as obsolete without any successors available locally.

    As for relevant markers, the prune markers for children will be followed.
    Of course, they will only be followed if the pruned children is
    locally-known. Since the prune markers are relevant to the pruned node.
    However, while prune markers are considered relevant to the parent of the
    pruned changesets, prune markers for locally-known changeset (with no
    successors) are considered exclusive to the pruned nodes. This allows
    to strip the prune markers (with the rest of the exclusive chain) alongside
    the pruned changesets.
    """
    # running on a filtered repository would be dangerous as markers could be
    # reported as exclusive when they are relevant for other filtered nodes.
    unfi = repo.unfiltered()

    # shortcut to various useful item
    nm = unfi.changelog.nodemap
    precursorsmarkers = unfi.obsstore.predecessors
    successormarkers = unfi.obsstore.successors
    childrenmarkers = unfi.obsstore.children

    # exclusive markers (return of the function)
    exclmarkers = set()
    # we need fast membership testing
    nodes = set(nodes)
    # looking for head in the obshistory
    #
    # XXX we are ignoring all issues in regard with cycle for now.
    stack = [n for n in nodes if not _filterprunes(successormarkers.get(n, ()))]
    # sorted for deterministic traversal order
    stack.sort()
    # nodes already stacked
    seennodes = set(stack)
    while stack:
        current = stack.pop()
        # fetch precursors markers
        markers = list(precursorsmarkers.get(current, ()))
        # extend the list with prune markers
        for mark in successormarkers.get(current, ()):
            if not mark[1]:
                markers.append(mark)
        # and markers from children (looking for prune)
        for mark in childrenmarkers.get(current, ()):
            if not mark[1]:
                markers.append(mark)
        # traverse the markers
        for mark in markers:
            if mark in exclmarkers:
                # markers already selected
                continue

            # If the markers is about the current node, select it
            #
            # (this delays the addition of markers from children)
            if mark[1] or mark[0] == current:
                exclmarkers.add(mark)

            # should we keep traversing through the precursors?
            prec = mark[0]

            # nodes in the stack or already processed
            if prec in seennodes:
                continue

            # is this a locally known node ?
            known = prec in nm
            # if locally-known and not in the <nodes> set the traversal
            # stop here.
            if known and prec not in nodes:
                continue

            # do not keep going if there are unselected markers pointing to this
            # nodes. If we end up traversing these unselected markers later the
            # node will be taken care of at that point.
            # NOTE(review): successormarkers.get(prec) with no default assumes
            # prec always has successor markers here — TODO confirm.
            precmarkers = _filterprunes(successormarkers.get(prec))
            if precmarkers.issubset(exclmarkers):
                seennodes.add(prec)
                stack.append(prec)

    return exclmarkers
285
270
def foreground(repo, nodes):
    """return all nodes in the "foreground" of other node

    The foreground of a revision is anything reachable using the
    parent -> children or precursor -> successor relations. It is very
    similar to "descendant" but augmented with obsolescence information.

    Beware that possible obsolescence cycles may result in complex
    situations.
    """
    repo = repo.unfiltered()
    fg = set(repo.set('%ln::', nodes))
    if repo.obsstore:
        # We only need this complicated logic if there is obsolescence
        # XXX will probably deserve an optimised revset.
        nm = repo.changelog.nodemap
        previous = -1
        # iterate until the set of successors and descendants stabilizes
        while len(fg) != previous:
            previous = len(fg)
            succs = {c.node() for c in fg}
            mutable = [c.node() for c in fg if c.mutable()]
            succs.update(allsuccessors(repo.obsstore, mutable))
            known = (n for n in succs if n in nm)
            fg = set(repo.set('%ln::', known))
    return {c.node() for c in fg}
311
296
312 # effectflag field
# effectflag field
#
# Effect-flag is a 1-byte bit field used to store what changed between a
# changeset and its successor(s).
#
# The effect flag is stored in obs-markers metadata while we iterate on the
# information design. That's why we have the EFFECTFLAGFIELD. If we come up
# with an incompatible design for effect flag, we can store a new design under
# another field name so we don't break readers. We plan to extend the existing
# obsmarkers bit-field when the effect flag design will be stabilized.
#
# The effect-flag is placed behind an experimental flag
# `effect-flags` set to off by default.
#

EFFECTFLAGFIELD = "ef1"

# bits of the effect flag, ordered by bit value
DESCCHANGED = 1 << 0    # action changed the description
METACHANGED = 1 << 1    # action changed the meta
PARENTCHANGED = 1 << 2  # action changed the parent
DIFFCHANGED = 1 << 3    # action changed the diff introduced by the changeset
USERCHANGED = 1 << 4    # the user changed
DATECHANGED = 1 << 5    # the date changed
BRANCHCHANGED = 1 << 6  # the branch changed

# extra keys whose changes are ignored when computing METACHANGED
METABLACKLIST = [
    re.compile('^branch$'),
    re.compile('^.*-source$'),
    re.compile('^.*_source$'),
    re.compile('^source$'),
]
343
328
def metanotblacklisted(metaitem):
    """Tell whether a meta item (extrakey, extravalue) survives the blacklist.

    Returns False when the key matches at least one of the patterns in
    METABLACKLIST, True otherwise.
    """
    metakey = metaitem[0]
    for pattern in METABLACKLIST:
        if pattern.match(metakey):
            return False
    return True
351
336
352 def _prepare_hunk(hunk):
337 def _prepare_hunk(hunk):
353 """Drop all information but the username and patch"""
338 """Drop all information but the username and patch"""
354 cleanhunk = []
339 cleanhunk = []
355 for line in hunk.splitlines():
340 for line in hunk.splitlines():
356 if line.startswith(b'# User') or not line.startswith(b'#'):
341 if line.startswith(b'# User') or not line.startswith(b'#'):
357 if line.startswith(b'@@'):
342 if line.startswith(b'@@'):
358 line = b'@@\n'
343 line = b'@@\n'
359 cleanhunk.append(line)
344 cleanhunk.append(line)
360 return cleanhunk
345 return cleanhunk
361
346
def _getdifflines(iterdiff):
    """return cleaned up lines for the next diff, or None when exhausted"""
    lines = next(iterdiff, None)
    if lines is None:
        return None
    return _prepare_hunk(lines)
370
355
def _cmpdiff(leftctx, rightctx):
    """return True if both ctx introduce the "same diff"

    This is a first and basic implementation, with many shortcomings.
    """
    # Leftctx or rightctx might be filtered, so we need to use the contexts
    # with an unfiltered repository to safely compute the diff
    leftunfi = leftctx._repo.unfiltered()[leftctx.rev()]
    leftdiff = leftunfi.diff(git=1)
    rightunfi = rightctx._repo.unfiltered()[rightctx.rev()]
    rightdiff = rightunfi.diff(git=1)

    # compare the two diffs hunk by hunk until one side is exhausted
    while True:
        lefthunk = _getdifflines(leftdiff)
        righthunk = _getdifflines(rightdiff)
        if lefthunk != righthunk:
            return False
        if lefthunk is None:
            # both exhausted (they compared equal above): diffs match
            return True
392
377
def geteffectflag(relation):
    """ From an obs-marker relation, compute what changed between the
    predecessor and the successor(s), as a bit field of *CHANGED flags.
    """
    effects = 0

    source = relation[0]

    # simple one-to-one field comparisons and the flag each one sets
    fieldchecks = [
        ('description', DESCCHANGED),
        ('user', USERCHANGED),
        ('date', DATECHANGED),
        ('branch', BRANCHCHANGED),
        ('parents', PARENTCHANGED),
    ]

    for changectx in relation[1]:
        for attr, flag in fieldchecks:
            if getattr(changectx, attr)() != getattr(source, attr)():
                effects |= flag

        # Check if other meta has changed (ignoring blacklisted keys)
        changeextra = changectx.extra().items()
        ctxmeta = list(filter(metanotblacklisted, changeextra))
        sourceextra = source.extra().items()
        srcmeta = list(filter(metanotblacklisted, sourceextra))
        if ctxmeta != srcmeta:
            effects |= METACHANGED

        # Check if the diff has changed
        if not _cmpdiff(source, changectx):
            effects |= DIFFCHANGED

    return effects
437
422
def getobsoleted(repo, tr):
    """return the set of pre-existing revisions obsoleted by a transaction"""
    torev = repo.unfiltered().changelog.nodemap.get
    phase = repo._phasecache.phase
    succsmarkers = repo.obsstore.successors.get
    public = phases.public
    addedmarkers = tr.changes.get('obsmarkers')
    addedrevs = tr.changes.get('revs')
    seenrevs = set()
    obsoleted = set()
    for mark in addedmarkers:
        node = mark[0]
        rev = torev(node)
        if rev is None:
            # predecessor unknown locally
            continue
        if rev in seenrevs or rev in addedrevs:
            # already handled, or created by this very transaction
            continue
        seenrevs.add(rev)
        if phase(repo, rev) == public:
            # public changesets cannot become obsolete
            continue
        # only report revisions whose successor markers all come from
        # this transaction
        if set(succsmarkers(node) or []).issubset(addedmarkers):
            obsoleted.add(rev)
    return obsoleted
459
444
class _succs(list):
    """small class to represent a successors set with some metadata about it"""

    def __init__(self, *args, **kwargs):
        super(_succs, self).__init__(*args, **kwargs)
        self.markers = set()

    def copy(self):
        duplicate = _succs(self)
        duplicate.markers = self.markers.copy()
        return duplicate

    @util.propertycache
    def _set(self):
        # cached, treated as immutable once computed
        return set(self)

    def canmerge(self, other):
        return self._set.issubset(other._set)
479
464
480 def successorssets(repo, initialnode, closest=False, cache=None):
465 def successorssets(repo, initialnode, closest=False, cache=None):
481 """Return set of all latest successors of initial nodes
466 """Return set of all latest successors of initial nodes
482
467
483 The successors set of a changeset A are the group of revisions that succeed
468 The successors set of a changeset A are the group of revisions that succeed
484 A. It succeeds A as a consistent whole, each revision being only a partial
469 A. It succeeds A as a consistent whole, each revision being only a partial
485 replacement. By default, the successors set contains non-obsolete
470 replacement. By default, the successors set contains non-obsolete
486 changesets only, walking the obsolescence graph until reaching a leaf. If
471 changesets only, walking the obsolescence graph until reaching a leaf. If
487 'closest' is set to True, closest successors-sets are return (the
472 'closest' is set to True, closest successors-sets are return (the
488 obsolescence walk stops on known changesets).
473 obsolescence walk stops on known changesets).
489
474
490 This function returns the full list of successor sets which is why it
475 This function returns the full list of successor sets which is why it
491 returns a list of tuples and not just a single tuple. Each tuple is a valid
476 returns a list of tuples and not just a single tuple. Each tuple is a valid
492 successors set. Note that (A,) may be a valid successors set for changeset A
477 successors set. Note that (A,) may be a valid successors set for changeset A
493 (see below).
478 (see below).
494
479
495 In most cases, a changeset A will have a single element (e.g. the changeset
480 In most cases, a changeset A will have a single element (e.g. the changeset
496 A is replaced by A') in its successors set. Though, it is also common for a
481 A is replaced by A') in its successors set. Though, it is also common for a
497 changeset A to have no elements in its successor set (e.g. the changeset
482 changeset A to have no elements in its successor set (e.g. the changeset
498 has been pruned). Therefore, the returned list of successors sets will be
483 has been pruned). Therefore, the returned list of successors sets will be
499 [(A',)] or [], respectively.
484 [(A',)] or [], respectively.
500
485
501 When a changeset A is split into A' and B', however, it will result in a
486 When a changeset A is split into A' and B', however, it will result in a
502 successors set containing more than a single element, i.e. [(A',B')].
487 successors set containing more than a single element, i.e. [(A',B')].
503 Divergent changesets will result in multiple successors sets, i.e. [(A',),
488 Divergent changesets will result in multiple successors sets, i.e. [(A',),
504 (A'')].
489 (A'')].
505
490
506 If a changeset A is not obsolete, then it will conceptually have no
491 If a changeset A is not obsolete, then it will conceptually have no
507 successors set. To distinguish this from a pruned changeset, the successor
492 successors set. To distinguish this from a pruned changeset, the successor
508 set will contain itself only, i.e. [(A,)].
493 set will contain itself only, i.e. [(A,)].
509
494
510 Finally, final successors unknown locally are considered to be pruned
495 Finally, final successors unknown locally are considered to be pruned
511 (pruned: obsoleted without any successors). (Final: successors not affected
496 (pruned: obsoleted without any successors). (Final: successors not affected
512 by markers).
497 by markers).
513
498
514 The 'closest' mode respect the repoview filtering. For example, without
499 The 'closest' mode respect the repoview filtering. For example, without
515 filter it will stop at the first locally known changeset, with 'visible'
500 filter it will stop at the first locally known changeset, with 'visible'
516 filter it will stop on visible changesets).
501 filter it will stop on visible changesets).
517
502
518 The optional `cache` parameter is a dictionary that may contains
503 The optional `cache` parameter is a dictionary that may contains
519 precomputed successors sets. It is meant to reuse the computation of a
504 precomputed successors sets. It is meant to reuse the computation of a
520 previous call to `successorssets` when multiple calls are made at the same
505 previous call to `successorssets` when multiple calls are made at the same
521 time. The cache dictionary is updated in place. The caller is responsible
506 time. The cache dictionary is updated in place. The caller is responsible
522 for its life span. Code that makes multiple calls to `successorssets`
507 for its life span. Code that makes multiple calls to `successorssets`
523 *should* use this cache mechanism or risk a performance hit.
508 *should* use this cache mechanism or risk a performance hit.
524
509
525 Since results are different depending of the 'closest' most, the same cache
510 Since results are different depending of the 'closest' most, the same cache
526 cannot be reused for both mode.
511 cannot be reused for both mode.
527 """
512 """
528
513
529 succmarkers = repo.obsstore.successors
514 succmarkers = repo.obsstore.successors
530
515
531 # Stack of nodes we search successors sets for
516 # Stack of nodes we search successors sets for
532 toproceed = [initialnode]
517 toproceed = [initialnode]
533 # set version of above list for fast loop detection
518 # set version of above list for fast loop detection
534 # element added to "toproceed" must be added here
519 # element added to "toproceed" must be added here
535 stackedset = set(toproceed)
520 stackedset = set(toproceed)
536 if cache is None:
521 if cache is None:
537 cache = {}
522 cache = {}
538
523
539 # This while loop is the flattened version of a recursive search for
524 # This while loop is the flattened version of a recursive search for
540 # successors sets
525 # successors sets
541 #
526 #
542 # def successorssets(x):
527 # def successorssets(x):
543 # successors = directsuccessors(x)
528 # successors = directsuccessors(x)
544 # ss = [[]]
529 # ss = [[]]
545 # for succ in directsuccessors(x):
530 # for succ in directsuccessors(x):
546 # # product as in itertools cartesian product
531 # # product as in itertools cartesian product
547 # ss = product(ss, successorssets(succ))
532 # ss = product(ss, successorssets(succ))
548 # return ss
533 # return ss
549 #
534 #
550 # But we can not use plain recursive calls here:
535 # But we can not use plain recursive calls here:
551 # - that would blow the python call stack
536 # - that would blow the python call stack
552 # - obsolescence markers may have cycles, we need to handle them.
537 # - obsolescence markers may have cycles, we need to handle them.
553 #
538 #
554 # The `toproceed` list act as our call stack. Every node we search
539 # The `toproceed` list act as our call stack. Every node we search
555 # successors set for are stacked there.
540 # successors set for are stacked there.
556 #
541 #
557 # The `stackedset` is set version of this stack used to check if a node is
542 # The `stackedset` is set version of this stack used to check if a node is
558 # already stacked. This check is used to detect cycles and prevent infinite
543 # already stacked. This check is used to detect cycles and prevent infinite
559 # loop.
544 # loop.
560 #
545 #
561 # successors set of all nodes are stored in the `cache` dictionary.
546 # successors set of all nodes are stored in the `cache` dictionary.
562 #
547 #
563 # After this while loop ends we use the cache to return the successors sets
548 # After this while loop ends we use the cache to return the successors sets
564 # for the node requested by the caller.
549 # for the node requested by the caller.
565 while toproceed:
550 while toproceed:
566 # Every iteration tries to compute the successors sets of the topmost
551 # Every iteration tries to compute the successors sets of the topmost
567 # node of the stack: CURRENT.
552 # node of the stack: CURRENT.
568 #
553 #
569 # There are four possible outcomes:
554 # There are four possible outcomes:
570 #
555 #
571 # 1) We already know the successors sets of CURRENT:
556 # 1) We already know the successors sets of CURRENT:
572 # -> mission accomplished, pop it from the stack.
557 # -> mission accomplished, pop it from the stack.
573 # 2) Stop the walk:
558 # 2) Stop the walk:
574 # default case: Node is not obsolete
559 # default case: Node is not obsolete
575 # closest case: Node is known at this repo filter level
560 # closest case: Node is known at this repo filter level
576 # -> the node is its own successors sets. Add it to the cache.
561 # -> the node is its own successors sets. Add it to the cache.
577 # 3) We do not know successors set of direct successors of CURRENT:
562 # 3) We do not know successors set of direct successors of CURRENT:
578 # -> We add those successors to the stack.
563 # -> We add those successors to the stack.
579 # 4) We know successors sets of all direct successors of CURRENT:
564 # 4) We know successors sets of all direct successors of CURRENT:
580 # -> We can compute CURRENT successors set and add it to the
565 # -> We can compute CURRENT successors set and add it to the
581 # cache.
566 # cache.
582 #
567 #
583 current = toproceed[-1]
568 current = toproceed[-1]
584
569
585 # case 2 condition is a bit hairy because of closest,
570 # case 2 condition is a bit hairy because of closest,
586 # we compute it on its own
571 # we compute it on its own
587 case2condition = ((current not in succmarkers)
572 case2condition = ((current not in succmarkers)
588 or (closest and current != initialnode
573 or (closest and current != initialnode
589 and current in repo))
574 and current in repo))
590
575
591 if current in cache:
576 if current in cache:
592 # case (1): We already know the successors sets
577 # case (1): We already know the successors sets
593 stackedset.remove(toproceed.pop())
578 stackedset.remove(toproceed.pop())
594 elif case2condition:
579 elif case2condition:
595 # case (2): end of walk.
580 # case (2): end of walk.
596 if current in repo:
581 if current in repo:
597 # We have a valid successors.
582 # We have a valid successors.
598 cache[current] = [_succs((current,))]
583 cache[current] = [_succs((current,))]
599 else:
584 else:
600 # Final obsolete version is unknown locally.
585 # Final obsolete version is unknown locally.
601 # Do not count that as a valid successors
586 # Do not count that as a valid successors
602 cache[current] = []
587 cache[current] = []
603 else:
588 else:
604 # cases (3) and (4)
589 # cases (3) and (4)
605 #
590 #
606 # We proceed in two phases. Phase 1 aims to distinguish case (3)
591 # We proceed in two phases. Phase 1 aims to distinguish case (3)
607 # from case (4):
592 # from case (4):
608 #
593 #
609 # For each direct successors of CURRENT, we check whether its
594 # For each direct successors of CURRENT, we check whether its
610 # successors sets are known. If they are not, we stack the
595 # successors sets are known. If they are not, we stack the
611 # unknown node and proceed to the next iteration of the while
596 # unknown node and proceed to the next iteration of the while
612 # loop. (case 3)
597 # loop. (case 3)
613 #
598 #
614 # During this step, we may detect obsolescence cycles: a node
599 # During this step, we may detect obsolescence cycles: a node
615 # with unknown successors sets but already in the call stack.
600 # with unknown successors sets but already in the call stack.
616 # In such a situation, we arbitrary set the successors sets of
601 # In such a situation, we arbitrary set the successors sets of
617 # the node to nothing (node pruned) to break the cycle.
602 # the node to nothing (node pruned) to break the cycle.
618 #
603 #
619 # If no break was encountered we proceed to phase 2.
604 # If no break was encountered we proceed to phase 2.
620 #
605 #
621 # Phase 2 computes successors sets of CURRENT (case 4); see details
606 # Phase 2 computes successors sets of CURRENT (case 4); see details
622 # in phase 2 itself.
607 # in phase 2 itself.
623 #
608 #
624 # Note the two levels of iteration in each phase.
609 # Note the two levels of iteration in each phase.
625 # - The first one handles obsolescence markers using CURRENT as
610 # - The first one handles obsolescence markers using CURRENT as
626 # precursor (successors markers of CURRENT).
611 # precursor (successors markers of CURRENT).
627 #
612 #
628 # Having multiple entry here means divergence.
613 # Having multiple entry here means divergence.
629 #
614 #
630 # - The second one handles successors defined in each marker.
615 # - The second one handles successors defined in each marker.
631 #
616 #
632 # Having none means pruned node, multiple successors means split,
617 # Having none means pruned node, multiple successors means split,
633 # single successors are standard replacement.
618 # single successors are standard replacement.
634 #
619 #
635 for mark in sorted(succmarkers[current]):
620 for mark in sorted(succmarkers[current]):
636 for suc in mark[1]:
621 for suc in mark[1]:
637 if suc not in cache:
622 if suc not in cache:
638 if suc in stackedset:
623 if suc in stackedset:
639 # cycle breaking
624 # cycle breaking
640 cache[suc] = []
625 cache[suc] = []
641 else:
626 else:
642 # case (3) If we have not computed successors sets
627 # case (3) If we have not computed successors sets
643 # of one of those successors we add it to the
628 # of one of those successors we add it to the
644 # `toproceed` stack and stop all work for this
629 # `toproceed` stack and stop all work for this
645 # iteration.
630 # iteration.
646 toproceed.append(suc)
631 toproceed.append(suc)
647 stackedset.add(suc)
632 stackedset.add(suc)
648 break
633 break
649 else:
634 else:
650 continue
635 continue
651 break
636 break
652 else:
637 else:
653 # case (4): we know all successors sets of all direct
638 # case (4): we know all successors sets of all direct
654 # successors
639 # successors
655 #
640 #
656 # Successors set contributed by each marker depends on the
641 # Successors set contributed by each marker depends on the
657 # successors sets of all its "successors" node.
642 # successors sets of all its "successors" node.
658 #
643 #
659 # Each different marker is a divergence in the obsolescence
644 # Each different marker is a divergence in the obsolescence
660 # history. It contributes successors sets distinct from other
645 # history. It contributes successors sets distinct from other
661 # markers.
646 # markers.
662 #
647 #
663 # Within a marker, a successor may have divergent successors
648 # Within a marker, a successor may have divergent successors
664 # sets. In such a case, the marker will contribute multiple
649 # sets. In such a case, the marker will contribute multiple
665 # divergent successors sets. If multiple successors have
650 # divergent successors sets. If multiple successors have
666 # divergent successors sets, a Cartesian product is used.
651 # divergent successors sets, a Cartesian product is used.
667 #
652 #
668 # At the end we post-process successors sets to remove
653 # At the end we post-process successors sets to remove
669 # duplicated entry and successors set that are strict subset of
654 # duplicated entry and successors set that are strict subset of
670 # another one.
655 # another one.
671 succssets = []
656 succssets = []
672 for mark in sorted(succmarkers[current]):
657 for mark in sorted(succmarkers[current]):
673 # successors sets contributed by this marker
658 # successors sets contributed by this marker
674 base = _succs()
659 base = _succs()
675 base.markers.add(mark)
660 base.markers.add(mark)
676 markss = [base]
661 markss = [base]
677 for suc in mark[1]:
662 for suc in mark[1]:
678 # cardinal product with previous successors
663 # cardinal product with previous successors
679 productresult = []
664 productresult = []
680 for prefix in markss:
665 for prefix in markss:
681 for suffix in cache[suc]:
666 for suffix in cache[suc]:
682 newss = prefix.copy()
667 newss = prefix.copy()
683 newss.markers.update(suffix.markers)
668 newss.markers.update(suffix.markers)
684 for part in suffix:
669 for part in suffix:
685 # do not duplicated entry in successors set
670 # do not duplicated entry in successors set
686 # first entry wins.
671 # first entry wins.
687 if part not in newss:
672 if part not in newss:
688 newss.append(part)
673 newss.append(part)
689 productresult.append(newss)
674 productresult.append(newss)
690 markss = productresult
675 markss = productresult
691 succssets.extend(markss)
676 succssets.extend(markss)
692 # remove duplicated and subset
677 # remove duplicated and subset
693 seen = []
678 seen = []
694 final = []
679 final = []
695 candidates = sorted((s for s in succssets if s),
680 candidates = sorted((s for s in succssets if s),
696 key=len, reverse=True)
681 key=len, reverse=True)
697 for cand in candidates:
682 for cand in candidates:
698 for seensuccs in seen:
683 for seensuccs in seen:
699 if cand.canmerge(seensuccs):
684 if cand.canmerge(seensuccs):
700 seensuccs.markers.update(cand.markers)
685 seensuccs.markers.update(cand.markers)
701 break
686 break
702 else:
687 else:
703 final.append(cand)
688 final.append(cand)
704 seen.append(cand)
689 seen.append(cand)
705 final.reverse() # put small successors set first
690 final.reverse() # put small successors set first
706 cache[current] = final
691 cache[current] = final
707 return cache[initialnode]
692 return cache[initialnode]
708
693
709 def successorsandmarkers(repo, ctx):
694 def successorsandmarkers(repo, ctx):
710 """compute the raw data needed for computing obsfate
695 """compute the raw data needed for computing obsfate
711 Returns a list of dict, one dict per successors set
696 Returns a list of dict, one dict per successors set
712 """
697 """
713 if not ctx.obsolete():
698 if not ctx.obsolete():
714 return None
699 return None
715
700
716 ssets = successorssets(repo, ctx.node(), closest=True)
701 ssets = successorssets(repo, ctx.node(), closest=True)
717
702
718 # closestsuccessors returns an empty list for pruned revisions, remap it
703 # closestsuccessors returns an empty list for pruned revisions, remap it
719 # into a list containing an empty list for future processing
704 # into a list containing an empty list for future processing
720 if ssets == []:
705 if ssets == []:
721 ssets = [[]]
706 ssets = [[]]
722
707
723 # Try to recover pruned markers
708 # Try to recover pruned markers
724 succsmap = repo.obsstore.successors
709 succsmap = repo.obsstore.successors
725 fullsuccessorsets = [] # successor set + markers
710 fullsuccessorsets = [] # successor set + markers
726 for sset in ssets:
711 for sset in ssets:
727 if sset:
712 if sset:
728 fullsuccessorsets.append(sset)
713 fullsuccessorsets.append(sset)
729 else:
714 else:
730 # successorsset return an empty set() when ctx or one of its
715 # successorsset return an empty set() when ctx or one of its
731 # successors is pruned.
716 # successors is pruned.
732 # In this case, walk the obs-markers tree again starting with ctx
717 # In this case, walk the obs-markers tree again starting with ctx
733 # and find the relevant pruning obs-makers, the ones without
718 # and find the relevant pruning obs-makers, the ones without
734 # successors.
719 # successors.
735 # Having these markers allow us to compute some information about
720 # Having these markers allow us to compute some information about
736 # its fate, like who pruned this changeset and when.
721 # its fate, like who pruned this changeset and when.
737
722
738 # XXX we do not catch all prune markers (eg rewritten then pruned)
723 # XXX we do not catch all prune markers (eg rewritten then pruned)
739 # (fix me later)
724 # (fix me later)
740 foundany = False
725 foundany = False
741 for mark in succsmap.get(ctx.node(), ()):
726 for mark in succsmap.get(ctx.node(), ()):
742 if not mark[1]:
727 if not mark[1]:
743 foundany = True
728 foundany = True
744 sset = _succs()
729 sset = _succs()
745 sset.markers.add(mark)
730 sset.markers.add(mark)
746 fullsuccessorsets.append(sset)
731 fullsuccessorsets.append(sset)
747 if not foundany:
732 if not foundany:
748 fullsuccessorsets.append(_succs())
733 fullsuccessorsets.append(_succs())
749
734
750 values = []
735 values = []
751 for sset in fullsuccessorsets:
736 for sset in fullsuccessorsets:
752 values.append({'successors': sset, 'markers': sset.markers})
737 values.append({'successors': sset, 'markers': sset.markers})
753
738
754 return values
739 return values
755
740
756 def _getobsfate(successorssets):
741 def _getobsfate(successorssets):
757 """ Compute a changeset obsolescence fate based on its successorssets.
742 """ Compute a changeset obsolescence fate based on its successorssets.
758 Successors can be the tipmost ones or the immediate ones. This function
743 Successors can be the tipmost ones or the immediate ones. This function
759 return values are not meant to be shown directly to users, it is meant to
744 return values are not meant to be shown directly to users, it is meant to
760 be used by internal functions only.
745 be used by internal functions only.
761 Returns one fate from the following values:
746 Returns one fate from the following values:
762 - pruned
747 - pruned
763 - diverged
748 - diverged
764 - superseded
749 - superseded
765 - superseded_split
750 - superseded_split
766 """
751 """
767
752
768 if len(successorssets) == 0:
753 if len(successorssets) == 0:
769 # The commit has been pruned
754 # The commit has been pruned
770 return 'pruned'
755 return 'pruned'
771 elif len(successorssets) > 1:
756 elif len(successorssets) > 1:
772 return 'diverged'
757 return 'diverged'
773 else:
758 else:
774 # No divergence, only one set of successors
759 # No divergence, only one set of successors
775 successors = successorssets[0]
760 successors = successorssets[0]
776
761
777 if len(successors) == 1:
762 if len(successors) == 1:
778 return 'superseded'
763 return 'superseded'
779 else:
764 else:
780 return 'superseded_split'
765 return 'superseded_split'
781
766
782 def obsfateverb(successorset, markers):
767 def obsfateverb(successorset, markers):
783 """ Return the verb summarizing the successorset and potentially using
768 """ Return the verb summarizing the successorset and potentially using
784 information from the markers
769 information from the markers
785 """
770 """
786 if not successorset:
771 if not successorset:
787 verb = 'pruned'
772 verb = 'pruned'
788 elif len(successorset) == 1:
773 elif len(successorset) == 1:
789 verb = 'rewritten'
774 verb = 'rewritten'
790 else:
775 else:
791 verb = 'split'
776 verb = 'split'
792 return verb
777 return verb
793
778
794 def markersdates(markers):
779 def markersdates(markers):
795 """returns the list of dates for a list of markers
780 """returns the list of dates for a list of markers
796 """
781 """
797 return [m[4] for m in markers]
782 return [m[4] for m in markers]
798
783
799 def markersusers(markers):
784 def markersusers(markers):
800 """ Returns a sorted list of markers users without duplicates
785 """ Returns a sorted list of markers users without duplicates
801 """
786 """
802 markersmeta = [dict(m[3]) for m in markers]
787 markersmeta = [dict(m[3]) for m in markers]
803 users = set(meta.get('user') for meta in markersmeta if meta.get('user'))
788 users = set(meta.get('user') for meta in markersmeta if meta.get('user'))
804
789
805 return sorted(users)
790 return sorted(users)
806
791
807 def markersoperations(markers):
792 def markersoperations(markers):
808 """ Returns a sorted list of markers operations without duplicates
793 """ Returns a sorted list of markers operations without duplicates
809 """
794 """
810 markersmeta = [dict(m[3]) for m in markers]
795 markersmeta = [dict(m[3]) for m in markers]
811 operations = set(meta.get('operation') for meta in markersmeta
796 operations = set(meta.get('operation') for meta in markersmeta
812 if meta.get('operation'))
797 if meta.get('operation'))
813
798
814 return sorted(operations)
799 return sorted(operations)
815
800
816 def obsfateprinter(successors, markers, ui):
801 def obsfateprinter(successors, markers, ui):
817 """ Build a obsfate string for a single successorset using all obsfate
802 """ Build a obsfate string for a single successorset using all obsfate
818 related function defined in obsutil
803 related function defined in obsutil
819 """
804 """
820 quiet = ui.quiet
805 quiet = ui.quiet
821 verbose = ui.verbose
806 verbose = ui.verbose
822 normal = not verbose and not quiet
807 normal = not verbose and not quiet
823
808
824 line = []
809 line = []
825
810
826 # Verb
811 # Verb
827 line.append(obsfateverb(successors, markers))
812 line.append(obsfateverb(successors, markers))
828
813
829 # Operations
814 # Operations
830 operations = markersoperations(markers)
815 operations = markersoperations(markers)
831 if operations:
816 if operations:
832 line.append(" using %s" % ", ".join(operations))
817 line.append(" using %s" % ", ".join(operations))
833
818
834 # Successors
819 # Successors
835 if successors:
820 if successors:
836 fmtsuccessors = [successors.joinfmt(succ) for succ in successors]
821 fmtsuccessors = [successors.joinfmt(succ) for succ in successors]
837 line.append(" as %s" % ", ".join(fmtsuccessors))
822 line.append(" as %s" % ", ".join(fmtsuccessors))
838
823
839 # Users
824 # Users
840 users = markersusers(markers)
825 users = markersusers(markers)
841 # Filter out current user in not verbose mode to reduce amount of
826 # Filter out current user in not verbose mode to reduce amount of
842 # information
827 # information
843 if not verbose:
828 if not verbose:
844 currentuser = ui.username(acceptempty=True)
829 currentuser = ui.username(acceptempty=True)
845 if len(users) == 1 and currentuser in users:
830 if len(users) == 1 and currentuser in users:
846 users = None
831 users = None
847
832
848 if (verbose or normal) and users:
833 if (verbose or normal) and users:
849 line.append(" by %s" % ", ".join(users))
834 line.append(" by %s" % ", ".join(users))
850
835
851 # Date
836 # Date
852 dates = markersdates(markers)
837 dates = markersdates(markers)
853
838
854 if dates and verbose:
839 if dates and verbose:
855 min_date = min(dates)
840 min_date = min(dates)
856 max_date = max(dates)
841 max_date = max(dates)
857
842
858 if min_date == max_date:
843 if min_date == max_date:
859 fmtmin_date = util.datestr(min_date, '%Y-%m-%d %H:%M %1%2')
844 fmtmin_date = util.datestr(min_date, '%Y-%m-%d %H:%M %1%2')
860 line.append(" (at %s)" % fmtmin_date)
845 line.append(" (at %s)" % fmtmin_date)
861 else:
846 else:
862 fmtmin_date = util.datestr(min_date, '%Y-%m-%d %H:%M %1%2')
847 fmtmin_date = util.datestr(min_date, '%Y-%m-%d %H:%M %1%2')
863 fmtmax_date = util.datestr(max_date, '%Y-%m-%d %H:%M %1%2')
848 fmtmax_date = util.datestr(max_date, '%Y-%m-%d %H:%M %1%2')
864 line.append(" (between %s and %s)" % (fmtmin_date, fmtmax_date))
849 line.append(" (between %s and %s)" % (fmtmin_date, fmtmax_date))
865
850
866 return "".join(line)
851 return "".join(line)
867
852
868
853
869 filteredmsgtable = {
854 filteredmsgtable = {
870 "pruned": _("hidden revision '%s' is pruned"),
855 "pruned": _("hidden revision '%s' is pruned"),
871 "diverged": _("hidden revision '%s' has diverged"),
856 "diverged": _("hidden revision '%s' has diverged"),
872 "superseded": _("hidden revision '%s' was rewritten as: %s"),
857 "superseded": _("hidden revision '%s' was rewritten as: %s"),
873 "superseded_split": _("hidden revision '%s' was split as: %s"),
858 "superseded_split": _("hidden revision '%s' was split as: %s"),
874 "superseded_split_several": _("hidden revision '%s' was split as: %s and "
859 "superseded_split_several": _("hidden revision '%s' was split as: %s and "
875 "%d more"),
860 "%d more"),
876 }
861 }
877
862
878 def _getfilteredreason(repo, changeid, ctx):
863 def _getfilteredreason(repo, changeid, ctx):
879 """return a human-friendly string on why a obsolete changeset is hidden
864 """return a human-friendly string on why a obsolete changeset is hidden
880 """
865 """
881 successors = successorssets(repo, ctx.node())
866 successors = successorssets(repo, ctx.node())
882 fate = _getobsfate(successors)
867 fate = _getobsfate(successors)
883
868
884 # Be more precise in case the revision is superseded
869 # Be more precise in case the revision is superseded
885 if fate == 'pruned':
870 if fate == 'pruned':
886 return filteredmsgtable['pruned'] % changeid
871 return filteredmsgtable['pruned'] % changeid
887 elif fate == 'diverged':
872 elif fate == 'diverged':
888 return filteredmsgtable['diverged'] % changeid
873 return filteredmsgtable['diverged'] % changeid
889 elif fate == 'superseded':
874 elif fate == 'superseded':
890 single_successor = nodemod.short(successors[0][0])
875 single_successor = nodemod.short(successors[0][0])
891 return filteredmsgtable['superseded'] % (changeid, single_successor)
876 return filteredmsgtable['superseded'] % (changeid, single_successor)
892 elif fate == 'superseded_split':
877 elif fate == 'superseded_split':
893
878
894 succs = []
879 succs = []
895 for node_id in successors[0]:
880 for node_id in successors[0]:
896 succs.append(nodemod.short(node_id))
881 succs.append(nodemod.short(node_id))
897
882
898 if len(succs) <= 2:
883 if len(succs) <= 2:
899 fmtsuccs = ', '.join(succs)
884 fmtsuccs = ', '.join(succs)
900 return filteredmsgtable['superseded_split'] % (changeid, fmtsuccs)
885 return filteredmsgtable['superseded_split'] % (changeid, fmtsuccs)
901 else:
886 else:
902 firstsuccessors = ', '.join(succs[:2])
887 firstsuccessors = ', '.join(succs[:2])
903 remainingnumber = len(succs) - 2
888 remainingnumber = len(succs) - 2
904
889
905 args = (changeid, firstsuccessors, remainingnumber)
890 args = (changeid, firstsuccessors, remainingnumber)
906 return filteredmsgtable['superseded_split_several'] % args
891 return filteredmsgtable['superseded_split_several'] % args
General Comments 0
You need to be logged in to leave comments. Login now