##// END OF EJS Templates
obsutil: make sure "addedrevs" is not None in getobsoleted()...
Yuya Nishihara -
r39336:52e6171e default
parent child Browse files
Show More
@@ -1,982 +1,982 b''
1 # obsutil.py - utility functions for obsolescence
1 # obsutil.py - utility functions for obsolescence
2 #
2 #
3 # Copyright 2017 Boris Feld <boris.feld@octobus.net>
3 # Copyright 2017 Boris Feld <boris.feld@octobus.net>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import re
10 import re
11
11
12 from .i18n import _
12 from .i18n import _
13 from . import (
13 from . import (
14 diffutil,
14 diffutil,
15 encoding,
15 encoding,
16 node as nodemod,
16 node as nodemod,
17 phases,
17 phases,
18 util,
18 util,
19 )
19 )
20 from .utils import (
20 from .utils import (
21 dateutil,
21 dateutil,
22 )
22 )
23
23
### obsolescence marker flag

## bumpedfix flag
#
# When a changeset A' succeeds a changeset A which became public, we call A'
# "bumped" because it is a successor of a public changeset.
#
# o A' (bumped)
# |`:
# | o A
# |/
# o Z
#
# The way to solve this situation is to create a new changeset Ad as a child
# of A. This changeset has the same content as A'. So the diff from A to A'
# is the same as the diff from A to Ad. Ad is marked as a successor of A'.
#
# o Ad
# |`:
# | x A'
# |'|
# o | A
# |/
# o Z
#
# But by transitivity Ad is also a successor of A. To avoid having Ad marked
# as bumped too, we add the `bumpedfix` flag to the marker <A', (Ad,)>.
# This flag means that the successor expresses the changes between the public
# and bumped versions and fixes the situation, breaking the transitivity of
# "bumped" here.
bumpedfix = 1
# NOTE(review): presumably marks a marker whose node ids are sha256 (32 bytes)
# instead of sha1 — confirm against the obsstore format documentation.
usingsha256 = 2
56
56
class marker(object):
    """Thin wrapper around an obsolescence marker raw tuple."""

    def __init__(self, repo, data):
        # the repo argument will be used to create changectx in later version
        self._repo = repo
        self._data = data
        self._decodedmeta = None

    def __hash__(self):
        return hash(self._data)

    def __eq__(self, other):
        return type(other) == type(self) and self._data == other._data

    def prednode(self):
        """Predecessor changeset node identifier"""
        return self._data[0]

    def succnodes(self):
        """List of successor changesets node identifiers"""
        return self._data[1]

    def parentnodes(self):
        """Parents of the predecessors (None if not recorded)"""
        return self._data[5]

    def metadata(self):
        """Decoded metadata dictionary"""
        return dict(self._data[3])

    def date(self):
        """Creation date as (unixtime, offset)"""
        return self._data[4]

    def flags(self):
        """The flags field of the marker"""
        return self._data[2]
97
97
def getmarkers(repo, nodes=None, exclusive=False):
    """Yield ``marker`` objects known to a repository.

    If <nodes> is specified, only markers "relevant" to those nodes are
    yielded; with ``exclusive`` also set, only their exclusive markers."""
    if nodes is None:
        rawmarkers = repo.obsstore
    elif exclusive:
        rawmarkers = exclusivemarkers(repo, nodes)
    else:
        rawmarkers = repo.obsstore.relevantmarkers(nodes)

    for rawdata in rawmarkers:
        yield marker(repo, rawdata)
112
112
def closestpredecessors(repo, nodeid):
    """yield the list of next predecessors pointing on visible changectx nodes

    This function respect the repoview filtering, filtered revision will be
    considered missing.
    """
    predecessors = repo.obsstore.predecessors
    pending = [nodeid]
    visited = set(pending)

    while pending:
        node = pending.pop()
        for mark in predecessors.get(node, ()):
            candidate = mark[0]

            # basic cycle protection
            if candidate in visited:
                continue
            visited.add(candidate)

            if candidate in repo:
                # visible in the (possibly filtered) repo: report it
                yield candidate
            else:
                # unknown or filtered out: keep walking its predecessors
                pending.append(candidate)
140
140
def allpredecessors(obsstore, nodes, ignoreflags=0):
    """Yield node for every precursor of <nodes>.

    Some precursors may be unknown locally.

    This is a linear yield unsuited to detecting folded changesets. It
    includes the initial nodes too."""
    pending = set(nodes)
    seen = set(pending)
    while pending:
        node = pending.pop()
        yield node
        for mark in obsstore.predecessors.get(node, ()):
            # skip markers carrying any of the ignored flags
            if mark[2] & ignoreflags:
                continue
            prec = mark[0]
            if prec not in seen:
                seen.add(prec)
                pending.add(prec)
162
162
def allsuccessors(obsstore, nodes, ignoreflags=0):
    """Yield node for every successor of <nodes>.

    Some successors may be unknown locally.

    This is a linear yield unsuited to detecting split changesets. It
    includes the initial nodes too."""
    pending = set(nodes)
    seen = set(pending)
    while pending:
        node = pending.pop()
        yield node
        for mark in obsstore.successors.get(node, ()):
            # skip markers carrying any of the ignored flags
            if mark[2] & ignoreflags:
                continue
            for succ in mark[1]:
                if succ not in seen:
                    seen.add(succ)
                    pending.add(succ)
183
183
184 def _filterprunes(markers):
184 def _filterprunes(markers):
185 """return a set with no prune markers"""
185 """return a set with no prune markers"""
186 return set(m for m in markers if m[1])
186 return set(m for m in markers if m[1])
187
187
def exclusivemarkers(repo, nodes):
    """set of markers relevant to "nodes" but no other locally-known nodes

    This function compute the set of markers "exclusive" to a locally-known
    node. This means we walk the markers starting from <nodes> until we reach a
    locally-known precursors outside of <nodes>. Element of <nodes> with
    locally-known successors outside of <nodes> are ignored (since their
    precursors markers are also relevant to these successors).

    For example:

        # (A0 rewritten as A1)
        #
        # A0 <-1- A1 # Marker "1" is exclusive to A1

    or

        # (A0 rewritten as AX; AX rewritten as A1; AX is unknown locally)
        #
        # <-1- A0 <-2- AX <-3- A1 # Marker "2,3" are exclusive to A1

    or

        # (A0 has unknown precursors, A0 rewritten as A1 and A2 (divergence))
        #
        #          <-2- A1 # Marker "2" is exclusive to A0,A1
        #        /
        # <-1- A0
        #        \
        #         <-3- A2 # Marker "3" is exclusive to A0,A2
        #
        # in addition:
        #
        #  Markers "2,3" are exclusive to A1,A2
        #  Markers "1,2,3" are exclusive to A0,A1,A2

    See test/test-obsolete-bundle-strip.t for more examples.

    An example usage is strip. When stripping a changeset, we also want to
    strip the markers exclusive to this changeset. Otherwise we would have
    "dangling" obsolescence markers from its precursors: Obsolescence markers
    marking a node as obsolete without any successors available locally.

    As for relevant markers, the prune markers for children will be followed.
    Of course, they will only be followed if the pruned children is
    locally-known. Since the prune markers are relevant to the pruned node.
    However, while prune markers are considered relevant to the parent of the
    pruned changesets, prune markers for locally-known changeset (with no
    successors) are considered exclusive to the pruned nodes. This allows
    to strip the prune markers (with the rest of the exclusive chain) alongside
    the pruned changesets.
    """
    # running on a filtered repository would be dangerous as markers could be
    # reported as exclusive when they are relevant for other filtered nodes.
    unfi = repo.unfiltered()

    # shortcut to various useful item
    nm = unfi.changelog.nodemap
    precursorsmarkers = unfi.obsstore.predecessors
    successormarkers = unfi.obsstore.successors
    childrenmarkers = unfi.obsstore.children

    # exclusive markers (return of the function)
    exclmarkers = set()
    # we need fast membership testing
    nodes = set(nodes)
    # looking for head in the obshistory: only nodes without non-prune
    # successor markers can start a walk down their predecessor chain
    #
    # XXX we are ignoring all issues in regard with cycle for now.
    stack = [n for n in nodes if not _filterprunes(successormarkers.get(n, ()))]
    # sorted for deterministic traversal order
    stack.sort()
    # nodes already stacked
    seennodes = set(stack)
    while stack:
        current = stack.pop()
        # fetch precursors markers
        markers = list(precursorsmarkers.get(current, ()))
        # extend the list with prune markers
        for mark in successormarkers.get(current, ()):
            if not mark[1]:
                markers.append(mark)
        # and markers from children (looking for prune)
        for mark in childrenmarkers.get(current, ()):
            if not mark[1]:
                markers.append(mark)
        # traverse the markers
        for mark in markers:
            if mark in exclmarkers:
                # markers already selected
                continue

            # If the markers is about the current node, select it
            #
            # (this delay the addition of markers from children)
            if mark[1] or mark[0] == current:
                exclmarkers.add(mark)

            # should we keep traversing through the precursors?
            prec = mark[0]

            # nodes in the stack or already processed
            if prec in seennodes:
                continue

            # is this a locally known node ?
            known = prec in nm
            # if locally-known and not in the <nodes> set the traversal
            # stop here.
            if known and prec not in nodes:
                continue

            # do not keep going if there are unselected markers pointing to this
            # nodes. If we end up traversing these unselected markers later the
            # node will be taken care of at that point.
            #
            # NOTE(review): no default is passed to .get() here — presumably
            # `prec` always has an entry in the successors index because the
            # marker we just followed lists it as a predecessor; confirm.
            precmarkers = _filterprunes(successormarkers.get(prec))
            if precmarkers.issubset(exclmarkers):
                seennodes.add(prec)
                stack.append(prec)

    return exclmarkers
308
308
def foreground(repo, nodes):
    """return all nodes in the "foreground" of other node

    The foreground of a revision is anything reachable using parent -> children
    or precursor -> successor relation. It is very similar to "descendant" but
    augmented with obsolescence information.

    Beware that possible obsolescence cycle may result if complex situation.
    """
    # work on the unfiltered repo so obsolete changesets stay reachable
    repo = repo.unfiltered()
    # start from the plain descendants of the requested nodes
    foreground = set(repo.set('%ln::', nodes))
    if repo.obsstore:
        # We only need this complicated logic if there is obsolescence
        # XXX will probably deserve an optimised revset.
        nm = repo.changelog.nodemap
        plen = -1
        # compute the whole set of successors or descendants: alternate
        # "add successors" and "add descendants" steps until the set stops
        # growing (fixed point)
        while len(foreground) != plen:
            plen = len(foreground)
            succs = set(c.node() for c in foreground)
            mutable = [c.node() for c in foreground if c.mutable()]
            succs.update(allsuccessors(repo.obsstore, mutable))
            # drop successors unknown locally before feeding the revset
            known = (n for n in succs if n in nm)
            foreground = set(repo.set('%ln::', known))
    return set(c.node() for c in foreground)
334
334
# effectflag field
#
# Effect-flag is a 1-byte bit field used to store what changed between a
# changeset and its successor(s).
#
# The effect flag is stored in obs-markers metadata while we iterate on the
# information design. That's why we have the EFFECTFLAGFIELD. If we come up
# with an incompatible design for effect flag, we can store a new design under
# another field name so we don't break readers. We plan to extend the existing
# obsmarkers bit-field when the effect flag design will be stabilized.
#
# The effect-flag is placed behind an experimental flag
# `effect-flags` set to off by default.
#

# metadata key under which the effect flag is stored in a marker
EFFECTFLAGFIELD = "ef1"

DESCCHANGED = 1 << 0 # action changed the description
METACHANGED = 1 << 1 # action change the meta
DIFFCHANGED = 1 << 3 # action change diff introduced by the changeset
PARENTCHANGED = 1 << 2 # action change the parent
USERCHANGED = 1 << 4 # the user changed
DATECHANGED = 1 << 5 # the date changed
BRANCHCHANGED = 1 << 6 # the branch changed

# extra keys matching any of these patterns are ignored when comparing
# changeset metadata for the METACHANGED effect
METABLACKLIST = [
    re.compile('^branch$'),
    re.compile('^.*-source$'),
    re.compile('^.*_source$'),
    re.compile('^source$'),
]
366
366
def metanotblacklisted(metaitem):
    """ Check that the key of a meta item (extrakey, extravalue) does not
    match at least one of the blacklist pattern
    """
    key = metaitem[0]
    for pattern in METABLACKLIST:
        if pattern.match(key):
            return False
    return True
374
374
375 def _prepare_hunk(hunk):
375 def _prepare_hunk(hunk):
376 """Drop all information but the username and patch"""
376 """Drop all information but the username and patch"""
377 cleanhunk = []
377 cleanhunk = []
378 for line in hunk.splitlines():
378 for line in hunk.splitlines():
379 if line.startswith(b'# User') or not line.startswith(b'#'):
379 if line.startswith(b'# User') or not line.startswith(b'#'):
380 if line.startswith(b'@@'):
380 if line.startswith(b'@@'):
381 line = b'@@\n'
381 line = b'@@\n'
382 cleanhunk.append(line)
382 cleanhunk.append(line)
383 return cleanhunk
383 return cleanhunk
384
384
def _getdifflines(iterdiff):
    """return a cleaned up lines"""
    hunk = next(iterdiff, None)
    # exhausted iterator: propagate the None sentinel to the caller
    if hunk is None:
        return None
    return _prepare_hunk(hunk)
393
393
def _cmpdiff(leftctx, rightctx):
    """return True if both ctx introduce the "same diff"

    This is a first and basic implementation, with many shortcoming.
    """
    diffopts = diffutil.diffallopts(leftctx.repo().ui, {'git': True})
    # Either context might be hidden by a filtered repoview, so compute the
    # diffs through the unfiltered repository.
    leftdiff = leftctx._repo.unfiltered()[leftctx.rev()].diff(opts=diffopts)
    rightdiff = rightctx._repo.unfiltered()[rightctx.rev()].diff(opts=diffopts)

    # compare hunk by hunk until one side is exhausted (None)
    left = right = 0
    while None not in (left, right):
        left = _getdifflines(leftdiff)
        right = _getdifflines(rightdiff)
        if left != right:
            return False
    return True
415
415
def geteffectflag(relation):
    """ From an obs-marker relation, compute what changed between the
    predecessor and the successor.
    """
    source = relation[0]
    effects = 0

    for succ in relation[1]:
        # description change?
        if succ.description() != source.description():
            effects |= DESCCHANGED

        # user change?
        if succ.user() != source.user():
            effects |= USERCHANGED

        # date change?
        if succ.date() != source.date():
            effects |= DATECHANGED

        # branch change?
        if succ.branch() != source.branch():
            effects |= BRANCHCHANGED

        # at least one parent change?
        if succ.parents() != source.parents():
            effects |= PARENTCHANGED

        # change in the other (non-blacklisted) extra metadata?
        succmeta = list(filter(metanotblacklisted, succ.extra().items()))
        srcmeta = list(filter(metanotblacklisted, source.extra().items()))
        if succmeta != srcmeta:
            effects |= METACHANGED

        # change in the diff introduced by the changeset?
        if not _cmpdiff(source, succ):
            effects |= DIFFCHANGED

    return effects
460
460
def getobsoleted(repo, tr):
    """return the set of pre-existing revisions obsoleted by a transaction

    Walks the obsolescence markers added by ``tr`` and collects the revisions
    that were already present before the transaction and became obsolete.
    """
    torev = repo.unfiltered().changelog.nodemap.get
    phase = repo._phasecache.phase
    succsmarkers = repo.obsstore.successors.get
    public = phases.public
    addedmarkers = tr.changes.get('obsmarkers')
    # Use an explicit item lookup: 'revs' is always populated by the
    # transaction, and tr.changes.get('revs') could return None, which would
    # make the ``rev in addedrevs`` membership test below raise TypeError.
    addedrevs = tr.changes['revs']
    seenrevs = set()
    obsoleted = set()
    for mark in addedmarkers:
        node = mark[0]
        rev = torev(node)
        # skip unknown nodes, already-handled revs, and revs added by this
        # very transaction (those are not "pre-existing")
        if rev is None or rev in seenrevs or rev in addedrevs:
            continue
        seenrevs.add(rev)
        # public changesets cannot become obsolete
        if phase(repo, rev) == public:
            continue
        # only obsolete the rev if every marker about it came from this
        # transaction
        if set(succsmarkers(node) or []).issubset(addedmarkers):
            obsoleted.add(rev)
    return obsoleted
482
482
class _succs(list):
    """small class to represent a successors with some metadata about it"""

    def __init__(self, *args, **kwargs):
        super(_succs, self).__init__(*args, **kwargs)
        # obsolescence markers associated with this successors set
        self.markers = set()

    def copy(self):
        clone = _succs(self)
        clone.markers = self.markers.copy()
        return clone

    @util.propertycache
    def _set(self):
        # immutable
        return set(self)

    def canmerge(self, other):
        return self._set.issubset(other._set)
502
502
def successorssets(repo, initialnode, closest=False, cache=None):
    """Return set of all latest successors of initial nodes

    The successors set of a changeset A are the group of revisions that succeed
    A. It succeeds A as a consistent whole, each revision being only a partial
    replacement. By default, the successors set contains non-obsolete
    changesets only, walking the obsolescence graph until reaching a leaf. If
    'closest' is set to True, closest successors-sets are returned (the
    obsolescence walk stops on known changesets).

    This function returns the full list of successor sets which is why it
    returns a list of tuples and not just a single tuple. Each tuple is a valid
    successors set. Note that (A,) may be a valid successors set for changeset A
    (see below).

    In most cases, a changeset A will have a single element (e.g. the changeset
    A is replaced by A') in its successors set. Though, it is also common for a
    changeset A to have no elements in its successor set (e.g. the changeset
    has been pruned). Therefore, the returned list of successors sets will be
    [(A',)] or [], respectively.

    When a changeset A is split into A' and B', however, it will result in a
    successors set containing more than a single element, i.e. [(A',B')].
    Divergent changesets will result in multiple successors sets, i.e. [(A',),
    (A'')].

    If a changeset A is not obsolete, then it will conceptually have no
    successors set. To distinguish this from a pruned changeset, the successor
    set will contain itself only, i.e. [(A,)].

    Finally, final successors unknown locally are considered to be pruned
    (pruned: obsoleted without any successors). (Final: successors not affected
    by markers).

    The 'closest' mode respects the repoview filtering. For example, without
    filter it will stop at the first locally known changeset, with 'visible'
    filter it will stop on visible changesets).

    The optional `cache` parameter is a dictionary that may contain
    precomputed successors sets. It is meant to reuse the computation of a
    previous call to `successorssets` when multiple calls are made at the same
    time. The cache dictionary is updated in place. The caller is responsible
    for its life span. Code that makes multiple calls to `successorssets`
    *should* use this cache mechanism or risk a performance hit.

    Since results are different depending on the 'closest' mode, the same cache
    cannot be reused for both modes.
    """

    # mapping: node -> markers using that node as predecessor
    succmarkers = repo.obsstore.successors

    # Stack of nodes we search successors sets for
    toproceed = [initialnode]
    # set version of above list for fast loop detection
    # element added to "toproceed" must be added here
    stackedset = set(toproceed)
    if cache is None:
        cache = {}

    # This while loop is the flattened version of a recursive search for
    # successors sets
    #
    # def successorssets(x):
    #     successors = directsuccessors(x)
    #     ss = [[]]
    #     for succ in directsuccessors(x):
    #         # product as in itertools cartesian product
    #         ss = product(ss, successorssets(succ))
    #     return ss
    #
    # But we can not use plain recursive calls here:
    # - that would blow the python call stack
    # - obsolescence markers may have cycles, we need to handle them.
    #
    # The `toproceed` list act as our call stack. Every node we search
    # successors set for are stacked there.
    #
    # The `stackedset` is set version of this stack used to check if a node is
    # already stacked. This check is used to detect cycles and prevent infinite
    # loop.
    #
    # successors set of all nodes are stored in the `cache` dictionary.
    #
    # After this while loop ends we use the cache to return the successors sets
    # for the node requested by the caller.
    while toproceed:
        # Every iteration tries to compute the successors sets of the topmost
        # node of the stack: CURRENT.
        #
        # There are four possible outcomes:
        #
        #    1) We already know the successors sets of CURRENT:
        #       -> mission accomplished, pop it from the stack.
        #    2) Stop the walk:
        #       default case: Node is not obsolete
        #       closest case: Node is known at this repo filter level
        #       -> the node is its own successors sets. Add it to the cache.
        #    3) We do not know successors set of direct successors of CURRENT:
        #       -> We add those successors to the stack.
        #    4) We know successors sets of all direct successors of CURRENT:
        #       -> We can compute CURRENT successors set and add it to the
        #          cache.
        #
        current = toproceed[-1]

        # case 2 condition is a bit hairy because of closest,
        # we compute it on its own
        case2condition = ((current not in succmarkers)
                          or (closest and current != initialnode
                              and current in repo))

        if current in cache:
            # case (1): We already know the successors sets
            stackedset.remove(toproceed.pop())
        elif case2condition:
            # case (2): end of walk.
            if current in repo:
                # We have a valid successors.
                cache[current] = [_succs((current,))]
            else:
                # Final obsolete version is unknown locally.
                # Do not count that as a valid successors
                cache[current] = []
        else:
            # cases (3) and (4)
            #
            # We proceed in two phases. Phase 1 aims to distinguish case (3)
            # from case (4):
            #
            #     For each direct successors of CURRENT, we check whether its
            #     successors sets are known. If they are not, we stack the
            #     unknown node and proceed to the next iteration of the while
            #     loop. (case 3)
            #
            #     During this step, we may detect obsolescence cycles: a node
            #     with unknown successors sets but already in the call stack.
            #     In such a situation, we arbitrary set the successors sets of
            #     the node to nothing (node pruned) to break the cycle.
            #
            #     If no break was encountered we proceed to phase 2.
            #
            # Phase 2 computes successors sets of CURRENT (case 4); see details
            # in phase 2 itself.
            #
            # Note the two levels of iteration in each phase.
            # - The first one handles obsolescence markers using CURRENT as
            #   precursor (successors markers of CURRENT).
            #
            #   Having multiple entry here means divergence.
            #
            # - The second one handles successors defined in each marker.
            #
            #   Having none means pruned node, multiple successors means split,
            #   single successors are standard replacement.
            #
            for mark in sorted(succmarkers[current]):
                for suc in mark[1]:
                    if suc not in cache:
                        if suc in stackedset:
                            # cycle breaking
                            cache[suc] = []
                        else:
                            # case (3) If we have not computed successors sets
                            # of one of those successors we add it to the
                            # `toproceed` stack and stop all work for this
                            # iteration.
                            toproceed.append(suc)
                            stackedset.add(suc)
                            break
                else:
                    continue
                break
            else:
                # case (4): we know all successors sets of all direct
                # successors
                #
                # Successors set contributed by each marker depends on the
                # successors sets of all its "successors" node.
                #
                # Each different marker is a divergence in the obsolescence
                # history. It contributes successors sets distinct from other
                # markers.
                #
                # Within a marker, a successor may have divergent successors
                # sets. In such a case, the marker will contribute multiple
                # divergent successors sets. If multiple successors have
                # divergent successors sets, a Cartesian product is used.
                #
                # At the end we post-process successors sets to remove
                # duplicated entry and successors set that are strict subset of
                # another one.
                succssets = []
                for mark in sorted(succmarkers[current]):
                    # successors sets contributed by this marker
                    base = _succs()
                    base.markers.add(mark)
                    markss = [base]
                    for suc in mark[1]:
                        # cardinal product with previous successors
                        productresult = []
                        for prefix in markss:
                            for suffix in cache[suc]:
                                newss = prefix.copy()
                                newss.markers.update(suffix.markers)
                                for part in suffix:
                                    # do not duplicated entry in successors set
                                    # first entry wins.
                                    if part not in newss:
                                        newss.append(part)
                                productresult.append(newss)
                        markss = productresult
                    succssets.extend(markss)
                # remove duplicated and subset
                seen = []
                final = []
                candidates = sorted((s for s in succssets if s),
                                    key=len, reverse=True)
                for cand in candidates:
                    for seensuccs in seen:
                        if cand.canmerge(seensuccs):
                            seensuccs.markers.update(cand.markers)
                            break
                    else:
                        final.append(cand)
                        seen.append(cand)
                final.reverse()  # put small successors set first
                cache[current] = final
    return cache[initialnode]
731
731
def successorsandmarkers(repo, ctx):
    """compute the raw data needed for computing obsfate

    Returns a list of dict, one dict per successors set
    """
    if not ctx.obsolete():
        return None

    ssets = successorssets(repo, ctx.node(), closest=True)

    # closestsuccessors returns an empty list for pruned revisions, remap it
    # into a list containing an empty list for future processing
    if ssets == []:
        ssets = [[]]

    # Try to recover pruned markers
    succsmap = repo.obsstore.successors
    fullsuccessorsets = []  # successor set + markers
    for sset in ssets:
        if sset:
            fullsuccessorsets.append(sset)
            continue
        # successorsset returns an empty set() when ctx or one of its
        # successors is pruned.
        # In this case, walk the obs-markers tree again starting with ctx
        # and find the relevant pruning obs-makers, the ones without
        # successors.
        # Having these markers allows us to compute some information about
        # its fate, like who pruned this changeset and when.

        # XXX we do not catch all prune markers (eg rewritten then pruned)
        # (fix me later)
        prunemarkers = [m for m in succsmap.get(ctx.node(), ())
                        if not m[1]]
        for mark in prunemarkers:
            pruned = _succs()
            pruned.markers.add(mark)
            fullsuccessorsets.append(pruned)
        if not prunemarkers:
            # no pruning marker found either: record an empty set anyway
            fullsuccessorsets.append(_succs())

    return [{'successors': sset, 'markers': sset.markers}
            for sset in fullsuccessorsets]
778
778
779 def _getobsfate(successorssets):
779 def _getobsfate(successorssets):
780 """ Compute a changeset obsolescence fate based on its successorssets.
780 """ Compute a changeset obsolescence fate based on its successorssets.
781 Successors can be the tipmost ones or the immediate ones. This function
781 Successors can be the tipmost ones or the immediate ones. This function
782 return values are not meant to be shown directly to users, it is meant to
782 return values are not meant to be shown directly to users, it is meant to
783 be used by internal functions only.
783 be used by internal functions only.
784 Returns one fate from the following values:
784 Returns one fate from the following values:
785 - pruned
785 - pruned
786 - diverged
786 - diverged
787 - superseded
787 - superseded
788 - superseded_split
788 - superseded_split
789 """
789 """
790
790
791 if len(successorssets) == 0:
791 if len(successorssets) == 0:
792 # The commit has been pruned
792 # The commit has been pruned
793 return 'pruned'
793 return 'pruned'
794 elif len(successorssets) > 1:
794 elif len(successorssets) > 1:
795 return 'diverged'
795 return 'diverged'
796 else:
796 else:
797 # No divergence, only one set of successors
797 # No divergence, only one set of successors
798 successors = successorssets[0]
798 successors = successorssets[0]
799
799
800 if len(successors) == 1:
800 if len(successors) == 1:
801 return 'superseded'
801 return 'superseded'
802 else:
802 else:
803 return 'superseded_split'
803 return 'superseded_split'
804
804
def obsfateverb(successorset, markers):
    """ Return the verb summarizing the successorset and potentially using
    information from the markers
    """
    if not successorset:
        return 'pruned'
    if len(successorset) == 1:
        return 'rewritten'
    return 'split'
816
816
def markersdates(markers):
    """Return the date field of each marker, preserving order."""
    dates = []
    for mark in markers:
        # index 4 of a marker tuple holds its date
        dates.append(mark[4])
    return dates
821
821
def markersusers(markers):
    """ Returns a sorted list of markers users without duplicates
    """
    users = set()
    for mark in markers:
        # index 3 of a marker tuple holds its metadata as key/value pairs
        meta = dict(mark[3])
        if meta.get('user'):
            users.add(encoding.tolocal(meta['user']))
    return sorted(users)
830
830
def markersoperations(markers):
    """ Returns a sorted list of markers operations without duplicates
    """
    operations = set()
    for mark in markers:
        # index 3 of a marker tuple holds its metadata as key/value pairs
        operation = dict(mark[3]).get('operation')
        if operation:
            operations.add(operation)
    return sorted(operations)
839
839
def obsfateprinter(ui, repo, successors, markers, formatctx):
    """ Build a obsfate string for a single successorset using all obsfate
    related function defined in obsutil
    """
    quiet = ui.quiet
    verbose = ui.verbose
    normal = not verbose and not quiet

    # start with the verb describing the rewrite
    pieces = [obsfateverb(successors, markers)]

    # Operations
    operations = markersoperations(markers)
    if operations:
        pieces.append(" using %s" % ", ".join(operations))

    # Successors
    if successors:
        formatted = [formatctx(repo[succ]) for succ in successors]
        pieces.append(" as %s" % ", ".join(formatted))

    # Users
    users = markersusers(markers)
    if not verbose:
        # Filter out current user in not verbose mode to reduce amount of
        # information
        currentuser = ui.username(acceptempty=True)
        if len(users) == 1 and currentuser in users:
            users = None

    if (verbose or normal) and users:
        pieces.append(" by %s" % ", ".join(users))

    # Date
    dates = markersdates(markers)
    if dates and verbose:
        min_date = min(dates)
        max_date = max(dates)
        if min_date == max_date:
            fmtmin_date = dateutil.datestr(min_date, '%Y-%m-%d %H:%M %1%2')
            pieces.append(" (at %s)" % fmtmin_date)
        else:
            fmtmin_date = dateutil.datestr(min_date, '%Y-%m-%d %H:%M %1%2')
            fmtmax_date = dateutil.datestr(max_date, '%Y-%m-%d %H:%M %1%2')
            pieces.append(" (between %s and %s)" % (fmtmin_date, fmtmax_date))

    return "".join(pieces)
891
891
892
892
# Human-readable explanations of why an obsolete (hidden) changeset is
# filtered out, keyed by the obsolescence fate computed from its
# successors sets (see _getobsfate()).
filteredmsgtable = {
    "pruned": _("hidden revision '%s' is pruned"),
    "diverged": _("hidden revision '%s' has diverged"),
    "superseded": _("hidden revision '%s' was rewritten as: %s"),
    "superseded_split": _("hidden revision '%s' was split as: %s"),
    "superseded_split_several": _("hidden revision '%s' was split as: %s and "
                                  "%d more"),
}
901
901
def _getfilteredreason(repo, changeid, ctx):
    """return a human-friendly string on why a obsolete changeset is hidden
    """
    ssets = successorssets(repo, ctx.node())
    fate = _getobsfate(ssets)

    # Be more precise in case the revision is superseded
    if fate == 'pruned':
        return filteredmsgtable['pruned'] % changeid
    if fate == 'diverged':
        return filteredmsgtable['diverged'] % changeid
    if fate == 'superseded':
        onlysucc = nodemod.short(ssets[0][0])
        return filteredmsgtable['superseded'] % (changeid, onlysucc)
    if fate == 'superseded_split':
        shorthashes = [nodemod.short(node_id) for node_id in ssets[0]]
        if len(shorthashes) <= 2:
            listed = ', '.join(shorthashes)
            return filteredmsgtable['superseded_split'] % (changeid, listed)
        # only show the first two successors, summarize the rest
        listed = ', '.join(shorthashes[:2])
        extra = len(shorthashes) - 2
        return filteredmsgtable['superseded_split_several'] % (changeid,
                                                               listed, extra)
931
931
def divergentsets(repo, ctx):
    """Compute sets of commits divergent with a given one

    Returns a list of dicts with two keys:
    - 'divergentnodes': a successors set (of nodes) diverging from ctx
    - 'commonpredecessor': the predecessor from which both ctx and the
      divergent set were rewritten
    """
    cache = {}
    # map {successors-set (as tuple) -> latest known common predecessor}
    base = {}
    for n in allpredecessors(repo.obsstore, [ctx.node()]):
        if n == ctx.node():
            # a node can't be a base for divergence with itself
            continue
        # Pass the cache by keyword: the third positional parameter of
        # successorssets() is 'closest', so 'successorssets(repo, n, cache)'
        # would silently enable closest mode once the dict is non-empty and
        # would never actually reuse the cache.
        nsuccsets = successorssets(repo, n, cache=cache)
        for nsuccset in nsuccsets:
            if ctx.node() in nsuccset:
                # we are only interested in *other* successor sets
                continue
            if tuple(nsuccset) in base:
                # we already know the latest base for this divergency
                continue
            base[tuple(nsuccset)] = n
    return [{'divergentnodes': divset, 'commonpredecessor': b}
            for divset, b in base.iteritems()]
951
951
def whyunstable(repo, ctx):
    """Return a list of dicts, one per reason why `ctx` is unstable."""
    entries = []
    if ctx.orphan():
        # an orphan has at least one obsolete or orphan parent
        for parent in ctx.parents():
            if parent.orphan():
                kind = 'orphan'
            elif parent.obsolete():
                kind = 'obsolete'
            else:
                continue
            entries.append({'instability': 'orphan',
                            'reason': '%s parent' % kind,
                            'node': parent.hex()})
    if ctx.phasedivergent():
        # phase-divergent: some predecessor became immutable (public)
        predecessors = allpredecessors(repo.obsstore, [ctx.node()],
                                       ignoreflags=bumpedfix)
        for p in predecessors:
            if p not in repo:
                continue
            predecessor = repo[p]
            if predecessor.mutable():
                continue
            entries.append({'instability': 'phase-divergent',
                            'reason': 'immutable predecessor',
                            'node': predecessor.hex()})
    if ctx.contentdivergent():
        for dset in divergentsets(repo, ctx):
            divnodes = [repo[n] for n in dset['divergentnodes']]
            entries.append({'instability': 'content-divergent',
                            'divergentnodes': divnodes,
                            'reason': 'predecessor',
                            'node': nodemod.hex(dset['commonpredecessor'])})
    return entries
General Comments 0
You need to be logged in to leave comments. Login now