##// END OF EJS Templates
index: use `index.has_node` in `obsutil.foreground`...
marmoute -
r43941:1944aaae default
parent child Browse files
Show More
@@ -1,1035 +1,1035 b''
# obsutil.py - utility functions for obsolescence
#
# Copyright 2017 Boris Feld <boris.feld@octobus.net>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import re

from .i18n import _
from . import (
    diffutil,
    encoding,
    node as nodemod,
    phases,
    pycompat,
    util,
)
from .utils import dateutil

### obsolescence marker flag

## bumpedfix flag
#
# When a changeset A' succeed to a changeset A which became public, we call A'
# "bumped" because it's a successors of a public changesets
#
#     o    A' (bumped)
#     |`:
#     | o  A
#     |/
#     o    Z
#
# The way to solve this situation is to create a new changeset Ad as children
# of A. This changeset have the same content than A'. So the diff from A to A'
# is the same than the diff from A to Ad. Ad is marked as a successors of A'
#
#     o   Ad
#     |`:
#     | x A'
#     |'|
#     o | A
#     |/
#     o   Z
#
# But by transitivity Ad is also a successors of A. To avoid having Ad marked
# as bumped too, we add the `bumpedfix` flag to the marker. <A', (Ad,)>.
# This flag mean that the successors express the changes between the public and
# bumped version and fix the situation, breaking the transitivity of
# "bumped" here.
bumpedfix = 1
usingsha256 = 2
55
55
56
56
class marker(object):
    """Wrap obsolete marker raw data"""

    def __init__(self, repo, data):
        # the repo argument will be used to create changectx in later version
        self._repo = repo
        # raw marker tuple: (prednode, succnodes, flags, metadata, date, parents)
        self._data = data
        self._decodedmeta = None

    def __hash__(self):
        return hash(self._data)

    def __eq__(self, other):
        if type(other) != type(self):
            return False
        return self._data == other._data

    def prednode(self):
        """Predecessor changeset node identifier"""
        return self._data[0]

    def succnodes(self):
        """List of successor changesets node identifiers"""
        return self._data[1]

    def parentnodes(self):
        """Parents of the predecessors (None if not recorded)"""
        return self._data[5]

    def metadata(self):
        """Decoded metadata dictionary"""
        return dict(self._data[3])

    def date(self):
        """Creation date as (unixtime, offset)"""
        return self._data[4]

    def flags(self):
        """The flags field of the marker"""
        return self._data[2]
97
97
98
98
def getmarkers(repo, nodes=None, exclusive=False):
    """returns markers known in a repository

    If <nodes> is specified, only markers "relevant" to those nodes are
    returned"""
    if nodes is None:
        # no filtering: iterate the whole obsstore
        rawmarkers = repo.obsstore
    elif exclusive:
        rawmarkers = exclusivemarkers(repo, nodes)
    else:
        rawmarkers = repo.obsstore.relevantmarkers(nodes)

    for markerdata in rawmarkers:
        yield marker(repo, markerdata)
113
113
114
114
def closestpredecessors(repo, nodeid):
    """yield the list of next predecessors pointing on visible changectx nodes

    This function respect the repoview filtering, filtered revision will be
    considered missing.
    """

    precursors = repo.obsstore.predecessors
    stack = [nodeid]
    seen = set(stack)

    while stack:
        current = stack.pop()
        currentpreccs = precursors.get(current, ())

        for prec in currentpreccs:
            precnodeid = prec[0]

            # Basic cycle protection
            if precnodeid in seen:
                continue
            seen.add(precnodeid)

            if precnodeid in repo:
                # visible in the (possibly filtered) repo: report it
                yield precnodeid
            else:
                # unknown/filtered: keep walking further back
                stack.append(precnodeid)
142
142
143
143
def allpredecessors(obsstore, nodes, ignoreflags=0):
    """Yield node for every precursors of <nodes>.

    Some precursors may be unknown locally.

    This is a linear yield unsuited to detecting folded changesets. It includes
    initial nodes too."""

    remaining = set(nodes)
    seen = set(remaining)
    prec = obsstore.predecessors.get
    while remaining:
        current = remaining.pop()
        yield current
        for mark in prec(current, ()):
            # ignore marker flagged with specified flag
            if mark[2] & ignoreflags:
                continue
            suc = mark[0]
            if suc not in seen:
                seen.add(suc)
                remaining.add(suc)
166
166
167
167
def allsuccessors(obsstore, nodes, ignoreflags=0):
    """Yield node for every successor of <nodes>.

    Some successors may be unknown locally.

    This is a linear yield unsuited to detecting split changesets. It includes
    initial nodes too."""
    remaining = set(nodes)
    seen = set(remaining)
    while remaining:
        current = remaining.pop()
        yield current
        for mark in obsstore.successors.get(current, ()):
            # ignore marker flagged with specified flag
            if mark[2] & ignoreflags:
                continue
            for suc in mark[1]:
                if suc not in seen:
                    seen.add(suc)
                    remaining.add(suc)
188
188
189
189
190 def _filterprunes(markers):
190 def _filterprunes(markers):
191 """return a set with no prune markers"""
191 """return a set with no prune markers"""
192 return set(m for m in markers if m[1])
192 return set(m for m in markers if m[1])
193
193
194
194
195 def exclusivemarkers(repo, nodes):
195 def exclusivemarkers(repo, nodes):
196 """set of markers relevant to "nodes" but no other locally-known nodes
196 """set of markers relevant to "nodes" but no other locally-known nodes
197
197
198 This function compute the set of markers "exclusive" to a locally-known
198 This function compute the set of markers "exclusive" to a locally-known
199 node. This means we walk the markers starting from <nodes> until we reach a
199 node. This means we walk the markers starting from <nodes> until we reach a
200 locally-known precursors outside of <nodes>. Element of <nodes> with
200 locally-known precursors outside of <nodes>. Element of <nodes> with
201 locally-known successors outside of <nodes> are ignored (since their
201 locally-known successors outside of <nodes> are ignored (since their
202 precursors markers are also relevant to these successors).
202 precursors markers are also relevant to these successors).
203
203
204 For example:
204 For example:
205
205
206 # (A0 rewritten as A1)
206 # (A0 rewritten as A1)
207 #
207 #
208 # A0 <-1- A1 # Marker "1" is exclusive to A1
208 # A0 <-1- A1 # Marker "1" is exclusive to A1
209
209
210 or
210 or
211
211
212 # (A0 rewritten as AX; AX rewritten as A1; AX is unkown locally)
212 # (A0 rewritten as AX; AX rewritten as A1; AX is unkown locally)
213 #
213 #
214 # <-1- A0 <-2- AX <-3- A1 # Marker "2,3" are exclusive to A1
214 # <-1- A0 <-2- AX <-3- A1 # Marker "2,3" are exclusive to A1
215
215
216 or
216 or
217
217
218 # (A0 has unknown precursors, A0 rewritten as A1 and A2 (divergence))
218 # (A0 has unknown precursors, A0 rewritten as A1 and A2 (divergence))
219 #
219 #
220 # <-2- A1 # Marker "2" is exclusive to A0,A1
220 # <-2- A1 # Marker "2" is exclusive to A0,A1
221 # /
221 # /
222 # <-1- A0
222 # <-1- A0
223 # \
223 # \
224 # <-3- A2 # Marker "3" is exclusive to A0,A2
224 # <-3- A2 # Marker "3" is exclusive to A0,A2
225 #
225 #
226 # in addition:
226 # in addition:
227 #
227 #
228 # Markers "2,3" are exclusive to A1,A2
228 # Markers "2,3" are exclusive to A1,A2
229 # Markers "1,2,3" are exclusive to A0,A1,A2
229 # Markers "1,2,3" are exclusive to A0,A1,A2
230
230
231 See test/test-obsolete-bundle-strip.t for more examples.
231 See test/test-obsolete-bundle-strip.t for more examples.
232
232
233 An example usage is strip. When stripping a changeset, we also want to
233 An example usage is strip. When stripping a changeset, we also want to
234 strip the markers exclusive to this changeset. Otherwise we would have
234 strip the markers exclusive to this changeset. Otherwise we would have
235 "dangling"" obsolescence markers from its precursors: Obsolescence markers
235 "dangling"" obsolescence markers from its precursors: Obsolescence markers
236 marking a node as obsolete without any successors available locally.
236 marking a node as obsolete without any successors available locally.
237
237
238 As for relevant markers, the prune markers for children will be followed.
238 As for relevant markers, the prune markers for children will be followed.
239 Of course, they will only be followed if the pruned children is
239 Of course, they will only be followed if the pruned children is
240 locally-known. Since the prune markers are relevant to the pruned node.
240 locally-known. Since the prune markers are relevant to the pruned node.
241 However, while prune markers are considered relevant to the parent of the
241 However, while prune markers are considered relevant to the parent of the
242 pruned changesets, prune markers for locally-known changeset (with no
242 pruned changesets, prune markers for locally-known changeset (with no
243 successors) are considered exclusive to the pruned nodes. This allows
243 successors) are considered exclusive to the pruned nodes. This allows
244 to strip the prune markers (with the rest of the exclusive chain) alongside
244 to strip the prune markers (with the rest of the exclusive chain) alongside
245 the pruned changesets.
245 the pruned changesets.
246 """
246 """
247 # running on a filtered repository would be dangerous as markers could be
247 # running on a filtered repository would be dangerous as markers could be
248 # reported as exclusive when they are relevant for other filtered nodes.
248 # reported as exclusive when they are relevant for other filtered nodes.
249 unfi = repo.unfiltered()
249 unfi = repo.unfiltered()
250
250
251 # shortcut to various useful item
251 # shortcut to various useful item
252 has_node = unfi.changelog.index.has_node
252 has_node = unfi.changelog.index.has_node
253 precursorsmarkers = unfi.obsstore.predecessors
253 precursorsmarkers = unfi.obsstore.predecessors
254 successormarkers = unfi.obsstore.successors
254 successormarkers = unfi.obsstore.successors
255 childrenmarkers = unfi.obsstore.children
255 childrenmarkers = unfi.obsstore.children
256
256
257 # exclusive markers (return of the function)
257 # exclusive markers (return of the function)
258 exclmarkers = set()
258 exclmarkers = set()
259 # we need fast membership testing
259 # we need fast membership testing
260 nodes = set(nodes)
260 nodes = set(nodes)
261 # looking for head in the obshistory
261 # looking for head in the obshistory
262 #
262 #
263 # XXX we are ignoring all issues in regard with cycle for now.
263 # XXX we are ignoring all issues in regard with cycle for now.
264 stack = [n for n in nodes if not _filterprunes(successormarkers.get(n, ()))]
264 stack = [n for n in nodes if not _filterprunes(successormarkers.get(n, ()))]
265 stack.sort()
265 stack.sort()
266 # nodes already stacked
266 # nodes already stacked
267 seennodes = set(stack)
267 seennodes = set(stack)
268 while stack:
268 while stack:
269 current = stack.pop()
269 current = stack.pop()
270 # fetch precursors markers
270 # fetch precursors markers
271 markers = list(precursorsmarkers.get(current, ()))
271 markers = list(precursorsmarkers.get(current, ()))
272 # extend the list with prune markers
272 # extend the list with prune markers
273 for mark in successormarkers.get(current, ()):
273 for mark in successormarkers.get(current, ()):
274 if not mark[1]:
274 if not mark[1]:
275 markers.append(mark)
275 markers.append(mark)
276 # and markers from children (looking for prune)
276 # and markers from children (looking for prune)
277 for mark in childrenmarkers.get(current, ()):
277 for mark in childrenmarkers.get(current, ()):
278 if not mark[1]:
278 if not mark[1]:
279 markers.append(mark)
279 markers.append(mark)
280 # traverse the markers
280 # traverse the markers
281 for mark in markers:
281 for mark in markers:
282 if mark in exclmarkers:
282 if mark in exclmarkers:
283 # markers already selected
283 # markers already selected
284 continue
284 continue
285
285
286 # If the markers is about the current node, select it
286 # If the markers is about the current node, select it
287 #
287 #
288 # (this delay the addition of markers from children)
288 # (this delay the addition of markers from children)
289 if mark[1] or mark[0] == current:
289 if mark[1] or mark[0] == current:
290 exclmarkers.add(mark)
290 exclmarkers.add(mark)
291
291
292 # should we keep traversing through the precursors?
292 # should we keep traversing through the precursors?
293 prec = mark[0]
293 prec = mark[0]
294
294
295 # nodes in the stack or already processed
295 # nodes in the stack or already processed
296 if prec in seennodes:
296 if prec in seennodes:
297 continue
297 continue
298
298
299 # is this a locally known node ?
299 # is this a locally known node ?
300 known = has_node(prec)
300 known = has_node(prec)
301 # if locally-known and not in the <nodes> set the traversal
301 # if locally-known and not in the <nodes> set the traversal
302 # stop here.
302 # stop here.
303 if known and prec not in nodes:
303 if known and prec not in nodes:
304 continue
304 continue
305
305
306 # do not keep going if there are unselected markers pointing to this
306 # do not keep going if there are unselected markers pointing to this
307 # nodes. If we end up traversing these unselected markers later the
307 # nodes. If we end up traversing these unselected markers later the
308 # node will be taken care of at that point.
308 # node will be taken care of at that point.
309 precmarkers = _filterprunes(successormarkers.get(prec))
309 precmarkers = _filterprunes(successormarkers.get(prec))
310 if precmarkers.issubset(exclmarkers):
310 if precmarkers.issubset(exclmarkers):
311 seennodes.add(prec)
311 seennodes.add(prec)
312 stack.append(prec)
312 stack.append(prec)
313
313
314 return exclmarkers
314 return exclmarkers
315
315
316
316
def foreground(repo, nodes):
    """return all nodes in the "foreground" of other node

    The foreground of a revision is anything reachable using parent -> children
    or precursor -> successor relation. It is very similar to "descendant" but
    augmented with obsolescence information.

    Beware that possible obsolescence cycle may result if complex situation.
    """
    repo = repo.unfiltered()
    foreground = set(repo.set(b'%ln::', nodes))
    if repo.obsstore:
        # We only need this complicated logic if there is obsolescence
        # XXX will probably deserve an optimised revset.
        has_node = repo.changelog.index.has_node
        plen = -1
        # compute the whole set of successors or descendants
        while len(foreground) != plen:
            plen = len(foreground)
            succs = set(c.node() for c in foreground)
            mutable = [c.node() for c in foreground if c.mutable()]
            succs.update(allsuccessors(repo.obsstore, mutable))
            # only locally-known nodes can be turned into changectx
            known = (n for n in succs if has_node(n))
            foreground = set(repo.set(b'%ln::', known))
    return set(c.node() for c in foreground)
342
342
343
343
# effectflag field
#
# Effect-flag is a 1-byte bit field used to store what changed between a
# changeset and its successor(s).
#
# The effect flag is stored in obs-markers metadata while we iterate on the
# information design. That's why we have the EFFECTFLAGFIELD. If we come up
# with an incompatible design for effect flag, we can store a new design under
# another field name so we don't break readers. We plan to extend the existing
# obsmarkers bit-field when the effect flag design will be stabilized.
#
# The effect-flag is placed behind an experimental flag
# `effect-flags` set to off by default.
#

EFFECTFLAGFIELD = b"ef1"

DESCCHANGED = 1 << 0  # action changed the description
METACHANGED = 1 << 1  # action change the meta
DIFFCHANGED = 1 << 3  # action change diff introduced by the changeset
PARENTCHANGED = 1 << 2  # action change the parent
USERCHANGED = 1 << 4  # the user changed
DATECHANGED = 1 << 5  # the date changed
BRANCHCHANGED = 1 << 6  # the branch changed
368
368
# extra keys that must not be compared when computing the effect flag:
# branch is tracked by BRANCHCHANGED, and the various "*source" keys are
# bookkeeping added by rewrite operations themselves.
METABLACKLIST = [
    re.compile(b'^branch$'),
    re.compile(b'^.*-source$'),
    re.compile(b'^.*_source$'),
    re.compile(b'^source$'),
]


def metanotblacklisted(metaitem):
    """ Check that the key of a meta item (extrakey, extravalue) does not
    match at least one of the blacklist pattern
    """
    metakey = metaitem[0]

    return not any(pattern.match(metakey) for pattern in METABLACKLIST)
384
384
385
385
386 def _prepare_hunk(hunk):
386 def _prepare_hunk(hunk):
387 """Drop all information but the username and patch"""
387 """Drop all information but the username and patch"""
388 cleanhunk = []
388 cleanhunk = []
389 for line in hunk.splitlines():
389 for line in hunk.splitlines():
390 if line.startswith(b'# User') or not line.startswith(b'#'):
390 if line.startswith(b'# User') or not line.startswith(b'#'):
391 if line.startswith(b'@@'):
391 if line.startswith(b'@@'):
392 line = b'@@\n'
392 line = b'@@\n'
393 cleanhunk.append(line)
393 cleanhunk.append(line)
394 return cleanhunk
394 return cleanhunk
395
395
396
396
397 def _getdifflines(iterdiff):
397 def _getdifflines(iterdiff):
398 """return a cleaned up lines"""
398 """return a cleaned up lines"""
399 lines = next(iterdiff, None)
399 lines = next(iterdiff, None)
400
400
401 if lines is None:
401 if lines is None:
402 return lines
402 return lines
403
403
404 return _prepare_hunk(lines)
404 return _prepare_hunk(lines)
405
405
406
406
def _cmpdiff(leftctx, rightctx):
    """return True if both ctx introduce the "same diff"

    This is a first and basic implementation, with many shortcoming.
    """
    diffopts = diffutil.diffallopts(leftctx.repo().ui, {b'git': True})

    # Leftctx or right ctx might be filtered, so we need to use the contexts
    # with an unfiltered repository to safely compute the diff

    # leftctx and rightctx can be from different repository views in case of
    # hgsubversion, so don't try to access them from same repository
    # rightctx.repo() and leftctx.repo() are not always the same
    leftunfi = leftctx._repo.unfiltered()[leftctx.rev()]
    leftdiff = leftunfi.diff(opts=diffopts)
    rightunfi = rightctx._repo.unfiltered()[rightctx.rev()]
    rightdiff = rightunfi.diff(opts=diffopts)

    left, right = (0, 0)
    # compare hunk by hunk; both iterators return None when exhausted
    while None not in (left, right):
        left = _getdifflines(leftdiff)
        right = _getdifflines(rightdiff)

        if left != right:
            return False
    return True
433
433
434
434
def geteffectflag(source, successors):
    """ From an obs-marker relation, compute what changed between the
    predecessor and the successor.
    """
    effects = 0

    for changectx in successors:
        # Check if description has changed
        if changectx.description() != source.description():
            effects |= DESCCHANGED

        # Check if user has changed
        if changectx.user() != source.user():
            effects |= USERCHANGED

        # Check if date has changed
        if changectx.date() != source.date():
            effects |= DATECHANGED

        # Check if branch has changed
        if changectx.branch() != source.branch():
            effects |= BRANCHCHANGED

        # Check if at least one of the parent has changed
        if changectx.parents() != source.parents():
            effects |= PARENTCHANGED

        # Check if other meta has changed (ignoring blacklisted keys such as
        # "branch" and the rewrite-bookkeeping "*source" entries)
        changeextra = changectx.extra().items()
        ctxmeta = list(filter(metanotblacklisted, changeextra))

        sourceextra = source.extra().items()
        srcmeta = list(filter(metanotblacklisted, sourceextra))

        if ctxmeta != srcmeta:
            effects |= METACHANGED

        # Check if the diff has changed
        if not _cmpdiff(source, changectx):
            effects |= DIFFCHANGED

    return effects
477
477
478
478
def getobsoleted(repo, tr):
    """return the set of pre-existing revisions obsoleted by a transaction"""
    torev = repo.unfiltered().changelog.nodemap.get
    phase = repo._phasecache.phase
    succsmarkers = repo.obsstore.successors.get
    public = phases.public
    addedmarkers = tr.changes[b'obsmarkers']
    origrepolen = tr.changes[b'origrepolen']
    seenrevs = set()
    obsoleted = set()
    for mark in addedmarkers:
        node = mark[0]
        rev = torev(node)
        # skip unknown nodes, duplicates, and revisions added by this
        # very transaction (they are not "pre-existing")
        if rev is None or rev in seenrevs or rev >= origrepolen:
            continue
        seenrevs.add(rev)
        # public changesets cannot become obsolete
        if phase(repo, rev) == public:
            continue
        # only report the node obsoleted if every marker making it obsolete
        # was added by this transaction
        if set(succsmarkers(node) or []).issubset(addedmarkers):
            obsoleted.add(rev)
    return obsoleted
500
500
501
501
class _succs(list):
    """small class to represent a successors with some metadata about it"""

    def __init__(self, *args, **kwargs):
        super(_succs, self).__init__(*args, **kwargs)
        # markers that produced this successors set
        self.markers = set()

    def copy(self):
        new = _succs(self)
        new.markers = self.markers.copy()
        return new

    @util.propertycache
    def _set(self):
        # immutable
        return set(self)

    def canmerge(self, other):
        return self._set.issubset(other._set)
521
521
522
522
def successorssets(repo, initialnode, closest=False, cache=None):
    """Return set of all latest successors of initial nodes

    The successors set of a changeset A are the group of revisions that succeed
    A. It succeeds A as a consistent whole, each revision being only a partial
    replacement. By default, the successors set contains non-obsolete
    changesets only, walking the obsolescence graph until reaching a leaf. If
    'closest' is set to True, closest successors-sets are return (the
    obsolescence walk stops on known changesets).

    This function returns the full list of successor sets which is why it
    returns a list of tuples and not just a single tuple. Each tuple is a valid
    successors set. Note that (A,) may be a valid successors set for changeset A
    (see below).

    In most cases, a changeset A will have a single element (e.g. the changeset
    A is replaced by A') in its successors set. Though, it is also common for a
    changeset A to have no elements in its successor set (e.g. the changeset
    has been pruned). Therefore, the returned list of successors sets will be
    [(A',)] or [], respectively.

    When a changeset A is split into A' and B', however, it will result in a
    successors set containing more than a single element, i.e. [(A',B')].
    Divergent changesets will result in multiple successors sets, i.e. [(A',),
    (A'')].

    If a changeset A is not obsolete, then it will conceptually have no
    successors set. To distinguish this from a pruned changeset, the successor
    set will contain itself only, i.e. [(A,)].

    Finally, final successors unknown locally are considered to be pruned
    (pruned: obsoleted without any successors). (Final: successors not affected
    by markers).

    The 'closest' mode respect the repoview filtering. For example, without
    filter it will stop at the first locally known changeset, with 'visible'
    filter it will stop on visible changesets).

    The optional `cache` parameter is a dictionary that may contains
    precomputed successors sets. It is meant to reuse the computation of a
    previous call to `successorssets` when multiple calls are made at the same
    time. The cache dictionary is updated in place. The caller is responsible
    for its life span. Code that makes multiple calls to `successorssets`
    *should* use this cache mechanism or risk a performance hit.

    Since results are different depending of the 'closest' most, the same cache
    cannot be reused for both mode.
    """

    succmarkers = repo.obsstore.successors

    # Stack of nodes we search successors sets for
    toproceed = [initialnode]
    # set version of above list for fast loop detection
    # element added to "toproceed" must be added here
    stackedset = set(toproceed)
    if cache is None:
        cache = {}

    # This while loop is the flattened version of a recursive search for
    # successors sets
    #
    # def successorssets(x):
    #     successors = directsuccessors(x)
    #     ss = [[]]
    #     for succ in directsuccessors(x):
    #         # product as in itertools cartesian product
    #         ss = product(ss, successorssets(succ))
    #     return ss
    #
    # But we can not use plain recursive calls here:
    # - that would blow the python call stack
    # - obsolescence markers may have cycles, we need to handle them.
    #
    # The `toproceed` list act as our call stack. Every node we search
    # successors set for are stacked there.
    #
    # The `stackedset` is set version of this stack used to check if a node is
    # already stacked. This check is used to detect cycles and prevent infinite
    # loop.
    #
    # successors set of all nodes are stored in the `cache` dictionary.
    #
    # After this while loop ends we use the cache to return the successors sets
    # for the node requested by the caller.
    while toproceed:
        # Every iteration tries to compute the successors sets of the topmost
        # node of the stack: CURRENT.
        #
        # There are four possible outcomes:
        #
        # 1) We already know the successors sets of CURRENT:
        #    -> mission accomplished, pop it from the stack.
        # 2) Stop the walk:
        #    default case: Node is not obsolete
        #    closest case: Node is known at this repo filter level
        #      -> the node is its own successors sets. Add it to the cache.
        # 3) We do not know successors set of direct successors of CURRENT:
        #    -> We add those successors to the stack.
        # 4) We know successors sets of all direct successors of CURRENT:
        #    -> We can compute CURRENT successors set and add it to the
        #       cache.
        #
        current = toproceed[-1]

        # case 2 condition is a bit hairy because of closest,
        # we compute it on its own
        case2condition = (current not in succmarkers) or (
            closest and current != initialnode and current in repo
        )

        if current in cache:
            # case (1): We already know the successors sets
            stackedset.remove(toproceed.pop())
        elif case2condition:
            # case (2): end of walk.
            if current in repo:
                # We have a valid successors.
                cache[current] = [_succs((current,))]
            else:
                # Final obsolete version is unknown locally.
                # Do not count that as a valid successors
                cache[current] = []
        else:
            # cases (3) and (4)
            #
            # We proceed in two phases. Phase 1 aims to distinguish case (3)
            # from case (4):
            #
            #     For each direct successors of CURRENT, we check whether its
            #     successors sets are known. If they are not, we stack the
            #     unknown node and proceed to the next iteration of the while
            #     loop. (case 3)
            #
            #     During this step, we may detect obsolescence cycles: a node
            #     with unknown successors sets but already in the call stack.
            #     In such a situation, we arbitrary set the successors sets of
            #     the node to nothing (node pruned) to break the cycle.
            #
            #     If no break was encountered we proceed to phase 2.
            #
            # Phase 2 computes successors sets of CURRENT (case 4); see details
            # in phase 2 itself.
            #
            # Note the two levels of iteration in each phase.
            # - The first one handles obsolescence markers using CURRENT as
            #   precursor (successors markers of CURRENT).
            #
            #   Having multiple entry here means divergence.
            #
            # - The second one handles successors defined in each marker.
            #
            #   Having none means pruned node, multiple successors means split,
            #   single successors are standard replacement.
            #
            for mark in sorted(succmarkers[current]):
                for suc in mark[1]:
                    if suc not in cache:
                        if suc in stackedset:
                            # cycle breaking
                            cache[suc] = []
                        else:
                            # case (3) If we have not computed successors sets
                            # of one of those successors we add it to the
                            # `toproceed` stack and stop all work for this
                            # iteration.
                            toproceed.append(suc)
                            stackedset.add(suc)
                            break
                else:
                    # inner `for` completed: every successor of this marker is
                    # already in the cache, move on to the next marker
                    continue
                # inner `for` broke out (case 3): propagate the break so the
                # outer `while` restarts on the newly stacked node
                break
            else:
                # case (4): we know all successors sets of all direct
                # successors
                #
                # Successors set contributed by each marker depends on the
                # successors sets of all its "successors" node.
                #
                # Each different marker is a divergence in the obsolescence
                # history. It contributes successors sets distinct from other
                # markers.
                #
                # Within a marker, a successor may have divergent successors
                # sets. In such a case, the marker will contribute multiple
                # divergent successors sets. If multiple successors have
                # divergent successors sets, a Cartesian product is used.
                #
                # At the end we post-process successors sets to remove
                # duplicated entry and successors set that are strict subset of
                # another one.
                succssets = []
                for mark in sorted(succmarkers[current]):
                    # successors sets contributed by this marker
                    base = _succs()
                    base.markers.add(mark)
                    markss = [base]
                    for suc in mark[1]:
                        # cardinal product with previous successors
                        productresult = []
                        for prefix in markss:
                            for suffix in cache[suc]:
                                newss = prefix.copy()
                                newss.markers.update(suffix.markers)
                                for part in suffix:
                                    # do not duplicated entry in successors set
                                    # first entry wins.
                                    if part not in newss:
                                        newss.append(part)
                                productresult.append(newss)
                        if productresult:
                            markss = productresult
                    succssets.extend(markss)
                # remove duplicated and subset
                seen = []
                final = []
                candidates = sorted(
                    (s for s in succssets if s), key=len, reverse=True
                )
                for cand in candidates:
                    for seensuccs in seen:
                        if cand.canmerge(seensuccs):
                            seensuccs.markers.update(cand.markers)
                            break
                    else:
                        final.append(cand)
                        seen.append(cand)
                final.reverse()  # put small successors set first
                cache[current] = final
    return cache[initialnode]
753
753
754
754
def successorsandmarkers(repo, ctx):
    """Compute the raw data needed for computing obsfate.

    Returns a list of dicts (one per successors set), each holding the
    successors and the markers that produced them, or ``None`` when ``ctx``
    is not obsolete.
    """
    if not ctx.obsolete():
        return None

    candidates = successorssets(repo, ctx.node(), closest=True)

    # closestsuccessors returns an empty list for pruned revisions, remap it
    # into a list containing an empty list for future processing
    if candidates == []:
        candidates = [[]]

    # Try to recover pruned markers
    markersbynode = repo.obsstore.successors
    withmarkers = []  # successor set + markers
    for candidate in candidates:
        if candidate:
            withmarkers.append(candidate)
            continue
        # successorsset return an empty set() when ctx or one of its
        # successors is pruned.
        # In this case, walk the obs-markers tree again starting with ctx
        # and find the relevant pruning obs-makers, the ones without
        # successors.
        # Having these markers allow us to compute some information about
        # its fate, like who pruned this changeset and when.

        # XXX we do not catch all prune markers (eg rewritten then pruned)
        # (fix me later)
        recovered = False
        for mark in markersbynode.get(ctx.node(), ()):
            if mark[1]:
                continue
            recovered = True
            pruned = _succs()
            pruned.markers.add(mark)
            withmarkers.append(pruned)
        if not recovered:
            withmarkers.append(_succs())

    return [
        {b'successors': sset, b'markers': sset.markers} for sset in withmarkers
    ]
801
801
802
802
803 def _getobsfate(successorssets):
803 def _getobsfate(successorssets):
804 """ Compute a changeset obsolescence fate based on its successorssets.
804 """ Compute a changeset obsolescence fate based on its successorssets.
805 Successors can be the tipmost ones or the immediate ones. This function
805 Successors can be the tipmost ones or the immediate ones. This function
806 return values are not meant to be shown directly to users, it is meant to
806 return values are not meant to be shown directly to users, it is meant to
807 be used by internal functions only.
807 be used by internal functions only.
808 Returns one fate from the following values:
808 Returns one fate from the following values:
809 - pruned
809 - pruned
810 - diverged
810 - diverged
811 - superseded
811 - superseded
812 - superseded_split
812 - superseded_split
813 """
813 """
814
814
815 if len(successorssets) == 0:
815 if len(successorssets) == 0:
816 # The commit has been pruned
816 # The commit has been pruned
817 return b'pruned'
817 return b'pruned'
818 elif len(successorssets) > 1:
818 elif len(successorssets) > 1:
819 return b'diverged'
819 return b'diverged'
820 else:
820 else:
821 # No divergence, only one set of successors
821 # No divergence, only one set of successors
822 successors = successorssets[0]
822 successors = successorssets[0]
823
823
824 if len(successors) == 1:
824 if len(successors) == 1:
825 return b'superseded'
825 return b'superseded'
826 else:
826 else:
827 return b'superseded_split'
827 return b'superseded_split'
828
828
829
829
def obsfateverb(successorset, markers):
    """Return the verb summarizing the successorset.

    ``markers`` is accepted for API symmetry with the other obsfate helpers
    but is not currently consulted.
    """
    if not successorset:
        return b'pruned'
    if len(successorset) == 1:
        return b'rewritten'
    return b'split'
841
841
842
842
def markersdates(markers):
    """Return the list of dates for a list of markers."""
    # the date lives at index 4 of the marker tuple
    dates = []
    for marker in markers:
        dates.append(marker[4])
    return dates
847
847
848
848
def markersusers(markers):
    """Return a sorted list of the markers' users, without duplicates."""
    seen = set()
    for marker in markers:
        # marker[3] holds the metadata as a tuple of (key, value) pairs
        user = dict(marker[3]).get(b'user')
        if user:
            seen.add(encoding.tolocal(user))
    return sorted(seen)
860
860
861
861
def markersoperations(markers):
    """Return a sorted list of the markers' operations, without duplicates."""
    seen = set()
    for marker in markers:
        # marker[3] holds the metadata as a tuple of (key, value) pairs
        operation = dict(marker[3]).get(b'operation')
        if operation:
            seen.add(operation)
    return sorted(seen)
871
871
872
872
def obsfateprinter(ui, repo, successors, markers, formatctx):
    """ Build a obsfate string for a single successorset using all obsfate
    related function defined in obsutil

    ``successors`` is one successors set; ``markers`` the obsolescence
    markers that produced it; ``formatctx`` a callable turning a changectx
    into its displayed form. The amount of detail emitted depends on
    ``ui.quiet`` / ``ui.verbose``.
    """
    quiet = ui.quiet
    verbose = ui.verbose
    normal = not verbose and not quiet

    # each appended fragment already carries its leading separator, so the
    # final string is a plain concatenation
    line = []

    # Verb
    line.append(obsfateverb(successors, markers))

    # Operations
    operations = markersoperations(markers)
    if operations:
        line.append(b" using %s" % b", ".join(operations))

    # Successors
    if successors:
        fmtsuccessors = [formatctx(repo[succ]) for succ in successors]
        line.append(b" as %s" % b", ".join(fmtsuccessors))

    # Users
    users = markersusers(markers)
    # Filter out current user in not verbose mode to reduce amount of
    # information
    if not verbose:
        currentuser = ui.username(acceptempty=True)
        if len(users) == 1 and currentuser in users:
            users = None

    if (verbose or normal) and users:
        line.append(b" by %s" % b", ".join(users))

    # Date
    dates = markersdates(markers)

    # dates are only shown in verbose mode; collapse to a single timestamp
    # when all markers share the same date
    if dates and verbose:
        min_date = min(dates)
        max_date = max(dates)

        if min_date == max_date:
            fmtmin_date = dateutil.datestr(min_date, b'%Y-%m-%d %H:%M %1%2')
            line.append(b" (at %s)" % fmtmin_date)
        else:
            fmtmin_date = dateutil.datestr(min_date, b'%Y-%m-%d %H:%M %1%2')
            fmtmax_date = dateutil.datestr(max_date, b'%Y-%m-%d %H:%M %1%2')
            line.append(b" (between %s and %s)" % (fmtmin_date, fmtmax_date))

    return b"".join(line)
924
924
925
925
# Messages explaining why a hidden (obsolete) revision is filtered out,
# keyed by the fate computed from its successors sets. Each template takes
# the changeset id; the "superseded*" entries additionally take a
# description of the successors.
filteredmsgtable = {
    b"pruned": _(b"hidden revision '%s' is pruned"),
    b"diverged": _(b"hidden revision '%s' has diverged"),
    b"superseded": _(b"hidden revision '%s' was rewritten as: %s"),
    b"superseded_split": _(b"hidden revision '%s' was split as: %s"),
    b"superseded_split_several": _(
        b"hidden revision '%s' was split as: %s and %d more"
    ),
}
935
935
936
936
def _getfilteredreason(repo, changeid, ctx):
    """Return a human-friendly string on why an obsolete changeset is hidden."""
    ssets = successorssets(repo, ctx.node())
    fate = _getobsfate(ssets)

    # Be more precise in case the revision is superseded
    if fate == b'pruned':
        return filteredmsgtable[b'pruned'] % changeid
    if fate == b'diverged':
        return filteredmsgtable[b'diverged'] % changeid
    if fate == b'superseded':
        replacement = nodemod.short(ssets[0][0])
        return filteredmsgtable[b'superseded'] % (changeid, replacement)
    if fate == b'superseded_split':
        shortened = [nodemod.short(node_id) for node_id in ssets[0]]

        if len(shortened) <= 2:
            return filteredmsgtable[b'superseded_split'] % (
                changeid,
                b', '.join(shortened),
            )
        # too many successors to list them all: show the first two and a count
        args = (changeid, b', '.join(shortened[:2]), len(shortened) - 2)
        return filteredmsgtable[b'superseded_split_several'] % args
966
966
967
967
def divergentsets(repo, ctx):
    """Compute sets of commits divergent with a given one.

    Returns a list of dicts, one per divergent successors set, with:
    - b'divergentnodes': the successors set divergent with ``ctx``
    - b'commonpredecessor': the latest common predecessor found for it
    """
    cache = {}
    base = {}
    for n in allpredecessors(repo.obsstore, [ctx.node()]):
        if n == ctx.node():
            # a node can't be a base for divergence with itself
            continue
        # Pass ``cache`` by keyword: the third positional parameter of
        # successorssets() is ``closest``, so the previous positional call
        # silently bound the cache dict to ``closest`` and disabled the
        # memoization this loop relies on.
        nsuccsets = successorssets(repo, n, cache=cache)
        for nsuccset in nsuccsets:
            if ctx.node() in nsuccset:
                # we are only interested in *other* successor sets
                continue
            if tuple(nsuccset) in base:
                # we already know the latest base for this divergency
                continue
            base[tuple(nsuccset)] = n
    return [
        {b'divergentnodes': divset, b'commonpredecessor': b}
        for divset, b in pycompat.iteritems(base)
    ]
989
989
990
990
def whyunstable(repo, ctx):
    """Explain why ``ctx`` is unstable.

    Returns a list of dicts, one per cause, each carrying at least
    b'instability' (the instability kind), b'reason' (a short byte
    string) and b'node' (hex of the node responsible).
    """
    entries = []
    if ctx.orphan():
        for parent in ctx.parents():
            # an orphan is caused by a parent that is itself unstable
            # or obsolete; other parents are irrelevant here
            if parent.orphan():
                kind = b'orphan'
            elif parent.obsolete():
                kind = b'obsolete'
            else:
                continue
            entries.append(
                {
                    b'instability': b'orphan',
                    b'reason': b'%s parent' % kind,
                    b'node': parent.hex(),
                }
            )
    if ctx.phasedivergent():
        # phase divergence comes from an immutable (public) predecessor;
        # bumpedfix markers are ignored while walking predecessors
        predecessors = allpredecessors(
            repo.obsstore, [ctx.node()], ignoreflags=bumpedfix
        )
        for p in predecessors:
            if p not in repo:
                continue
            predctx = repo[p]
            if predctx.mutable():
                continue
            entries.append(
                {
                    b'instability': b'phase-divergent',
                    b'reason': b'immutable predecessor',
                    b'node': predctx.hex(),
                }
            )
    if ctx.contentdivergent():
        for dset in divergentsets(repo, ctx):
            entries.append(
                {
                    b'instability': b'content-divergent',
                    b'divergentnodes': [
                        repo[n] for n in dset[b'divergentnodes']
                    ],
                    b'reason': b'predecessor',
                    b'node': nodemod.hex(dset[b'commonpredecessor']),
                }
            )
    return entries
General Comments 0
You need to be logged in to leave comments. Login now