##// END OF EJS Templates
obsutil: maintain a homogenous list when computing successors...
Matt Harbison -
r47549:d35063eb stable
parent child Browse files
Show More
@@ -1,1049 +1,1049 b''
1 # obsutil.py - utility functions for obsolescence
1 # obsutil.py - utility functions for obsolescence
2 #
2 #
3 # Copyright 2017 Boris Feld <boris.feld@octobus.net>
3 # Copyright 2017 Boris Feld <boris.feld@octobus.net>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import re
10 import re
11
11
12 from .i18n import _
12 from .i18n import _
13 from .node import (
13 from .node import (
14 hex,
14 hex,
15 short,
15 short,
16 )
16 )
17 from . import (
17 from . import (
18 diffutil,
18 diffutil,
19 encoding,
19 encoding,
20 error,
20 error,
21 phases,
21 phases,
22 pycompat,
22 pycompat,
23 util,
23 util,
24 )
24 )
25 from .utils import dateutil
25 from .utils import dateutil
26
26
### obsolescence marker flag

## bumpedfix flag
#
# When a changeset A' succeed to a changeset A which became public, we call A'
# "bumped" because it's a successors of a public changesets
#
# o A' (bumped)
# |`:
# | o A
# |/
# o Z
#
# The way to solve this situation is to create a new changeset Ad as children
# of A. This changeset have the same content than A'. So the diff from A to A'
# is the same than the diff from A to Ad. Ad is marked as a successors of A'
#
# o Ad
# |`:
# | x A'
# |'|
# o | A
# |/
# o Z
#
# But by transitivity Ad is also a successors of A. To avoid having Ad marked
# as bumped too, we add the `bumpedfix` flag to the marker. <A', (Ad,)>.
# This flag mean that the successors express the changes between the public and
# bumped version and fix the situation, breaking the transitivity of
# "bumped" here.

# marker flag: the successor fixes a "bumped" (phase-divergent) situation,
# breaking the transitivity of the bumped property (see diagram above)
bumpedfix = 1
# marker flag: hashes in the marker use SHA-256 (per the name) rather than
# the default hash — NOTE(review): exact wire semantics defined in obsolete.py
usingsha256 = 2
59
59
60
60
class marker(object):
    """Thin wrapper giving named accessors over a raw obsolescence marker.

    The raw marker is a tuple laid out as:
    (predecessor, successors, flags, metadata, date, parents).
    """

    def __init__(self, repo, data):
        # the repo argument will be used to create changectx in later version
        self._repo = repo
        self._data = data
        self._decodedmeta = None

    def __hash__(self):
        # hash only on the raw tuple, mirroring __eq__
        return hash(self._data)

    def __eq__(self, other):
        # equal when the other object is a marker wrapping the same raw data
        return type(self) == type(other) and self._data == other._data

    def prednode(self):
        """Predecessor changeset node identifier"""
        return self._data[0]

    def succnodes(self):
        """List of successor changesets node identifiers"""
        return self._data[1]

    def parentnodes(self):
        """Parents of the predecessors (None if not recorded)"""
        return self._data[5]

    def metadata(self):
        """Decoded metadata dictionary"""
        return dict(self._data[3])

    def date(self):
        """Creation date as (unixtime, offset)"""
        return self._data[4]

    def flags(self):
        """The flags field of the marker"""
        return self._data[2]
101
101
102
102
def getmarkers(repo, nodes=None, exclusive=False):
    """returns markers known in a repository

    If <nodes> is specified, only markers "relevant" to those nodes are
    returned. If <exclusive> is also True, only the markers exclusive to
    those nodes are returned."""
    if nodes is None:
        # no filtering requested: iterate over every marker in the store
        rawmarkers = repo.obsstore
    elif exclusive:
        rawmarkers = exclusivemarkers(repo, nodes)
    else:
        rawmarkers = repo.obsstore.relevantmarkers(nodes)

    for markerdata in rawmarkers:
        # wrap each raw tuple into the richer `marker` accessor class
        yield marker(repo, markerdata)
117
117
118
118
def sortedmarkers(markers):
    """Return the markers as a sorted list.

    The last item of a raw marker tuple ('parents') may be either None or a
    tuple; substituting an empty tuple for None keeps every sort key
    comparable.
    """

    def sortkey(mark):
        parents = mark[-1]
        if parents is None:
            parents = ()
        return mark[:-1] + (parents,)

    return sorted(markers, key=sortkey)
122
122
123
123
def closestpredecessors(repo, nodeid):
    """yield the list of next predecessors pointing on visible changectx nodes

    This function respect the repoview filtering, filtered revision will be
    considered missing.
    """
    predecessors = repo.obsstore.predecessors
    pending = [nodeid]
    visited = {nodeid}

    while pending:
        node = pending.pop()

        for mark in predecessors.get(node, ()):
            candidate = mark[0]

            # basic cycle protection: never revisit a node
            if candidate in visited:
                continue
            visited.add(candidate)

            if candidate in repo:
                # locally-known and visible: this is a closest predecessor
                yield candidate
            else:
                # unknown (or filtered): keep walking backwards through it
                pending.append(candidate)
151
151
152
152
def allpredecessors(obsstore, nodes, ignoreflags=0):
    """Yield node for every precursors of <nodes>.

    Some precursors may be unknown locally.

    This is a linear yield unsuited to detecting folded changesets. It
    includes the initial nodes too."""
    pending = set(nodes)
    seen = set(pending)
    getmarks = obsstore.predecessors.get
    while pending:
        node = pending.pop()
        yield node
        for mark in getmarks(node, ()):
            # skip markers carrying any of the ignored flags
            if mark[2] & ignoreflags:
                continue
            prec = mark[0]
            if prec not in seen:
                seen.add(prec)
                pending.add(prec)
175
175
176
176
def allsuccessors(obsstore, nodes, ignoreflags=0):
    """Yield node for every successor of <nodes>.

    Some successors may be unknown locally.

    This is a linear yield unsuited to detecting split changesets. It
    includes the initial nodes too."""
    pending = set(nodes)
    seen = set(pending)
    while pending:
        node = pending.pop()
        yield node
        for mark in obsstore.successors.get(node, ()):
            # skip markers carrying any of the ignored flags
            if mark[2] & ignoreflags:
                continue
            for succ in mark[1]:
                if succ not in seen:
                    seen.add(succ)
                    pending.add(succ)
197
197
198
198
199 def _filterprunes(markers):
199 def _filterprunes(markers):
200 """return a set with no prune markers"""
200 """return a set with no prune markers"""
201 return {m for m in markers if m[1]}
201 return {m for m in markers if m[1]}
202
202
203
203
def exclusivemarkers(repo, nodes):
    """set of markers relevant to "nodes" but no other locally-known nodes

    This function compute the set of markers "exclusive" to a locally-known
    node. This means we walk the markers starting from <nodes> until we reach a
    locally-known precursors outside of <nodes>. Element of <nodes> with
    locally-known successors outside of <nodes> are ignored (since their
    precursors markers are also relevant to these successors).

    For example:

        # (A0 rewritten as A1)
        #
        # A0 <-1- A1 # Marker "1" is exclusive to A1

    or

        # (A0 rewritten as AX; AX rewritten as A1; AX is unknown locally)
        #
        # <-1- A0 <-2- AX <-3- A1 # Marker "2,3" are exclusive to A1

    or

        # (A0 has unknown precursors, A0 rewritten as A1 and A2 (divergence))
        #
        #          <-2- A1 # Marker "2" is exclusive to A0,A1
        #        /
        # <-1- A0
        #        \
        #         <-3- A2 # Marker "3" is exclusive to A0,A2
        #
        # in addition:
        #
        #  Markers "2,3" are exclusive to A1,A2
        #  Markers "1,2,3" are exclusive to A0,A1,A2

    See test/test-obsolete-bundle-strip.t for more examples.

    An example usage is strip. When stripping a changeset, we also want to
    strip the markers exclusive to this changeset. Otherwise we would have
    "dangling" obsolescence markers from its precursors: Obsolescence markers
    marking a node as obsolete without any successors available locally.

    As for relevant markers, the prune markers for children will be followed.
    Of course, they will only be followed if the pruned children is
    locally-known. Since the prune markers are relevant to the pruned node.
    However, while prune markers are considered relevant to the parent of the
    pruned changesets, prune markers for locally-known changeset (with no
    successors) are considered exclusive to the pruned nodes. This allows
    to strip the prune markers (with the rest of the exclusive chain) alongside
    the pruned changesets.
    """
    # running on a filtered repository would be dangerous as markers could be
    # reported as exclusive when they are relevant for other filtered nodes.
    unfi = repo.unfiltered()

    # shortcut to various useful item
    has_node = unfi.changelog.index.has_node
    precursorsmarkers = unfi.obsstore.predecessors
    successormarkers = unfi.obsstore.successors
    childrenmarkers = unfi.obsstore.children

    # exclusive markers (return of the function)
    exclmarkers = set()
    # we need fast membership testing
    nodes = set(nodes)
    # looking for head in the obshistory
    #
    # XXX we are ignoring all issues in regard with cycle for now.
    stack = [n for n in nodes if not _filterprunes(successormarkers.get(n, ()))]
    stack.sort()
    # nodes already stacked
    seennodes = set(stack)
    while stack:
        current = stack.pop()
        # fetch precursors markers
        markers = list(precursorsmarkers.get(current, ()))
        # extend the list with prune markers
        for mark in successormarkers.get(current, ()):
            if not mark[1]:
                markers.append(mark)
        # and markers from children (looking for prune)
        for mark in childrenmarkers.get(current, ()):
            if not mark[1]:
                markers.append(mark)
        # traverse the markers
        for mark in markers:
            if mark in exclmarkers:
                # markers already selected
                continue

            # If the markers is about the current node, select it
            #
            # (this delay the addition of markers from children)
            if mark[1] or mark[0] == current:
                exclmarkers.add(mark)

            # should we keep traversing through the precursors?
            prec = mark[0]

            # nodes in the stack or already processed
            if prec in seennodes:
                continue

            # is this a locally known node ?
            known = has_node(prec)
            # if locally-known and not in the <nodes> set the traversal
            # stop here.
            if known and prec not in nodes:
                continue

            # do not keep going if there are unselected markers pointing to this
            # nodes. If we end up traversing these unselected markers later the
            # node will be taken care of at that point.
            #
            # NOTE(review): .get(prec) has no default; this relies on every
            # marker being indexed by its predecessor in `successormarkers`
            # so the key always exists — confirm against obsstore indexing.
            precmarkers = _filterprunes(successormarkers.get(prec))
            if precmarkers.issubset(exclmarkers):
                seennodes.add(prec)
                stack.append(prec)

    return exclmarkers
324
324
325
325
def foreground(repo, nodes):
    """return all nodes in the "foreground" of other node

    The foreground of a revision is anything reachable using parent -> children
    or precursor -> successor relation. It is very similar to "descendant" but
    augmented with obsolescence information.

    Beware that possible obsolescence cycle may result if complex situation.
    """
    repo = repo.unfiltered()
    foreground = set(repo.set(b'%ln::', nodes))
    if repo.obsstore:
        # We only need this complicated logic if there is obsolescence
        # XXX will probably deserve an optimised revset.
        has_node = repo.changelog.index.has_node
        plen = -1
        # compute the whole set of successors or descendants: iterate to a
        # fixed point, alternating "add successors" and "add descendants"
        # until the set stops growing
        while len(foreground) != plen:
            plen = len(foreground)
            succs = {c.node() for c in foreground}
            # only mutable changesets can have successors
            mutable = [c.node() for c in foreground if c.mutable()]
            succs.update(allsuccessors(repo.obsstore, mutable))
            # drop successors unknown locally before feeding the revset
            known = (n for n in succs if has_node(n))
            foreground = set(repo.set(b'%ln::', known))
    return {c.node() for c in foreground}
351
351
352
352
# effectflag field
#
# Effect-flag is a 1-byte bit field used to store what changed between a
# changeset and its successor(s).
#
# The effect flag is stored in obs-markers metadata while we iterate on the
# information design. That's why we have the EFFECTFLAGFIELD. If we come up
# with an incompatible design for effect flag, we can store a new design under
# another field name so we don't break readers. We plan to extend the existing
# obsmarkers bit-field when the effect flag design will be stabilized.
#
# The effect-flag is placed behind an experimental flag
# `effect-flags` set to off by default.
#

# metadata key under which the effect-flag bit field is stored
EFFECTFLAGFIELD = b"ef1"

# NOTE: bits 2 and 3 are declared out of numeric order below; only the
# values matter, not the declaration order.
DESCCHANGED = 1 << 0  # action changed the description
METACHANGED = 1 << 1  # action change the meta
DIFFCHANGED = 1 << 3  # action change diff introduced by the changeset
PARENTCHANGED = 1 << 2  # action change the parent
USERCHANGED = 1 << 4  # the user changed
DATECHANGED = 1 << 5  # the date changed
BRANCHCHANGED = 1 << 6  # the branch changed

# extra keys matching any of these patterns are ignored when comparing
# changeset metadata (see metanotblacklisted)
METABLACKLIST = [
    re.compile(b'^branch$'),
    re.compile(b'^.*-source$'),
    re.compile(b'^.*_source$'),
    re.compile(b'^source$'),
]
384
384
385
385
def metanotblacklisted(metaitem):
    """Check that the key of a meta item (extrakey, extravalue) does not
    match at least one of the blacklist pattern
    """
    metakey = metaitem[0]
    for pattern in METABLACKLIST:
        if pattern.match(metakey):
            return False
    return True
393
393
394
394
395 def _prepare_hunk(hunk):
395 def _prepare_hunk(hunk):
396 """Drop all information but the username and patch"""
396 """Drop all information but the username and patch"""
397 cleanhunk = []
397 cleanhunk = []
398 for line in hunk.splitlines():
398 for line in hunk.splitlines():
399 if line.startswith(b'# User') or not line.startswith(b'#'):
399 if line.startswith(b'# User') or not line.startswith(b'#'):
400 if line.startswith(b'@@'):
400 if line.startswith(b'@@'):
401 line = b'@@\n'
401 line = b'@@\n'
402 cleanhunk.append(line)
402 cleanhunk.append(line)
403 return cleanhunk
403 return cleanhunk
404
404
405
405
def _getdifflines(iterdiff):
    """Return the next hunk of the diff iterator, cleaned up.

    Returns None once the iterator is exhausted.
    """
    hunk = next(iterdiff, None)
    if hunk is None:
        return None
    return _prepare_hunk(hunk)
414
414
415
415
def _cmpdiff(leftctx, rightctx):
    """return True if both ctx introduce the "same diff"

    This is a first and basic implementation, with many shortcoming.
    """
    diffopts = diffutil.diffallopts(leftctx.repo().ui, {b'git': True})

    # Leftctx or right ctx might be filtered, so we need to use the contexts
    # with an unfiltered repository to safely compute the diff

    # leftctx and rightctx can be from different repository views in case of
    # hgsubversion, so don't try to access them from same repository
    # rightctx.repo() and leftctx.repo() are not always the same
    leftunfi = leftctx._repo.unfiltered()[leftctx.rev()]
    leftdiff = leftunfi.diff(opts=diffopts)
    rightunfi = rightctx._repo.unfiltered()[rightctx.rev()]
    rightdiff = rightunfi.diff(opts=diffopts)

    left, right = (0, 0)
    # walk both diffs hunk by hunk; they match when both iterators run out
    # (both sides become None) without producing a differing hunk
    while None not in (left, right):
        left = _getdifflines(leftdiff)
        right = _getdifflines(rightdiff)

        if left != right:
            return False
    return True
442
442
443
443
def geteffectflag(source, successors):
    """From an obs-marker relation, compute what changed between the
    predecessor and the successor.

    Returns a bit field combining the *CHANGED flags defined above,
    accumulated over every successor changectx.
    """
    effects = 0

    for changectx in successors:
        # Check if description has changed
        if changectx.description() != source.description():
            effects |= DESCCHANGED

        # Check if user has changed
        if changectx.user() != source.user():
            effects |= USERCHANGED

        # Check if date has changed
        if changectx.date() != source.date():
            effects |= DATECHANGED

        # Check if branch has changed
        if changectx.branch() != source.branch():
            effects |= BRANCHCHANGED

        # Check if at least one of the parent has changed
        if changectx.parents() != source.parents():
            effects |= PARENTCHANGED

        # Check if other meta has changed, ignoring blacklisted keys
        # (branch and *source keys, see METABLACKLIST)
        changeextra = changectx.extra().items()
        ctxmeta = list(filter(metanotblacklisted, changeextra))

        sourceextra = source.extra().items()
        srcmeta = list(filter(metanotblacklisted, sourceextra))

        if ctxmeta != srcmeta:
            effects |= METACHANGED

        # Check if the diff has changed
        if not _cmpdiff(source, changectx):
            effects |= DIFFCHANGED

    return effects
486
486
487
487
def getobsoleted(repo, tr=None, changes=None):
    """return the set of pre-existing revisions obsoleted by a transaction

    Either the transaction or changes item of the transaction (for hooks)
    must be provided, but not both.
    """
    if (tr is None) == (changes is None):
        e = b"exactly one of tr and changes must be provided"
        raise error.ProgrammingError(e)
    torev = repo.unfiltered().changelog.index.get_rev
    phase = repo._phasecache.phase
    succsmarkers = repo.obsstore.successors.get
    public = phases.public
    if changes is None:
        changes = tr.changes
    addedmarkers = changes[b'obsmarkers']
    origrepolen = changes[b'origrepolen']
    seenrevs = set()
    obsoleted = set()
    for mark in addedmarkers:
        node = mark[0]
        rev = torev(node)
        # skip nodes unknown locally, already-processed revisions, and
        # revisions created by this very transaction (not "pre-existing")
        if rev is None or rev in seenrevs or rev >= origrepolen:
            continue
        seenrevs.add(rev)
        # public changesets cannot become obsolete
        if phase(repo, rev) == public:
            continue
        # the revision only counts if every marker obsoleting it was added
        # by this transaction
        if set(succsmarkers(node) or []).issubset(addedmarkers):
            obsoleted.add(rev)
    return obsoleted
518
518
519
519
520 class _succs(list):
520 class _succs(list):
521 """small class to represent a successors with some metadata about it"""
521 """small class to represent a successors with some metadata about it"""
522
522
523 def __init__(self, *args, **kwargs):
523 def __init__(self, *args, **kwargs):
524 super(_succs, self).__init__(*args, **kwargs)
524 super(_succs, self).__init__(*args, **kwargs)
525 self.markers = set()
525 self.markers = set()
526
526
527 def copy(self):
527 def copy(self):
528 new = _succs(self)
528 new = _succs(self)
529 new.markers = self.markers.copy()
529 new.markers = self.markers.copy()
530 return new
530 return new
531
531
532 @util.propertycache
532 @util.propertycache
533 def _set(self):
533 def _set(self):
534 # immutable
534 # immutable
535 return set(self)
535 return set(self)
536
536
537 def canmerge(self, other):
537 def canmerge(self, other):
538 return self._set.issubset(other._set)
538 return self._set.issubset(other._set)
539
539
540
540
541 def successorssets(repo, initialnode, closest=False, cache=None):
541 def successorssets(repo, initialnode, closest=False, cache=None):
542 """Return set of all latest successors of initial nodes
542 """Return set of all latest successors of initial nodes
543
543
544 The successors set of a changeset A are the group of revisions that succeed
544 The successors set of a changeset A are the group of revisions that succeed
545 A. It succeeds A as a consistent whole, each revision being only a partial
545 A. It succeeds A as a consistent whole, each revision being only a partial
546 replacement. By default, the successors set contains non-obsolete
546 replacement. By default, the successors set contains non-obsolete
547 changesets only, walking the obsolescence graph until reaching a leaf. If
547 changesets only, walking the obsolescence graph until reaching a leaf. If
548 'closest' is set to True, closest successors-sets are return (the
548 'closest' is set to True, closest successors-sets are return (the
549 obsolescence walk stops on known changesets).
549 obsolescence walk stops on known changesets).
550
550
551 This function returns the full list of successor sets which is why it
551 This function returns the full list of successor sets which is why it
552 returns a list of tuples and not just a single tuple. Each tuple is a valid
552 returns a list of tuples and not just a single tuple. Each tuple is a valid
553 successors set. Note that (A,) may be a valid successors set for changeset A
553 successors set. Note that (A,) may be a valid successors set for changeset A
554 (see below).
554 (see below).
555
555
556 In most cases, a changeset A will have a single element (e.g. the changeset
556 In most cases, a changeset A will have a single element (e.g. the changeset
557 A is replaced by A') in its successors set. Though, it is also common for a
557 A is replaced by A') in its successors set. Though, it is also common for a
558 changeset A to have no elements in its successor set (e.g. the changeset
558 changeset A to have no elements in its successor set (e.g. the changeset
559 has been pruned). Therefore, the returned list of successors sets will be
559 has been pruned). Therefore, the returned list of successors sets will be
560 [(A',)] or [], respectively.
560 [(A',)] or [], respectively.
561
561
562 When a changeset A is split into A' and B', however, it will result in a
562 When a changeset A is split into A' and B', however, it will result in a
563 successors set containing more than a single element, i.e. [(A',B')].
563 successors set containing more than a single element, i.e. [(A',B')].
564 Divergent changesets will result in multiple successors sets, i.e. [(A',),
564 Divergent changesets will result in multiple successors sets, i.e. [(A',),
565 (A'')].
565 (A'')].
566
566
567 If a changeset A is not obsolete, then it will conceptually have no
567 If a changeset A is not obsolete, then it will conceptually have no
568 successors set. To distinguish this from a pruned changeset, the successor
568 successors set. To distinguish this from a pruned changeset, the successor
569 set will contain itself only, i.e. [(A,)].
569 set will contain itself only, i.e. [(A,)].
570
570
571 Finally, final successors unknown locally are considered to be pruned
571 Finally, final successors unknown locally are considered to be pruned
572 (pruned: obsoleted without any successors). (Final: successors not affected
572 (pruned: obsoleted without any successors). (Final: successors not affected
573 by markers).
573 by markers).
574
574
575 The 'closest' mode respect the repoview filtering. For example, without
575 The 'closest' mode respect the repoview filtering. For example, without
576 filter it will stop at the first locally known changeset, with 'visible'
576 filter it will stop at the first locally known changeset, with 'visible'
577 filter it will stop on visible changesets).
577 filter it will stop on visible changesets).
578
578
579 The optional `cache` parameter is a dictionary that may contains
579 The optional `cache` parameter is a dictionary that may contains
580 precomputed successors sets. It is meant to reuse the computation of a
580 precomputed successors sets. It is meant to reuse the computation of a
581 previous call to `successorssets` when multiple calls are made at the same
581 previous call to `successorssets` when multiple calls are made at the same
582 time. The cache dictionary is updated in place. The caller is responsible
582 time. The cache dictionary is updated in place. The caller is responsible
583 for its life span. Code that makes multiple calls to `successorssets`
583 for its life span. Code that makes multiple calls to `successorssets`
584 *should* use this cache mechanism or risk a performance hit.
584 *should* use this cache mechanism or risk a performance hit.
585
585
586 Since results are different depending of the 'closest' most, the same cache
586 Since results are different depending of the 'closest' most, the same cache
587 cannot be reused for both mode.
587 cannot be reused for both mode.
588 """
588 """
589
589
590 succmarkers = repo.obsstore.successors
590 succmarkers = repo.obsstore.successors
591
591
592 # Stack of nodes we search successors sets for
592 # Stack of nodes we search successors sets for
593 toproceed = [initialnode]
593 toproceed = [initialnode]
594 # set version of above list for fast loop detection
594 # set version of above list for fast loop detection
595 # element added to "toproceed" must be added here
595 # element added to "toproceed" must be added here
596 stackedset = set(toproceed)
596 stackedset = set(toproceed)
597 if cache is None:
597 if cache is None:
598 cache = {}
598 cache = {}
599
599
600 # This while loop is the flattened version of a recursive search for
600 # This while loop is the flattened version of a recursive search for
601 # successors sets
601 # successors sets
602 #
602 #
603 # def successorssets(x):
603 # def successorssets(x):
604 # successors = directsuccessors(x)
604 # successors = directsuccessors(x)
605 # ss = [[]]
605 # ss = [[]]
606 # for succ in directsuccessors(x):
606 # for succ in directsuccessors(x):
607 # # product as in itertools cartesian product
607 # # product as in itertools cartesian product
608 # ss = product(ss, successorssets(succ))
608 # ss = product(ss, successorssets(succ))
609 # return ss
609 # return ss
610 #
610 #
611 # But we can not use plain recursive calls here:
611 # But we can not use plain recursive calls here:
612 # - that would blow the python call stack
612 # - that would blow the python call stack
613 # - obsolescence markers may have cycles, we need to handle them.
613 # - obsolescence markers may have cycles, we need to handle them.
614 #
614 #
615 # The `toproceed` list act as our call stack. Every node we search
615 # The `toproceed` list act as our call stack. Every node we search
616 # successors set for are stacked there.
616 # successors set for are stacked there.
617 #
617 #
618 # The `stackedset` is set version of this stack used to check if a node is
618 # The `stackedset` is set version of this stack used to check if a node is
619 # already stacked. This check is used to detect cycles and prevent infinite
619 # already stacked. This check is used to detect cycles and prevent infinite
620 # loop.
620 # loop.
621 #
621 #
622 # successors set of all nodes are stored in the `cache` dictionary.
622 # successors set of all nodes are stored in the `cache` dictionary.
623 #
623 #
624 # After this while loop ends we use the cache to return the successors sets
624 # After this while loop ends we use the cache to return the successors sets
625 # for the node requested by the caller.
625 # for the node requested by the caller.
626 while toproceed:
626 while toproceed:
627 # Every iteration tries to compute the successors sets of the topmost
627 # Every iteration tries to compute the successors sets of the topmost
628 # node of the stack: CURRENT.
628 # node of the stack: CURRENT.
629 #
629 #
630 # There are four possible outcomes:
630 # There are four possible outcomes:
631 #
631 #
632 # 1) We already know the successors sets of CURRENT:
632 # 1) We already know the successors sets of CURRENT:
633 # -> mission accomplished, pop it from the stack.
633 # -> mission accomplished, pop it from the stack.
634 # 2) Stop the walk:
634 # 2) Stop the walk:
635 # default case: Node is not obsolete
635 # default case: Node is not obsolete
636 # closest case: Node is known at this repo filter level
636 # closest case: Node is known at this repo filter level
637 # -> the node is its own successors sets. Add it to the cache.
637 # -> the node is its own successors sets. Add it to the cache.
638 # 3) We do not know successors set of direct successors of CURRENT:
638 # 3) We do not know successors set of direct successors of CURRENT:
639 # -> We add those successors to the stack.
639 # -> We add those successors to the stack.
640 # 4) We know successors sets of all direct successors of CURRENT:
640 # 4) We know successors sets of all direct successors of CURRENT:
641 # -> We can compute CURRENT successors set and add it to the
641 # -> We can compute CURRENT successors set and add it to the
642 # cache.
642 # cache.
643 #
643 #
644 current = toproceed[-1]
644 current = toproceed[-1]
645
645
646 # case 2 condition is a bit hairy because of closest,
646 # case 2 condition is a bit hairy because of closest,
647 # we compute it on its own
647 # we compute it on its own
648 case2condition = (current not in succmarkers) or (
648 case2condition = (current not in succmarkers) or (
649 closest and current != initialnode and current in repo
649 closest and current != initialnode and current in repo
650 )
650 )
651
651
652 if current in cache:
652 if current in cache:
653 # case (1): We already know the successors sets
653 # case (1): We already know the successors sets
654 stackedset.remove(toproceed.pop())
654 stackedset.remove(toproceed.pop())
655 elif case2condition:
655 elif case2condition:
656 # case (2): end of walk.
656 # case (2): end of walk.
657 if current in repo:
657 if current in repo:
658 # We have a valid successors.
658 # We have a valid successors.
659 cache[current] = [_succs((current,))]
659 cache[current] = [_succs((current,))]
660 else:
660 else:
661 # Final obsolete version is unknown locally.
661 # Final obsolete version is unknown locally.
662 # Do not count that as a valid successors
662 # Do not count that as a valid successors
663 cache[current] = []
663 cache[current] = []
664 else:
664 else:
665 # cases (3) and (4)
665 # cases (3) and (4)
666 #
666 #
667 # We proceed in two phases. Phase 1 aims to distinguish case (3)
667 # We proceed in two phases. Phase 1 aims to distinguish case (3)
668 # from case (4):
668 # from case (4):
669 #
669 #
670 # For each direct successors of CURRENT, we check whether its
670 # For each direct successors of CURRENT, we check whether its
671 # successors sets are known. If they are not, we stack the
671 # successors sets are known. If they are not, we stack the
672 # unknown node and proceed to the next iteration of the while
672 # unknown node and proceed to the next iteration of the while
673 # loop. (case 3)
673 # loop. (case 3)
674 #
674 #
675 # During this step, we may detect obsolescence cycles: a node
675 # During this step, we may detect obsolescence cycles: a node
676 # with unknown successors sets but already in the call stack.
676 # with unknown successors sets but already in the call stack.
677 # In such a situation, we arbitrary set the successors sets of
677 # In such a situation, we arbitrary set the successors sets of
678 # the node to nothing (node pruned) to break the cycle.
678 # the node to nothing (node pruned) to break the cycle.
679 #
679 #
680 # If no break was encountered we proceed to phase 2.
680 # If no break was encountered we proceed to phase 2.
681 #
681 #
682 # Phase 2 computes successors sets of CURRENT (case 4); see details
682 # Phase 2 computes successors sets of CURRENT (case 4); see details
683 # in phase 2 itself.
683 # in phase 2 itself.
684 #
684 #
685 # Note the two levels of iteration in each phase.
685 # Note the two levels of iteration in each phase.
686 # - The first one handles obsolescence markers using CURRENT as
686 # - The first one handles obsolescence markers using CURRENT as
687 # precursor (successors markers of CURRENT).
687 # precursor (successors markers of CURRENT).
688 #
688 #
689 # Having multiple entry here means divergence.
689 # Having multiple entry here means divergence.
690 #
690 #
691 # - The second one handles successors defined in each marker.
691 # - The second one handles successors defined in each marker.
692 #
692 #
693 # Having none means pruned node, multiple successors means split,
693 # Having none means pruned node, multiple successors means split,
694 # single successors are standard replacement.
694 # single successors are standard replacement.
695 #
695 #
696 for mark in sortedmarkers(succmarkers[current]):
696 for mark in sortedmarkers(succmarkers[current]):
697 for suc in mark[1]:
697 for suc in mark[1]:
698 if suc not in cache:
698 if suc not in cache:
699 if suc in stackedset:
699 if suc in stackedset:
700 # cycle breaking
700 # cycle breaking
701 cache[suc] = []
701 cache[suc] = []
702 else:
702 else:
703 # case (3) If we have not computed successors sets
703 # case (3) If we have not computed successors sets
704 # of one of those successors we add it to the
704 # of one of those successors we add it to the
705 # `toproceed` stack and stop all work for this
705 # `toproceed` stack and stop all work for this
706 # iteration.
706 # iteration.
707 toproceed.append(suc)
707 toproceed.append(suc)
708 stackedset.add(suc)
708 stackedset.add(suc)
709 break
709 break
710 else:
710 else:
711 continue
711 continue
712 break
712 break
713 else:
713 else:
714 # case (4): we know all successors sets of all direct
714 # case (4): we know all successors sets of all direct
715 # successors
715 # successors
716 #
716 #
717 # Successors set contributed by each marker depends on the
717 # Successors set contributed by each marker depends on the
718 # successors sets of all its "successors" node.
718 # successors sets of all its "successors" node.
719 #
719 #
720 # Each different marker is a divergence in the obsolescence
720 # Each different marker is a divergence in the obsolescence
721 # history. It contributes successors sets distinct from other
721 # history. It contributes successors sets distinct from other
722 # markers.
722 # markers.
723 #
723 #
724 # Within a marker, a successor may have divergent successors
724 # Within a marker, a successor may have divergent successors
725 # sets. In such a case, the marker will contribute multiple
725 # sets. In such a case, the marker will contribute multiple
726 # divergent successors sets. If multiple successors have
726 # divergent successors sets. If multiple successors have
727 # divergent successors sets, a Cartesian product is used.
727 # divergent successors sets, a Cartesian product is used.
728 #
728 #
729 # At the end we post-process successors sets to remove
729 # At the end we post-process successors sets to remove
730 # duplicated entry and successors set that are strict subset of
730 # duplicated entry and successors set that are strict subset of
731 # another one.
731 # another one.
732 succssets = []
732 succssets = []
733 for mark in sortedmarkers(succmarkers[current]):
733 for mark in sortedmarkers(succmarkers[current]):
734 # successors sets contributed by this marker
734 # successors sets contributed by this marker
735 base = _succs()
735 base = _succs()
736 base.markers.add(mark)
736 base.markers.add(mark)
737 markss = [base]
737 markss = [base]
738 for suc in mark[1]:
738 for suc in mark[1]:
739 # cardinal product with previous successors
739 # cardinal product with previous successors
740 productresult = []
740 productresult = []
741 for prefix in markss:
741 for prefix in markss:
742 for suffix in cache[suc]:
742 for suffix in cache[suc]:
743 newss = prefix.copy()
743 newss = prefix.copy()
744 newss.markers.update(suffix.markers)
744 newss.markers.update(suffix.markers)
745 for part in suffix:
745 for part in suffix:
746 # do not duplicated entry in successors set
746 # do not duplicated entry in successors set
747 # first entry wins.
747 # first entry wins.
748 if part not in newss:
748 if part not in newss:
749 newss.append(part)
749 newss.append(part)
750 productresult.append(newss)
750 productresult.append(newss)
751 if productresult:
751 if productresult:
752 markss = productresult
752 markss = productresult
753 succssets.extend(markss)
753 succssets.extend(markss)
754 # remove duplicated and subset
754 # remove duplicated and subset
755 seen = []
755 seen = []
756 final = []
756 final = []
757 candidates = sorted(
757 candidates = sorted(
758 (s for s in succssets if s), key=len, reverse=True
758 (s for s in succssets if s), key=len, reverse=True
759 )
759 )
760 for cand in candidates:
760 for cand in candidates:
761 for seensuccs in seen:
761 for seensuccs in seen:
762 if cand.canmerge(seensuccs):
762 if cand.canmerge(seensuccs):
763 seensuccs.markers.update(cand.markers)
763 seensuccs.markers.update(cand.markers)
764 break
764 break
765 else:
765 else:
766 final.append(cand)
766 final.append(cand)
767 seen.append(cand)
767 seen.append(cand)
768 final.reverse() # put small successors set first
768 final.reverse() # put small successors set first
769 cache[current] = final
769 cache[current] = final
770 return cache[initialnode]
770 return cache[initialnode]
771
771
772
772
773 def successorsandmarkers(repo, ctx):
773 def successorsandmarkers(repo, ctx):
774 """compute the raw data needed for computing obsfate
774 """compute the raw data needed for computing obsfate
775 Returns a list of dict, one dict per successors set
775 Returns a list of dict, one dict per successors set
776 """
776 """
777 if not ctx.obsolete():
777 if not ctx.obsolete():
778 return None
778 return None
779
779
780 ssets = successorssets(repo, ctx.node(), closest=True)
780 ssets = successorssets(repo, ctx.node(), closest=True)
781
781
782 # closestsuccessors returns an empty list for pruned revisions, remap it
782 # closestsuccessors returns an empty list for pruned revisions, remap it
783 # into a list containing an empty list for future processing
783 # into a list containing an empty list for future processing
784 if ssets == []:
784 if ssets == []:
785 ssets = [[]]
785 ssets = [_succs()]
786
786
787 # Try to recover pruned markers
787 # Try to recover pruned markers
788 succsmap = repo.obsstore.successors
788 succsmap = repo.obsstore.successors
789 fullsuccessorsets = [] # successor set + markers
789 fullsuccessorsets = [] # successor set + markers
790 for sset in ssets:
790 for sset in ssets:
791 if sset:
791 if sset:
792 fullsuccessorsets.append(sset)
792 fullsuccessorsets.append(sset)
793 else:
793 else:
794 # successorsset return an empty set() when ctx or one of its
794 # successorsset return an empty set() when ctx or one of its
795 # successors is pruned.
795 # successors is pruned.
796 # In this case, walk the obs-markers tree again starting with ctx
796 # In this case, walk the obs-markers tree again starting with ctx
797 # and find the relevant pruning obs-makers, the ones without
797 # and find the relevant pruning obs-makers, the ones without
798 # successors.
798 # successors.
799 # Having these markers allow us to compute some information about
799 # Having these markers allow us to compute some information about
800 # its fate, like who pruned this changeset and when.
800 # its fate, like who pruned this changeset and when.
801
801
802 # XXX we do not catch all prune markers (eg rewritten then pruned)
802 # XXX we do not catch all prune markers (eg rewritten then pruned)
803 # (fix me later)
803 # (fix me later)
804 foundany = False
804 foundany = False
805 for mark in succsmap.get(ctx.node(), ()):
805 for mark in succsmap.get(ctx.node(), ()):
806 if not mark[1]:
806 if not mark[1]:
807 foundany = True
807 foundany = True
808 sset = _succs()
808 sset = _succs()
809 sset.markers.add(mark)
809 sset.markers.add(mark)
810 fullsuccessorsets.append(sset)
810 fullsuccessorsets.append(sset)
811 if not foundany:
811 if not foundany:
812 fullsuccessorsets.append(_succs())
812 fullsuccessorsets.append(_succs())
813
813
814 values = []
814 values = []
815 for sset in fullsuccessorsets:
815 for sset in fullsuccessorsets:
816 values.append({b'successors': sset, b'markers': sset.markers})
816 values.append({b'successors': sset, b'markers': sset.markers})
817
817
818 return values
818 return values
819
819
820
820
821 def _getobsfate(successorssets):
821 def _getobsfate(successorssets):
822 """Compute a changeset obsolescence fate based on its successorssets.
822 """Compute a changeset obsolescence fate based on its successorssets.
823 Successors can be the tipmost ones or the immediate ones. This function
823 Successors can be the tipmost ones or the immediate ones. This function
824 return values are not meant to be shown directly to users, it is meant to
824 return values are not meant to be shown directly to users, it is meant to
825 be used by internal functions only.
825 be used by internal functions only.
826 Returns one fate from the following values:
826 Returns one fate from the following values:
827 - pruned
827 - pruned
828 - diverged
828 - diverged
829 - superseded
829 - superseded
830 - superseded_split
830 - superseded_split
831 """
831 """
832
832
833 if len(successorssets) == 0:
833 if len(successorssets) == 0:
834 # The commit has been pruned
834 # The commit has been pruned
835 return b'pruned'
835 return b'pruned'
836 elif len(successorssets) > 1:
836 elif len(successorssets) > 1:
837 return b'diverged'
837 return b'diverged'
838 else:
838 else:
839 # No divergence, only one set of successors
839 # No divergence, only one set of successors
840 successors = successorssets[0]
840 successors = successorssets[0]
841
841
842 if len(successors) == 1:
842 if len(successors) == 1:
843 return b'superseded'
843 return b'superseded'
844 else:
844 else:
845 return b'superseded_split'
845 return b'superseded_split'
846
846
847
847
848 def obsfateverb(successorset, markers):
848 def obsfateverb(successorset, markers):
849 """Return the verb summarizing the successorset and potentially using
849 """Return the verb summarizing the successorset and potentially using
850 information from the markers
850 information from the markers
851 """
851 """
852 if not successorset:
852 if not successorset:
853 verb = b'pruned'
853 verb = b'pruned'
854 elif len(successorset) == 1:
854 elif len(successorset) == 1:
855 verb = b'rewritten'
855 verb = b'rewritten'
856 else:
856 else:
857 verb = b'split'
857 verb = b'split'
858 return verb
858 return verb
859
859
860
860
861 def markersdates(markers):
861 def markersdates(markers):
862 """returns the list of dates for a list of markers"""
862 """returns the list of dates for a list of markers"""
863 return [m[4] for m in markers]
863 return [m[4] for m in markers]
864
864
865
865
866 def markersusers(markers):
866 def markersusers(markers):
867 """Returns a sorted list of markers users without duplicates"""
867 """Returns a sorted list of markers users without duplicates"""
868 markersmeta = [dict(m[3]) for m in markers]
868 markersmeta = [dict(m[3]) for m in markers]
869 users = {
869 users = {
870 encoding.tolocal(meta[b'user'])
870 encoding.tolocal(meta[b'user'])
871 for meta in markersmeta
871 for meta in markersmeta
872 if meta.get(b'user')
872 if meta.get(b'user')
873 }
873 }
874
874
875 return sorted(users)
875 return sorted(users)
876
876
877
877
878 def markersoperations(markers):
878 def markersoperations(markers):
879 """Returns a sorted list of markers operations without duplicates"""
879 """Returns a sorted list of markers operations without duplicates"""
880 markersmeta = [dict(m[3]) for m in markers]
880 markersmeta = [dict(m[3]) for m in markers]
881 operations = {
881 operations = {
882 meta.get(b'operation') for meta in markersmeta if meta.get(b'operation')
882 meta.get(b'operation') for meta in markersmeta if meta.get(b'operation')
883 }
883 }
884
884
885 return sorted(operations)
885 return sorted(operations)
886
886
887
887
888 def obsfateprinter(ui, repo, successors, markers, formatctx):
888 def obsfateprinter(ui, repo, successors, markers, formatctx):
889 """Build a obsfate string for a single successorset using all obsfate
889 """Build a obsfate string for a single successorset using all obsfate
890 related function defined in obsutil
890 related function defined in obsutil
891 """
891 """
892 quiet = ui.quiet
892 quiet = ui.quiet
893 verbose = ui.verbose
893 verbose = ui.verbose
894 normal = not verbose and not quiet
894 normal = not verbose and not quiet
895
895
896 line = []
896 line = []
897
897
898 # Verb
898 # Verb
899 line.append(obsfateverb(successors, markers))
899 line.append(obsfateverb(successors, markers))
900
900
901 # Operations
901 # Operations
902 operations = markersoperations(markers)
902 operations = markersoperations(markers)
903 if operations:
903 if operations:
904 line.append(b" using %s" % b", ".join(operations))
904 line.append(b" using %s" % b", ".join(operations))
905
905
906 # Successors
906 # Successors
907 if successors:
907 if successors:
908 fmtsuccessors = [formatctx(repo[succ]) for succ in successors]
908 fmtsuccessors = [formatctx(repo[succ]) for succ in successors]
909 line.append(b" as %s" % b", ".join(fmtsuccessors))
909 line.append(b" as %s" % b", ".join(fmtsuccessors))
910
910
911 # Users
911 # Users
912 users = markersusers(markers)
912 users = markersusers(markers)
913 # Filter out current user in not verbose mode to reduce amount of
913 # Filter out current user in not verbose mode to reduce amount of
914 # information
914 # information
915 if not verbose:
915 if not verbose:
916 currentuser = ui.username(acceptempty=True)
916 currentuser = ui.username(acceptempty=True)
917 if len(users) == 1 and currentuser in users:
917 if len(users) == 1 and currentuser in users:
918 users = None
918 users = None
919
919
920 if (verbose or normal) and users:
920 if (verbose or normal) and users:
921 line.append(b" by %s" % b", ".join(users))
921 line.append(b" by %s" % b", ".join(users))
922
922
923 # Date
923 # Date
924 dates = markersdates(markers)
924 dates = markersdates(markers)
925
925
926 if dates and verbose:
926 if dates and verbose:
927 min_date = min(dates)
927 min_date = min(dates)
928 max_date = max(dates)
928 max_date = max(dates)
929
929
930 if min_date == max_date:
930 if min_date == max_date:
931 fmtmin_date = dateutil.datestr(min_date, b'%Y-%m-%d %H:%M %1%2')
931 fmtmin_date = dateutil.datestr(min_date, b'%Y-%m-%d %H:%M %1%2')
932 line.append(b" (at %s)" % fmtmin_date)
932 line.append(b" (at %s)" % fmtmin_date)
933 else:
933 else:
934 fmtmin_date = dateutil.datestr(min_date, b'%Y-%m-%d %H:%M %1%2')
934 fmtmin_date = dateutil.datestr(min_date, b'%Y-%m-%d %H:%M %1%2')
935 fmtmax_date = dateutil.datestr(max_date, b'%Y-%m-%d %H:%M %1%2')
935 fmtmax_date = dateutil.datestr(max_date, b'%Y-%m-%d %H:%M %1%2')
936 line.append(b" (between %s and %s)" % (fmtmin_date, fmtmax_date))
936 line.append(b" (between %s and %s)" % (fmtmin_date, fmtmax_date))
937
937
938 return b"".join(line)
938 return b"".join(line)
939
939
940
940
941 filteredmsgtable = {
941 filteredmsgtable = {
942 b"pruned": _(b"hidden revision '%s' is pruned"),
942 b"pruned": _(b"hidden revision '%s' is pruned"),
943 b"diverged": _(b"hidden revision '%s' has diverged"),
943 b"diverged": _(b"hidden revision '%s' has diverged"),
944 b"superseded": _(b"hidden revision '%s' was rewritten as: %s"),
944 b"superseded": _(b"hidden revision '%s' was rewritten as: %s"),
945 b"superseded_split": _(b"hidden revision '%s' was split as: %s"),
945 b"superseded_split": _(b"hidden revision '%s' was split as: %s"),
946 b"superseded_split_several": _(
946 b"superseded_split_several": _(
947 b"hidden revision '%s' was split as: %s and %d more"
947 b"hidden revision '%s' was split as: %s and %d more"
948 ),
948 ),
949 }
949 }
950
950
951
951
952 def _getfilteredreason(repo, changeid, ctx):
952 def _getfilteredreason(repo, changeid, ctx):
953 """return a human-friendly string on why a obsolete changeset is hidden"""
953 """return a human-friendly string on why a obsolete changeset is hidden"""
954 successors = successorssets(repo, ctx.node())
954 successors = successorssets(repo, ctx.node())
955 fate = _getobsfate(successors)
955 fate = _getobsfate(successors)
956
956
957 # Be more precise in case the revision is superseded
957 # Be more precise in case the revision is superseded
958 if fate == b'pruned':
958 if fate == b'pruned':
959 return filteredmsgtable[b'pruned'] % changeid
959 return filteredmsgtable[b'pruned'] % changeid
960 elif fate == b'diverged':
960 elif fate == b'diverged':
961 return filteredmsgtable[b'diverged'] % changeid
961 return filteredmsgtable[b'diverged'] % changeid
962 elif fate == b'superseded':
962 elif fate == b'superseded':
963 single_successor = short(successors[0][0])
963 single_successor = short(successors[0][0])
964 return filteredmsgtable[b'superseded'] % (changeid, single_successor)
964 return filteredmsgtable[b'superseded'] % (changeid, single_successor)
965 elif fate == b'superseded_split':
965 elif fate == b'superseded_split':
966
966
967 succs = []
967 succs = []
968 for node_id in successors[0]:
968 for node_id in successors[0]:
969 succs.append(short(node_id))
969 succs.append(short(node_id))
970
970
971 if len(succs) <= 2:
971 if len(succs) <= 2:
972 fmtsuccs = b', '.join(succs)
972 fmtsuccs = b', '.join(succs)
973 return filteredmsgtable[b'superseded_split'] % (changeid, fmtsuccs)
973 return filteredmsgtable[b'superseded_split'] % (changeid, fmtsuccs)
974 else:
974 else:
975 firstsuccessors = b', '.join(succs[:2])
975 firstsuccessors = b', '.join(succs[:2])
976 remainingnumber = len(succs) - 2
976 remainingnumber = len(succs) - 2
977
977
978 args = (changeid, firstsuccessors, remainingnumber)
978 args = (changeid, firstsuccessors, remainingnumber)
979 return filteredmsgtable[b'superseded_split_several'] % args
979 return filteredmsgtable[b'superseded_split_several'] % args
980
980
981
981
def divergentsets(repo, ctx):
    """Compute sets of commits divergent with a given one.

    Returns a list of dicts, one per divergent successor set, with keys:

    - b'divergentnodes': the successor set (list of nodes) that diverges
      from ``ctx``
    - b'commonpredecessor': the latest common predecessor node from which
      the divergence originates
    """
    cache = {}
    base = {}
    ctxnode = ctx.node()  # hoisted: invariant across the whole walk
    for n in allpredecessors(repo.obsstore, [ctxnode]):
        if n == ctxnode:
            # a node can't be a base for divergence with itself
            continue
        nsuccsets = successorssets(repo, n, cache)
        for nsuccset in nsuccsets:
            if ctxnode in nsuccset:
                # we are only interested in *other* successor sets
                continue
            key = tuple(nsuccset)
            if key in base:
                # we already know the latest base for this divergency
                continue
            base[key] = n
    return [
        {b'divergentnodes': divset, b'commonpredecessor': b}
        for divset, b in pycompat.iteritems(base)
    ]
1003
1003
1004
1004
def whyunstable(repo, ctx):
    """Explain why a changeset is unstable.

    Returns a list of dicts, one per instability reason affecting ``ctx``.
    Each dict has at least b'instability' (b'orphan', b'phase-divergent' or
    b'content-divergent'), b'reason' and b'node' keys; content-divergence
    entries additionally carry b'divergentnodes'.
    """
    result = []
    if ctx.orphan():
        # orphan: at least one parent is itself unstable or obsolete
        for parent in ctx.parents():
            kind = None
            if parent.orphan():
                kind = b'orphan'
            elif parent.obsolete():
                kind = b'obsolete'
            if kind is not None:
                result.append(
                    {
                        b'instability': b'orphan',
                        b'reason': b'%s parent' % kind,
                        b'node': parent.hex(),
                    }
                )
    if ctx.phasedivergent():
        # phase-divergent: some predecessor became immutable (public);
        # ignore bumpedfix markers so the fix itself isn't reported
        predecessors = allpredecessors(
            repo.obsstore, [ctx.node()], ignoreflags=bumpedfix
        )
        immutable = [
            repo[p] for p in predecessors if p in repo and not repo[p].mutable()
        ]
        result.extend(
            {
                b'instability': b'phase-divergent',
                b'reason': b'immutable predecessor',
                b'node': predecessor.hex(),
            }
            for predecessor in immutable
        )
    if ctx.contentdivergent():
        # content-divergent: another successor set exists for a common
        # predecessor; report each divergent set with its origin node
        dsets = divergentsets(repo, ctx)
        for dset in dsets:
            divnodes = [repo[n] for n in dset[b'divergentnodes']]
            result.append(
                {
                    b'instability': b'content-divergent',
                    b'divergentnodes': divnodes,
                    b'reason': b'predecessor',
                    b'node': hex(dset[b'commonpredecessor']),
                }
            )
    return result
General Comments 0
You need to be logged in to leave comments. Login now