##// END OF EJS Templates
obsutil: sort metadata before comparing in geteffectflag()...
av6 -
r52395:a5d8f261 stable
parent child Browse files
Show More
@@ -1,1047 +1,1047 b''
1 # obsutil.py - utility functions for obsolescence
1 # obsutil.py - utility functions for obsolescence
2 #
2 #
3 # Copyright 2017 Boris Feld <boris.feld@octobus.net>
3 # Copyright 2017 Boris Feld <boris.feld@octobus.net>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8
8
9 import re
9 import re
10
10
11 from .i18n import _
11 from .i18n import _
12 from .node import (
12 from .node import (
13 hex,
13 hex,
14 short,
14 short,
15 )
15 )
16 from . import (
16 from . import (
17 diffutil,
17 diffutil,
18 encoding,
18 encoding,
19 error,
19 error,
20 phases,
20 phases,
21 util,
21 util,
22 )
22 )
23 from .utils import dateutil
23 from .utils import dateutil
24
24
### obsolescence marker flag

## bumpedfix flag
#
# When a changeset A' succeed to a changeset A which became public, we call A'
# "bumped" because it's a successors of a public changesets
#
#     o    A' (bumped)
#     |`:
#     | o  A
#     |/
#     o    Z
#
# The way to solve this situation is to create a new changeset Ad as children
# of A. This changeset have the same content than A'. So the diff from A to A'
# is the same than the diff from A to Ad. Ad is marked as a successors of A'
#
#     o   Ad
#     |`:
#     | x A'
#     |'|
#     o | A
#     |/
#     o Z
#
# But by transitivity Ad is also a successors of A. To avoid having Ad marked
# as bumped too, we add the `bumpedfix` flag to the marker. <A', (Ad,)>.
# This flag mean that the successors express the changes between the public and
# bumped version and fix the situation, breaking the transitivity of
# "bumped" here.
bumpedfix = 1
# marker flag signalling that the node identifiers stored in the marker are
# 32-byte sha256 digests rather than the default 20-byte sha1 ones
usingsha256 = 2
57
57
58
58
class marker:
    """Thin wrapper giving named access to the fields of a raw obsolescence
    marker tuple."""

    def __init__(self, repo, data):
        # the repo argument will be used to create changectx in later version
        self._repo = repo
        self._data = data
        self._decodedmeta = None

    def __hash__(self):
        return hash(self._data)

    def __eq__(self, other):
        # two markers are equal exactly when they wrap identical raw data
        return type(self) == type(other) and self._data == other._data

    def prednode(self):
        """Node identifier of the predecessor changeset."""
        return self._data[0]

    def succnodes(self):
        """Node identifiers of the successor changesets."""
        return self._data[1]

    def flags(self):
        """Bit field of marker flags (e.g. bumpedfix, usingsha256)."""
        return self._data[2]

    def metadata(self):
        """Marker metadata as a freshly-built dictionary."""
        return dict(self._data[3])

    def date(self):
        """Marker creation date as a (unixtime, offset) pair."""
        return self._data[4]

    def parentnodes(self):
        """Parents of the predecessor, or None when not recorded."""
        return self._data[5]
99
99
100
100
def getmarkers(repo, nodes=None, exclusive=False):
    """Yield ``marker`` objects known in a repository.

    When ``nodes`` is given, restrict the output to markers "relevant" to
    those nodes; with ``exclusive`` also set, restrict further to markers
    exclusive to them.
    """
    if nodes is None:
        raw = repo.obsstore
    else:
        raw = (
            exclusivemarkers(repo, nodes)
            if exclusive
            else repo.obsstore.relevantmarkers(nodes)
        )
    for data in raw:
        yield marker(repo, data)
115
115
116
116
def sortedmarkers(markers):
    """Return the given raw markers as a list in a deterministic order."""

    def _key(mark):
        # the final tuple element ('parents') may be None, which does not
        # compare with tuples -- substitute an empty tuple for sorting
        return mark[:-1] + (mark[-1] or (),)

    return sorted(markers, key=_key)
120
120
121
121
def closestpredecessors(repo, nodeid):
    """Yield the nearest predecessors of ``nodeid`` that are visible changectx
    nodes.

    Repoview filtering is respected: a filtered revision counts as missing,
    so the walk continues through it to older predecessors.
    """
    predmap = repo.obsstore.predecessors
    pending = [nodeid]
    visited = {nodeid}

    while pending:
        node = pending.pop()
        for mark in predmap.get(node, ()):
            candidate = mark[0]

            # basic cycle protection
            if candidate in visited:
                continue
            visited.add(candidate)

            if candidate in repo:
                yield candidate
            else:
                # unknown locally: keep walking further back
                pending.append(candidate)
149
149
150
150
def allpredecessors(obsstore, nodes, ignoreflags=0):
    """Yield the node of every predecessor of <nodes>.

    Some predecessors may be unknown locally.

    This is a linear yield unsuited to detecting folded changesets. The
    initial nodes themselves are included in the output."""
    pending = set(nodes)
    emitted = set(pending)
    getmarks = obsstore.predecessors.get

    while pending:
        node = pending.pop()
        yield node
        for mark in getmarks(node, ()):
            # skip markers carrying any of the ignored flags
            if mark[2] & ignoreflags:
                continue
            pred = mark[0]
            if pred not in emitted:
                emitted.add(pred)
                pending.add(pred)
173
173
174
174
def allsuccessors(obsstore, nodes, ignoreflags=0):
    """Yield the node of every successor of <nodes>.

    Some successors may be unknown locally.

    This is a linear yield unsuited to detecting split changesets. The
    initial nodes themselves are included in the output."""
    pending = set(nodes)
    emitted = set(pending)

    while pending:
        node = pending.pop()
        yield node
        for mark in obsstore.successors.get(node, ()):
            # skip markers carrying any of the ignored flags
            if mark[2] & ignoreflags:
                continue
            for succ in mark[1]:
                if succ not in emitted:
                    emitted.add(succ)
                    pending.add(succ)
195
195
196
196
197 def _filterprunes(markers):
197 def _filterprunes(markers):
198 """return a set with no prune markers"""
198 """return a set with no prune markers"""
199 return {m for m in markers if m[1]}
199 return {m for m in markers if m[1]}
200
200
201
201
def exclusivemarkers(repo, nodes):
    """set of markers relevant to "nodes" but no other locally-known nodes

    This function compute the set of markers "exclusive" to a locally-known
    node. This means we walk the markers starting from <nodes> until we reach a
    locally-known precursors outside of <nodes>. Element of <nodes> with
    locally-known successors outside of <nodes> are ignored (since their
    precursors markers are also relevant to these successors).

    For example:

        # (A0 rewritten as A1)
        #
        # A0 <-1- A1 # Marker "1" is exclusive to A1

    or

        # (A0 rewritten as AX; AX rewritten as A1; AX is unknown locally)
        #
        # <-1- A0 <-2- AX <-3- A1 # Marker "2,3" are exclusive to A1

    or

        # (A0 has unknown precursors, A0 rewritten as A1 and A2 (divergence))
        #
        #          <-2- A1 # Marker "2" is exclusive to A0,A1
        #        /
        # <-1- A0
        #        \
        #         <-3- A2 # Marker "3" is exclusive to A0,A2
        #
        # in addition:
        #
        #  Markers "2,3" are exclusive to A1,A2
        #  Markers "1,2,3" are exclusive to A0,A1,A2

    See test/test-obsolete-bundle-strip.t for more examples.

    An example usage is strip. When stripping a changeset, we also want to
    strip the markers exclusive to this changeset. Otherwise we would have
    "dangling" obsolescence markers from its precursors: Obsolescence markers
    marking a node as obsolete without any successors available locally.

    As for relevant markers, the prune markers for children will be followed.
    Of course, they will only be followed if the pruned children is
    locally-known. Since the prune markers are relevant to the pruned node.
    However, while prune markers are considered relevant to the parent of the
    pruned changesets, prune markers for locally-known changeset (with no
    successors) are considered exclusive to the pruned nodes. This allows
    to strip the prune markers (with the rest of the exclusive chain) alongside
    the pruned changesets.
    """
    # running on a filtered repository would be dangerous as markers could be
    # reported as exclusive when they are relevant for other filtered nodes.
    unfi = repo.unfiltered()

    # shortcut to various useful item
    has_node = unfi.changelog.index.has_node
    precursorsmarkers = unfi.obsstore.predecessors
    successormarkers = unfi.obsstore.successors
    childrenmarkers = unfi.obsstore.children

    # exclusive markers (return of the function)
    exclmarkers = set()
    # we need fast membership testing
    nodes = set(nodes)
    # looking for head in the obshistory
    #
    # XXX we are ignoring all issues in regard with cycle for now.
    stack = [n for n in nodes if not _filterprunes(successormarkers.get(n, ()))]
    # sort for stable, deterministic traversal order
    stack.sort()
    # nodes already stacked
    seennodes = set(stack)
    while stack:
        current = stack.pop()
        # fetch precursors markers
        markers = list(precursorsmarkers.get(current, ()))
        # extend the list with prune markers
        for mark in successormarkers.get(current, ()):
            if not mark[1]:
                markers.append(mark)
        # and markers from children (looking for prune)
        for mark in childrenmarkers.get(current, ()):
            if not mark[1]:
                markers.append(mark)
        # traverse the markers
        for mark in markers:
            if mark in exclmarkers:
                # markers already selected
                continue

            # If the markers is about the current node, select it
            #
            # (this delay the addition of markers from children)
            if mark[1] or mark[0] == current:
                exclmarkers.add(mark)

            # should we keep traversing through the precursors?
            prec = mark[0]

            # nodes in the stack or already processed
            if prec in seennodes:
                continue

            # is this a locally known node ?
            known = has_node(prec)
            # if locally-known and not in the <nodes> set the traversal
            # stop here.
            if known and prec not in nodes:
                continue

            # do not keep going if there are unselected markers pointing to this
            # nodes. If we end up traversing these unselected markers later the
            # node will be taken care of at that point.
            precmarkers = _filterprunes(successormarkers.get(prec))
            if precmarkers.issubset(exclmarkers):
                seennodes.add(prec)
                stack.append(prec)

    return exclmarkers
322
322
323
323
def foreground(repo, nodes):
    """Return all nodes in the "foreground" of the given nodes.

    The foreground of a revision is anything reachable through the
    parent -> children or precursor -> successor relations. It is very
    similar to "descendant" but augmented with obsolescence information.

    Beware that possible obsolescence cycle may result if complex situation.
    """
    repo = repo.unfiltered()
    fg = set(repo.set(b'%ln::', nodes))
    if repo.obsstore:
        # the extra work below is only needed when obsolescence is in play
        # XXX will probably deserve an optimised revset.
        has_node = repo.changelog.index.has_node
        previous_size = -1
        # iterate to a fixed point: each pass folds in the successors of the
        # mutable part of the current set, then re-expands to descendants
        while len(fg) != previous_size:
            previous_size = len(fg)
            candidates = {c.node() for c in fg}
            mutable = [c.node() for c in fg if c.mutable()]
            candidates.update(allsuccessors(repo.obsstore, mutable))
            locally_known = (n for n in candidates if has_node(n))
            fg = set(repo.set(b'%ln::', locally_known))
    return {c.node() for c in fg}
349
349
350
350
# effectflag field
#
# Effect-flag is a 1-byte bit field used to store what changed between a
# changeset and its successor(s).
#
# The effect flag is stored in obs-markers metadata while we iterate on the
# information design. That's why we have the EFFECTFLAGFIELD. If we come up
# with an incompatible design for effect flag, we can store a new design under
# another field name so we don't break readers. We plan to extend the existing
# obsmarkers bit-field when the effect flag design will be stabilized.
#
# The effect-flag is placed behind an experimental flag
# `effect-flags` set to off by default.
#

# metadata key under which the effect flag is stored in a marker
EFFECTFLAGFIELD = b"ef1"

# Individual bits of the effect flag. Since these values end up in stored
# markers (see above), existing assignments should not be renumbered --
# which is why DIFFCHANGED (1 << 3) appears before PARENTCHANGED (1 << 2).
DESCCHANGED = 1 << 0  # action changed the description
METACHANGED = 1 << 1  # action change the meta
DIFFCHANGED = 1 << 3  # action change diff introduced by the changeset
PARENTCHANGED = 1 << 2  # action change the parent
USERCHANGED = 1 << 4  # the user changed
DATECHANGED = 1 << 5  # the date changed
BRANCHCHANGED = 1 << 6  # the branch changed
375
375
# "extra" keys whose changes are ignored when computing the effect flag
METABLACKLIST = [
    re.compile(b'^branch$'),
    re.compile(b'^.*-source$'),
    re.compile(b'^.*_source$'),
    re.compile(b'^source$'),
]


def metanotblacklisted(metaitem):
    """Return True when the key of ``metaitem`` (an (extrakey, extravalue)
    pair) matches none of the blacklist patterns above."""
    key = metaitem[0]
    for pattern in METABLACKLIST:
        if pattern.match(key):
            return False
    return True
391
391
392
392
393 def _prepare_hunk(hunk):
393 def _prepare_hunk(hunk):
394 """Drop all information but the username and patch"""
394 """Drop all information but the username and patch"""
395 cleanhunk = []
395 cleanhunk = []
396 for line in hunk.splitlines():
396 for line in hunk.splitlines():
397 if line.startswith(b'# User') or not line.startswith(b'#'):
397 if line.startswith(b'# User') or not line.startswith(b'#'):
398 if line.startswith(b'@@'):
398 if line.startswith(b'@@'):
399 line = b'@@\n'
399 line = b'@@\n'
400 cleanhunk.append(line)
400 cleanhunk.append(line)
401 return cleanhunk
401 return cleanhunk
402
402
403
403
404 def _getdifflines(iterdiff):
404 def _getdifflines(iterdiff):
405 """return a cleaned up lines"""
405 """return a cleaned up lines"""
406 lines = next(iterdiff, None)
406 lines = next(iterdiff, None)
407
407
408 if lines is None:
408 if lines is None:
409 return lines
409 return lines
410
410
411 return _prepare_hunk(lines)
411 return _prepare_hunk(lines)
412
412
413
413
def _cmpdiff(leftctx, rightctx):
    """Return True if both contexts introduce the "same diff".

    This is a first and basic implementation, with many shortcomings.
    """
    diffopts = diffutil.diffallopts(leftctx.repo().ui, {b'git': True})

    # Either context may come from a filtered repository view -- and in the
    # hgsubversion case they may even come from *different* repositories
    # (rightctx.repo() and leftctx.repo() are not always the same) -- so
    # resolve each one against its own unfiltered repository before diffing.
    leftunfi = leftctx._repo.unfiltered()[leftctx.rev()]
    rightunfi = rightctx._repo.unfiltered()[rightctx.rev()]
    leftdiff = leftunfi.diff(opts=diffopts)
    rightdiff = rightunfi.diff(opts=diffopts)

    while True:
        left = _getdifflines(leftdiff)
        right = _getdifflines(rightdiff)
        if left != right:
            return False
        if left is None:
            # both sides exhausted without a difference
            return True
440
440
441
441
def geteffectflag(source, successors):
    """From an obs-marker relation, compute what changed between the
    predecessor and the successor(s), as a bit field of *CHANGED flags.
    """
    effects = 0

    for succ in successors:
        # description
        if succ.description() != source.description():
            effects |= DESCCHANGED

        # user
        if succ.user() != source.user():
            effects |= USERCHANGED

        # date
        if succ.date() != source.date():
            effects |= DATECHANGED

        # branch
        if succ.branch() != source.branch():
            effects |= BRANCHCHANGED

        # at least one parent
        if succ.parents() != source.parents():
            effects |= PARENTCHANGED

        # other (non-blacklisted) "extra" metadata; sort both sides so that
        # a difference in insertion order alone does not register as a change
        succmeta = sorted(filter(metanotblacklisted, succ.extra().items()))
        srcmeta = sorted(filter(metanotblacklisted, source.extra().items()))
        if succmeta != srcmeta:
            effects |= METACHANGED

        # the diff introduced by the changeset
        if not _cmpdiff(source, succ):
            effects |= DIFFCHANGED

    return effects
484
484
485
485
def getobsoleted(repo, tr=None, changes=None):
    """Return the set of pre-existing revisions obsoleted by a transaction.

    Either the transaction or the changes item of the transaction (for hooks)
    must be provided, but not both.
    """
    if (tr is None) == (changes is None):
        e = b"exactly one of tr and changes must be provided"
        raise error.ProgrammingError(e)

    torev = repo.unfiltered().changelog.index.get_rev
    phase = repo._phasecache.phase
    succsmarkers = repo.obsstore.successors.get
    public = phases.public

    if changes is None:
        changes = tr.changes
    addedmarkers = changes[b'obsmarkers']
    origrepolen = changes[b'origrepolen']

    seenrevs = set()
    obsoleted = set()
    for mark in addedmarkers:
        node = mark[0]
        rev = torev(node)
        # skip unknown nodes, duplicates, and revisions created by this very
        # transaction -- only pre-existing revisions are of interest
        if rev is None or rev in seenrevs or rev >= origrepolen:
            continue
        seenrevs.add(rev)
        # public changesets can never become obsolete
        if phase(repo, rev) == public:
            continue
        # the revision counts as obsoleted by the transaction only if every
        # marker about it was added by the transaction itself
        if set(succsmarkers(node) or []).issubset(addedmarkers):
            obsoleted.add(rev)
    return obsoleted
516
516
517
517
class _succs(list):
    """A list of successor nodes, carrying some metadata about how it was
    built."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # obsolescence markers that contributed to this successors set
        self.markers = set()

    def copy(self):
        duplicate = _succs(self)
        duplicate.markers = self.markers.copy()
        return duplicate

    @util.propertycache
    def _set(self):
        # cached set view of the list contents; treated as immutable
        return set(self)

    def canmerge(self, other):
        return self._set.issubset(other._set)
537
537
538
538
def successorssets(repo, initialnode, closest=False, cache=None):
    """Return set of all latest successors of initial nodes

    The successors set of a changeset A are the group of revisions that succeed
    A. It succeeds A as a consistent whole, each revision being only a partial
    replacement. By default, the successors set contains non-obsolete
    changesets only, walking the obsolescence graph until reaching a leaf. If
    'closest' is set to True, closest successors-sets are return (the
    obsolescence walk stops on known changesets).

    This function returns the full list of successor sets which is why it
    returns a list of tuples and not just a single tuple. Each tuple is a valid
    successors set. Note that (A,) may be a valid successors set for changeset A
    (see below).

    In most cases, a changeset A will have a single element (e.g. the changeset
    A is replaced by A') in its successors set. Though, it is also common for a
    changeset A to have no elements in its successor set (e.g. the changeset
    has been pruned). Therefore, the returned list of successors sets will be
    [(A',)] or [], respectively.

    When a changeset A is split into A' and B', however, it will result in a
    successors set containing more than a single element, i.e. [(A',B')].
    Divergent changesets will result in multiple successors sets, i.e. [(A',),
    (A'')].

    If a changeset A is not obsolete, then it will conceptually have no
    successors set. To distinguish this from a pruned changeset, the successor
    set will contain itself only, i.e. [(A,)].

    Finally, final successors unknown locally are considered to be pruned
    (pruned: obsoleted without any successors). (Final: successors not affected
    by markers).

    The 'closest' mode respect the repoview filtering. For example, without
    filter it will stop at the first locally known changeset, with 'visible'
    filter it will stop on visible changesets).

    The optional `cache` parameter is a dictionary that may contains
    precomputed successors sets. It is meant to reuse the computation of a
    previous call to `successorssets` when multiple calls are made at the same
    time. The cache dictionary is updated in place. The caller is responsible
    for its life span. Code that makes multiple calls to `successorssets`
    *should* use this cache mechanism or risk a performance hit.

    Since results are different depending of the 'closest' most, the same cache
    cannot be reused for both mode.
    """

    # mapping: node -> markers using that node as predecessor
    succmarkers = repo.obsstore.successors

    # Stack of nodes we search successors sets for
    toproceed = [initialnode]
    # set version of above list for fast loop detection
    # element added to "toproceed" must be added here
    stackedset = set(toproceed)
    if cache is None:
        cache = {}

    # This while loop is the flattened version of a recursive search for
    # successors sets
    #
    # def successorssets(x):
    #     successors = directsuccessors(x)
    #     ss = [[]]
    #     for succ in directsuccessors(x):
    #         # product as in itertools cartesian product
    #         ss = product(ss, successorssets(succ))
    #     return ss
    #
    # But we can not use plain recursive calls here:
    # - that would blow the python call stack
    # - obsolescence markers may have cycles, we need to handle them.
    #
    # The `toproceed` list act as our call stack. Every node we search
    # successors set for are stacked there.
    #
    # The `stackedset` is set version of this stack used to check if a node is
    # already stacked. This check is used to detect cycles and prevent infinite
    # loop.
    #
    # successors set of all nodes are stored in the `cache` dictionary.
    #
    # After this while loop ends we use the cache to return the successors sets
    # for the node requested by the caller.
    while toproceed:
        # Every iteration tries to compute the successors sets of the topmost
        # node of the stack: CURRENT.
        #
        # There are four possible outcomes:
        #
        #    1) We already know the successors sets of CURRENT:
        #       -> mission accomplished, pop it from the stack.
        #    2) Stop the walk:
        #       default case: Node is not obsolete
        #       closest case: Node is known at this repo filter level
        #       -> the node is its own successors sets. Add it to the cache.
        #    3) We do not know successors set of direct successors of CURRENT:
        #       -> We add those successors to the stack.
        #    4) We know successors sets of all direct successors of CURRENT:
        #       -> We can compute CURRENT successors set and add it to the
        #          cache.
        #
        current = toproceed[-1]

        # case 2 condition is a bit hairy because of closest,
        # we compute it on its own
        case2condition = (current not in succmarkers) or (
            closest and current != initialnode and current in repo
        )

        if current in cache:
            # case (1): We already know the successors sets
            stackedset.remove(toproceed.pop())
        elif case2condition:
            # case (2): end of walk.
            if current in repo:
                # We have a valid successors.
                cache[current] = [_succs((current,))]
            else:
                # Final obsolete version is unknown locally.
                # Do not count that as a valid successors
                cache[current] = []
        else:
            # cases (3) and (4)
            #
            # We proceed in two phases. Phase 1 aims to distinguish case (3)
            # from case (4):
            #
            #     For each direct successors of CURRENT, we check whether its
            #     successors sets are known. If they are not, we stack the
            #     unknown node and proceed to the next iteration of the while
            #     loop. (case 3)
            #
            #     During this step, we may detect obsolescence cycles: a node
            #     with unknown successors sets but already in the call stack.
            #     In such a situation, we arbitrary set the successors sets of
            #     the node to nothing (node pruned) to break the cycle.
            #
            #     If no break was encountered we proceed to phase 2.
            #
            # Phase 2 computes successors sets of CURRENT (case 4); see details
            # in phase 2 itself.
            #
            # Note the two levels of iteration in each phase.
            # - The first one handles obsolescence markers using CURRENT as
            #   precursor (successors markers of CURRENT).
            #
            #   Having multiple entry here means divergence.
            #
            # - The second one handles successors defined in each marker.
            #
            #   Having none means pruned node, multiple successors means split,
            #   single successors are standard replacement.
            #
            for mark in sortedmarkers(succmarkers[current]):
                for suc in mark[1]:
                    if suc not in cache:
                        if suc in stackedset:
                            # cycle breaking
                            cache[suc] = []
                        else:
                            # case (3) If we have not computed successors sets
                            # of one of those successors we add it to the
                            # `toproceed` stack and stop all work for this
                            # iteration.
                            toproceed.append(suc)
                            stackedset.add(suc)
                            break
                else:
                    # inner loop completed without break: every successor of
                    # this marker is already cached, look at the next marker
                    continue
                # an unknown successor was stacked by the inner loop;
                # propagate the break to abort phase 1 for this iteration
                break
            else:
                # case (4): we know all successors sets of all direct
                # successors
                #
                # Successors set contributed by each marker depends on the
                # successors sets of all its "successors" node.
                #
                # Each different marker is a divergence in the obsolescence
                # history. It contributes successors sets distinct from other
                # markers.
                #
                # Within a marker, a successor may have divergent successors
                # sets. In such a case, the marker will contribute multiple
                # divergent successors sets. If multiple successors have
                # divergent successors sets, a Cartesian product is used.
                #
                # At the end we post-process successors sets to remove
                # duplicated entry and successors set that are strict subset of
                # another one.
                succssets = []
                for mark in sortedmarkers(succmarkers[current]):
                    # successors sets contributed by this marker
                    base = _succs()
                    base.markers.add(mark)
                    markss = [base]
                    for suc in mark[1]:
                        # cardinal product with previous successors
                        productresult = []
                        for prefix in markss:
                            for suffix in cache[suc]:
                                newss = prefix.copy()
                                newss.markers.update(suffix.markers)
                                for part in suffix:
                                    # do not duplicated entry in successors set
                                    # first entry wins.
                                    if part not in newss:
                                        newss.append(part)
                                productresult.append(newss)
                        if productresult:
                            markss = productresult
                    succssets.extend(markss)
                # remove duplicated and subset
                seen = []
                final = []
                candidates = sorted(
                    (s for s in succssets if s), key=len, reverse=True
                )
                for cand in candidates:
                    for seensuccs in seen:
                        if cand.canmerge(seensuccs):
                            seensuccs.markers.update(cand.markers)
                            break
                    else:
                        final.append(cand)
                        seen.append(cand)
                final.reverse()  # put small successors set first
                cache[current] = final
    return cache[initialnode]
769
769
770
770
def successorsandmarkers(repo, ctx):
    """compute the raw data needed for computing obsfate
    Returns a list of dict, one dict per successors set
    """
    if not ctx.obsolete():
        return None

    ssets = successorssets(repo, ctx.node(), closest=True)

    # closestsuccessors returns an empty list for pruned revisions, remap it
    # into a list containing an empty list for future processing
    if ssets == []:
        ssets = [_succs()]

    # Try to recover pruned markers
    succsmap = repo.obsstore.successors
    fullsuccessorsets = []  # successor set + markers
    for sset in ssets:
        if sset:
            fullsuccessorsets.append(sset)
            continue
        # successorsset return an empty set() when ctx or one of its
        # successors is pruned.
        # In this case, walk the obs-markers tree again starting with ctx
        # and find the relevant pruning obs-makers, the ones without
        # successors.
        # Having these markers allow us to compute some information about
        # its fate, like who pruned this changeset and when.

        # XXX we do not catch all prune markers (eg rewritten then pruned)
        # (fix me later)
        foundany = False
        for mark in succsmap.get(ctx.node(), ()):
            if mark[1]:
                # marker has successors, not a prune marker
                continue
            foundany = True
            pruned = _succs()
            pruned.markers.add(mark)
            fullsuccessorsets.append(pruned)
        if not foundany:
            fullsuccessorsets.append(_succs())

    return [
        {b'successors': sset, b'markers': sset.markers}
        for sset in fullsuccessorsets
    ]
817
817
818
818
819 def _getobsfate(successorssets):
819 def _getobsfate(successorssets):
820 """Compute a changeset obsolescence fate based on its successorssets.
820 """Compute a changeset obsolescence fate based on its successorssets.
821 Successors can be the tipmost ones or the immediate ones. This function
821 Successors can be the tipmost ones or the immediate ones. This function
822 return values are not meant to be shown directly to users, it is meant to
822 return values are not meant to be shown directly to users, it is meant to
823 be used by internal functions only.
823 be used by internal functions only.
824 Returns one fate from the following values:
824 Returns one fate from the following values:
825 - pruned
825 - pruned
826 - diverged
826 - diverged
827 - superseded
827 - superseded
828 - superseded_split
828 - superseded_split
829 """
829 """
830
830
831 if len(successorssets) == 0:
831 if len(successorssets) == 0:
832 # The commit has been pruned
832 # The commit has been pruned
833 return b'pruned'
833 return b'pruned'
834 elif len(successorssets) > 1:
834 elif len(successorssets) > 1:
835 return b'diverged'
835 return b'diverged'
836 else:
836 else:
837 # No divergence, only one set of successors
837 # No divergence, only one set of successors
838 successors = successorssets[0]
838 successors = successorssets[0]
839
839
840 if len(successors) == 1:
840 if len(successors) == 1:
841 return b'superseded'
841 return b'superseded'
842 else:
842 else:
843 return b'superseded_split'
843 return b'superseded_split'
844
844
845
845
def obsfateverb(successorset, markers):
    """Return the verb summarizing the successorset and potentially using
    information from the markers
    """
    if not successorset:
        # no successor at all: the changeset was pruned
        return b'pruned'
    if len(successorset) == 1:
        return b'rewritten'
    return b'split'
857
857
858
858
def markersdates(markers):
    """returns the list of dates for a list of markers"""
    # field 4 of a marker tuple holds its date
    return [marker[4] for marker in markers]
862
862
863
863
def markersusers(markers):
    """Returns a sorted list of markers users without duplicates"""
    users = set()
    for marker in markers:
        # field 3 of a marker tuple holds its metadata as key/value pairs
        meta = dict(marker[3])
        user = meta.get(b'user')
        if user:
            users.add(encoding.tolocal(user))
    return sorted(users)
874
874
875
875
def markersoperations(markers):
    """Returns a sorted list of markers operations without duplicates"""
    operations = set()
    for marker in markers:
        # field 3 of a marker tuple holds its metadata as key/value pairs
        operation = dict(marker[3]).get(b'operation')
        if operation:
            operations.add(operation)
    return sorted(operations)
884
884
885
885
def obsfateprinter(ui, repo, successors, markers, formatctx):
    """Build a obsfate string for a single successorset using all obsfate
    related function defined in obsutil

    The resulting byte string starts with a verb (pruned/rewritten/split)
    and, depending on the ui verbosity level, appends the operations, the
    successors formatted through ``formatctx``, the users and the dates
    found in ``markers``.
    """
    quiet = ui.quiet
    verbose = ui.verbose
    # "normal" mode: neither --quiet nor --verbose
    normal = not verbose and not quiet

    line = []

    # Verb
    line.append(obsfateverb(successors, markers))

    # Operations
    operations = markersoperations(markers)
    if operations:
        line.append(b" using %s" % b", ".join(operations))

    # Successors
    if successors:
        fmtsuccessors = [formatctx(repo[succ]) for succ in successors]
        line.append(b" as %s" % b", ".join(fmtsuccessors))

    # Users
    users = markersusers(markers)
    # Filter out current user in not verbose mode to reduce amount of
    # information
    if not verbose:
        currentuser = ui.username(acceptempty=True)
        if len(users) == 1 and currentuser in users:
            users = None

    # users are shown in normal and verbose modes, never in quiet mode
    if (verbose or normal) and users:
        line.append(b" by %s" % b", ".join(users))

    # Date: only shown in verbose mode
    dates = markersdates(markers)

    if dates and verbose:
        min_date = min(dates)
        max_date = max(dates)

        if min_date == max_date:
            # all markers share the same date, show it once
            fmtmin_date = dateutil.datestr(min_date, b'%Y-%m-%d %H:%M %1%2')
            line.append(b" (at %s)" % fmtmin_date)
        else:
            fmtmin_date = dateutil.datestr(min_date, b'%Y-%m-%d %H:%M %1%2')
            fmtmax_date = dateutil.datestr(max_date, b'%Y-%m-%d %H:%M %1%2')
            line.append(b" (between %s and %s)" % (fmtmin_date, fmtmax_date))

    return b"".join(line)
937
937
938
938
# Message templates explaining why a hidden (obsolete) revision is filtered;
# keys match the fate values computed by _getobsfate(), plus the
# 'superseded_split_several' variant used when a split has more than two
# successors (see _getfilteredreason()).
filteredmsgtable = {
    b"pruned": _(b"hidden revision '%s' is pruned"),
    b"diverged": _(b"hidden revision '%s' has diverged"),
    b"superseded": _(b"hidden revision '%s' was rewritten as: %s"),
    b"superseded_split": _(b"hidden revision '%s' was split as: %s"),
    b"superseded_split_several": _(
        b"hidden revision '%s' was split as: %s and %d more"
    ),
}
948
948
949
949
def _getfilteredreason(repo, changeid, ctx):
    """return a human-friendly string on why a obsolete changeset is hidden"""
    ssets = successorssets(repo, ctx.node())
    fate = _getobsfate(ssets)

    # Be more precise in case the revision is superseded
    if fate == b'pruned':
        return filteredmsgtable[b'pruned'] % changeid
    elif fate == b'diverged':
        return filteredmsgtable[b'diverged'] % changeid
    elif fate == b'superseded':
        return filteredmsgtable[b'superseded'] % (
            changeid,
            short(ssets[0][0]),
        )
    elif fate == b'superseded_split':
        shortened = [short(node_id) for node_id in ssets[0]]

        if len(shortened) <= 2:
            # short list: show every successor
            return filteredmsgtable[b'superseded_split'] % (
                changeid,
                b', '.join(shortened),
            )
        else:
            # long list: show the first two and a count of the rest
            listed = b', '.join(shortened[:2])
            remaining = len(shortened) - 2
            return filteredmsgtable[b'superseded_split_several'] % (
                changeid,
                listed,
                remaining,
            )
978
978
979
979
def divergentsets(repo, ctx):
    """Compute sets of commits divergent with a given one

    Returns a list of dicts with two keys: b'divergentnodes', a successors
    set divergent with ``ctx``, and b'commonpredecessor', the latest common
    predecessor it diverged from.
    """
    cache = {}
    base = {}
    for n in allpredecessors(repo.obsstore, [ctx.node()]):
        if n == ctx.node():
            # a node can't be a base for divergence with itself
            continue
        # NOTE: pass the cache by keyword; passing it positionally would
        # bind it to the 'closest' parameter of successorssets() and the
        # memoization would silently never be used.
        nsuccsets = successorssets(repo, n, cache=cache)
        for nsuccset in nsuccsets:
            if ctx.node() in nsuccset:
                # we are only interested in *other* successor sets
                continue
            if tuple(nsuccset) in base:
                # we already know the latest base for this divergency
                continue
            base[tuple(nsuccset)] = n
    return [
        {b'divergentnodes': divset, b'commonpredecessor': b}
        for divset, b in base.items()
    ]
1001
1001
1002
1002
def whyunstable(repo, ctx):
    """Explain every reason a changeset is unstable.

    Returns a list of mappings, one per cause, each carrying an
    b'instability' kind (b'orphan', b'phase-divergent' or
    b'content-divergent'), a human-oriented b'reason', the offending
    b'node' hex, and — for content divergence — the b'divergentnodes'
    changectx list.
    """
    entries = []

    if ctx.orphan():
        # orphan: at least one parent is itself unstable or obsolete
        for parent in ctx.parents():
            if parent.orphan():
                kind = b'orphan'
            elif parent.obsolete():
                kind = b'obsolete'
            else:
                continue
            entries.append(
                {
                    b'instability': b'orphan',
                    b'reason': b'%s parent' % kind,
                    b'node': parent.hex(),
                }
            )

    if ctx.phasedivergent():
        # phase-divergent: some predecessor became immutable (public);
        # bumpedfix markers are ignored while walking predecessors
        predecessors = allpredecessors(
            repo.obsstore, [ctx.node()], ignoreflags=bumpedfix
        )
        for p in predecessors:
            if p not in repo:
                continue
            pctx = repo[p]
            if pctx.mutable():
                continue
            entries.append(
                {
                    b'instability': b'phase-divergent',
                    b'reason': b'immutable predecessor',
                    b'node': pctx.hex(),
                }
            )

    if ctx.contentdivergent():
        # content-divergent: another successor set shares a predecessor
        for dset in divergentsets(repo, ctx):
            divnodes = [repo[n] for n in dset[b'divergentnodes']]
            entries.append(
                {
                    b'instability': b'content-divergent',
                    b'divergentnodes': divnodes,
                    b'reason': b'predecessor',
                    b'node': hex(dset[b'commonpredecessor']),
                }
            )

    return entries
General Comments 0
You need to be logged in to leave comments. Login now