##// END OF EJS Templates
obsutil: prefetch method in allpredecessors loop...
Boris Feld -
r40497:c7618901 default
parent child Browse files
Show More
@@ -1,980 +1,981 b''
1 # obsutil.py - utility functions for obsolescence
1 # obsutil.py - utility functions for obsolescence
2 #
2 #
3 # Copyright 2017 Boris Feld <boris.feld@octobus.net>
3 # Copyright 2017 Boris Feld <boris.feld@octobus.net>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import re
10 import re
11
11
12 from .i18n import _
12 from .i18n import _
13 from . import (
13 from . import (
14 diffutil,
14 diffutil,
15 encoding,
15 encoding,
16 node as nodemod,
16 node as nodemod,
17 phases,
17 phases,
18 util,
18 util,
19 )
19 )
20 from .utils import (
20 from .utils import (
21 dateutil,
21 dateutil,
22 )
22 )
23
23
24 ### obsolescence marker flag
24 ### obsolescence marker flag
25
25
26 ## bumpedfix flag
26 ## bumpedfix flag
27 #
27 #
28 # When a changeset A' succeed to a changeset A which became public, we call A'
28 # When a changeset A' succeed to a changeset A which became public, we call A'
29 # "bumped" because it's a successors of a public changesets
29 # "bumped" because it's a successors of a public changesets
30 #
30 #
31 # o A' (bumped)
31 # o A' (bumped)
32 # |`:
32 # |`:
33 # | o A
33 # | o A
34 # |/
34 # |/
35 # o Z
35 # o Z
36 #
36 #
37 # The way to solve this situation is to create a new changeset Ad as children
37 # The way to solve this situation is to create a new changeset Ad as children
38 # of A. This changeset have the same content than A'. So the diff from A to A'
38 # of A. This changeset have the same content than A'. So the diff from A to A'
39 # is the same than the diff from A to Ad. Ad is marked as a successors of A'
39 # is the same than the diff from A to Ad. Ad is marked as a successors of A'
40 #
40 #
41 # o Ad
41 # o Ad
42 # |`:
42 # |`:
43 # | x A'
43 # | x A'
44 # |'|
44 # |'|
45 # o | A
45 # o | A
46 # |/
46 # |/
47 # o Z
47 # o Z
48 #
48 #
49 # But by transitivity Ad is also a successors of A. To avoid having Ad marked
49 # But by transitivity Ad is also a successors of A. To avoid having Ad marked
50 # as bumped too, we add the `bumpedfix` flag to the marker. <A', (Ad,)>.
50 # as bumped too, we add the `bumpedfix` flag to the marker. <A', (Ad,)>.
51 # This flag mean that the successors express the changes between the public and
51 # This flag mean that the successors express the changes between the public and
52 # bumped version and fix the situation, breaking the transitivity of
52 # bumped version and fix the situation, breaking the transitivity of
53 # "bumped" here.
53 # "bumped" here.
# marker flag for the "bumpedfix" mechanism described in the comment block
# above: the successor fixes a "bumped" (phase-divergent) changeset without
# transitively marking it as a plain successor
bumpedfix = 1
# marker flag indicating the marker stores SHA-256 (32-byte) node hashes
# instead of the default SHA-1 (20-byte) ones
usingsha256 = 2
56
56
class marker(object):
    """Thin wrapper exposing the fields of a raw obsolescence marker tuple.

    The underlying ``data`` tuple is laid out as:
    ``(prednode, succnodes, flags, metadata, date, parentnodes)``.
    """

    def __init__(self, repo, data):
        # the repo argument will be used to create changectx in later version
        self._repo = repo
        self._data = data
        self._decodedmeta = None

    def __hash__(self):
        # hash on the raw tuple so equal markers hash identically
        return hash(self._data)

    def __eq__(self, other):
        # strict type equality, then compare the raw data tuples
        return type(other) == type(self) and self._data == other._data

    def prednode(self):
        """Predecessor changeset node identifier"""
        return self._data[0]

    def succnodes(self):
        """List of successor changesets node identifiers"""
        return self._data[1]

    def flags(self):
        """The flags field of the marker"""
        return self._data[2]

    def metadata(self):
        """Decoded metadata dictionary"""
        return dict(self._data[3])

    def date(self):
        """Creation date as (unixtime, offset)"""
        return self._data[4]

    def parentnodes(self):
        """Parents of the predecessors (None if not recorded)"""
        return self._data[5]
97
97
def getmarkers(repo, nodes=None, exclusive=False):
    """Yield ``marker`` objects for obsolescence markers known in ``repo``.

    Without ``nodes``, every marker in the obsstore is yielded. With
    ``nodes``, only markers "relevant" to those nodes are yielded; if
    ``exclusive`` is also true, only the markers exclusive to them.
    """
    if nodes is None:
        rawdata = repo.obsstore
    elif exclusive:
        rawdata = exclusivemarkers(repo, nodes)
    else:
        rawdata = repo.obsstore.relevantmarkers(nodes)

    for raw in rawdata:
        yield marker(repo, raw)
112
112
def closestpredecessors(repo, nodeid):
    """yield the first predecessors of ``nodeid`` visible in the repository

    Predecessor chains are walked through locally-unknown nodes until a
    node known to ``repo`` is found. This function respects repoview
    filtering: filtered revisions count as missing.
    """
    getpredmarkers = repo.obsstore.predecessors.get
    pending = [nodeid]
    visited = set(pending)

    while pending:
        node = pending.pop()
        for markerdata in getpredmarkers(node, ()):
            candidate = markerdata[0]

            # basic cycle protection
            if candidate in visited:
                continue
            visited.add(candidate)

            if candidate in repo:
                # locally known: this is a "closest" predecessor
                yield candidate
            else:
                # unknown locally: keep walking its own predecessors
                pending.append(candidate)
140
140
def allpredecessors(obsstore, nodes, ignoreflags=0):
    """Yield every predecessor of <nodes> (the initial nodes included).

    Some predecessors may be unknown locally.

    This is a linear yield unsuited to detecting folded changesets."""
    pending = set(nodes)
    visited = set(pending)
    # prefetch the bound method once, outside the loop
    getpredmarkers = obsstore.predecessors.get
    while pending:
        node = pending.pop()
        yield node
        for markerdata in getpredmarkers(node, ()):
            # skip markers carrying any of the ignored flags
            if markerdata[2] & ignoreflags:
                continue
            pred = markerdata[0]
            if pred not in visited:
                visited.add(pred)
                pending.add(pred)
163
def allsuccessors(obsstore, nodes, ignoreflags=0):
    """Yield node for every successor of <nodes>.

    Some successors may be unknown locally.

    This is a linear yield unsuited to detecting split changesets. It includes
    initial nodes too."""
    remaining = set(nodes)
    seen = set(remaining)
    # prefetch the bound method outside the loop, mirroring the same
    # optimization applied to allpredecessors, to avoid repeated
    # attribute lookups on every iteration
    succmarkers = obsstore.successors.get
    while remaining:
        current = remaining.pop()
        yield current
        for mark in succmarkers(current, ()):
            # ignore marker flagged with specified flag
            if mark[2] & ignoreflags:
                continue
            # a marker may record several successors (split); follow each
            for suc in mark[1]:
                if suc not in seen:
                    seen.add(suc)
                    remaining.add(suc)
183
184
184 def _filterprunes(markers):
185 def _filterprunes(markers):
185 """return a set with no prune markers"""
186 """return a set with no prune markers"""
186 return set(m for m in markers if m[1])
187 return set(m for m in markers if m[1])
187
188
def exclusivemarkers(repo, nodes):
    """set of markers relevant to "nodes" but no other locally-known nodes

    This function compute the set of markers "exclusive" to a locally-known
    node. This means we walk the markers starting from <nodes> until we reach a
    locally-known precursors outside of <nodes>. Element of <nodes> with
    locally-known successors outside of <nodes> are ignored (since their
    precursors markers are also relevant to these successors).

    For example:

        # (A0 rewritten as A1)
        #
        # A0 <-1- A1 # Marker "1" is exclusive to A1

    or

        # (A0 rewritten as AX; AX rewritten as A1; AX is unkown locally)
        #
        # <-1- A0 <-2- AX <-3- A1 # Marker "2,3" are exclusive to A1

    or

        # (A0 has unknown precursors, A0 rewritten as A1 and A2 (divergence))
        #
        #          <-2- A1 # Marker "2" is exclusive to A0,A1
        #        /
        # <-1- A0
        #        \
        #         <-3- A2 # Marker "3" is exclusive to A0,A2
        #
        # in addition:
        #
        #  Markers "2,3" are exclusive to A1,A2
        #  Markers "1,2,3" are exclusive to A0,A1,A2

    See test/test-obsolete-bundle-strip.t for more examples.

    An example usage is strip. When stripping a changeset, we also want to
    strip the markers exclusive to this changeset. Otherwise we would have
    "dangling"" obsolescence markers from its precursors: Obsolescence markers
    marking a node as obsolete without any successors available locally.

    As for relevant markers, the prune markers for children will be followed.
    Of course, they will only be followed if the pruned children is
    locally-known. Since the prune markers are relevant to the pruned node.
    However, while prune markers are considered relevant to the parent of the
    pruned changesets, prune markers for locally-known changeset (with no
    successors) are considered exclusive to the pruned nodes. This allows
    to strip the prune markers (with the rest of the exclusive chain) alongside
    the pruned changesets.
    """
    # running on a filtered repository would be dangerous as markers could be
    # reported as exclusive when they are relevant for other filtered nodes.
    unfi = repo.unfiltered()

    # shortcut to various useful item
    nm = unfi.changelog.nodemap
    precursorsmarkers = unfi.obsstore.predecessors
    successormarkers = unfi.obsstore.successors
    childrenmarkers = unfi.obsstore.children

    # exclusive markers (return of the function)
    exclmarkers = set()
    # we need fast membership testing
    nodes = set(nodes)
    # looking for head in the obshistory
    #
    # XXX we are ignoring all issues in regard with cycle for now.
    stack = [n for n in nodes if not _filterprunes(successormarkers.get(n, ()))]
    # sort for a deterministic traversal order
    stack.sort()
    # nodes already stacked
    seennodes = set(stack)
    while stack:
        current = stack.pop()
        # fetch precursors markers
        markers = list(precursorsmarkers.get(current, ()))
        # extend the list with prune markers (empty successors list)
        for mark in successormarkers.get(current, ()):
            if not mark[1]:
                markers.append(mark)
        # and markers from children (looking for prune)
        for mark in childrenmarkers.get(current, ()):
            if not mark[1]:
                markers.append(mark)
        # traverse the markers
        for mark in markers:
            if mark in exclmarkers:
                # markers already selected
                continue

            # If the markers is about the current node, select it
            #
            # (this delay the addition of markers from children)
            if mark[1] or mark[0] == current:
                exclmarkers.add(mark)

            # should we keep traversing through the precursors?
            prec = mark[0]

            # nodes in the stack or already processed
            if prec in seennodes:
                continue

            # is this a locally known node ?
            known = prec in nm
            # if locally-known and not in the <nodes> set the traversal
            # stop here.
            if known and prec not in nodes:
                continue

            # do not keep going if there are unselected markers pointing to this
            # nodes. If we end up traversing these unselected markers later the
            # node will be taken care of at that point.
            precmarkers = _filterprunes(successormarkers.get(prec))
            if precmarkers.issubset(exclmarkers):
                seennodes.add(prec)
                stack.append(prec)

    return exclmarkers
308
309
def foreground(repo, nodes):
    """return all nodes in the "foreground" of other node

    The foreground of a revision is anything reachable using parent -> children
    or precursor -> successor relation. It is very similar to "descendant" but
    augmented with obsolescence information.

    Beware that possible obsolescence cycle may result if complex situation.
    """
    repo = repo.unfiltered()
    ctxs = set(repo.set('%ln::', nodes))
    if repo.obsstore:
        # Obsolescence markers exist: iterate descendants and successors
        # until a fixed point is reached.
        # XXX will probably deserve an optimised revset.
        nodemap = repo.changelog.nodemap
        previous = -1
        while previous != len(ctxs):
            previous = len(ctxs)
            candidates = {c.node() for c in ctxs}
            mutable = [c.node() for c in ctxs if c.mutable()]
            candidates.update(allsuccessors(repo.obsstore, mutable))
            # only locally-known nodes can feed the revset query
            locallyknown = (n for n in candidates if n in nodemap)
            ctxs = set(repo.set('%ln::', locallyknown))
    return {c.node() for c in ctxs}
334
335
335 # effectflag field
336 # effectflag field
336 #
337 #
337 # Effect-flag is a 1-byte bit field used to store what changed between a
338 # Effect-flag is a 1-byte bit field used to store what changed between a
338 # changeset and its successor(s).
339 # changeset and its successor(s).
339 #
340 #
340 # The effect flag is stored in obs-markers metadata while we iterate on the
341 # The effect flag is stored in obs-markers metadata while we iterate on the
341 # information design. That's why we have the EFFECTFLAGFIELD. If we come up
342 # information design. That's why we have the EFFECTFLAGFIELD. If we come up
342 # with an incompatible design for effect flag, we can store a new design under
343 # with an incompatible design for effect flag, we can store a new design under
343 # another field name so we don't break readers. We plan to extend the existing
344 # another field name so we don't break readers. We plan to extend the existing
344 # obsmarkers bit-field when the effect flag design will be stabilized.
345 # obsmarkers bit-field when the effect flag design will be stabilized.
345 #
346 #
346 # The effect-flag is placed behind an experimental flag
347 # The effect-flag is placed behind an experimental flag
347 # `effect-flags` set to off by default.
348 # `effect-flags` set to off by default.
348 #
349 #
349
350
# obs-marker metadata key under which the effect flag is stored (see the
# comment block above for why the design lives behind a field name)
EFFECTFLAGFIELD = "ef1"

# NOTE: DIFFCHANGED (bit 3) is declared before PARENTCHANGED (bit 2); the
# stored bit values, not the declaration order, are what matters.
DESCCHANGED = 1 << 0 # action changed the description
METACHANGED = 1 << 1 # action change the meta
DIFFCHANGED = 1 << 3 # action change diff introduced by the changeset
PARENTCHANGED = 1 << 2 # action change the parent
USERCHANGED = 1 << 4 # the user changed
DATECHANGED = 1 << 5 # the date changed
BRANCHCHANGED = 1 << 6 # the branch changed
359
360
# extra-metadata keys excluded from effect-flag comparison (branch is
# compared separately; *-source/*_source/source keys are tool bookkeeping)
METABLACKLIST = [
    re.compile('^branch$'),
    re.compile('^.*-source$'),
    re.compile('^.*_source$'),
    re.compile('^source$'),
]

def metanotblacklisted(metaitem):
    """Return True when a meta item's key matches no blacklist pattern.

    ``metaitem`` is an ``(extrakey, extravalue)`` pair.
    """
    key = metaitem[0]
    for pattern in METABLACKLIST:
        if pattern.match(key):
            return False
    return True
374
375
375 def _prepare_hunk(hunk):
376 def _prepare_hunk(hunk):
376 """Drop all information but the username and patch"""
377 """Drop all information but the username and patch"""
377 cleanhunk = []
378 cleanhunk = []
378 for line in hunk.splitlines():
379 for line in hunk.splitlines():
379 if line.startswith(b'# User') or not line.startswith(b'#'):
380 if line.startswith(b'# User') or not line.startswith(b'#'):
380 if line.startswith(b'@@'):
381 if line.startswith(b'@@'):
381 line = b'@@\n'
382 line = b'@@\n'
382 cleanhunk.append(line)
383 cleanhunk.append(line)
383 return cleanhunk
384 return cleanhunk
384
385
def _getdifflines(iterdiff):
    """Return the next hunk of ``iterdiff`` as cleaned-up lines, or None
    when the iterator is exhausted."""
    hunk = next(iterdiff, None)
    return None if hunk is None else _prepare_hunk(hunk)
393
394
def _cmpdiff(leftctx, rightctx):
    """return True if both ctx introduce the "same diff"

    This is a first and basic implementation, with many shortcoming.
    """
    diffopts = diffutil.diffallopts(leftctx.repo().ui, {'git': True})

    # Leftctx or right ctx might be filtered, so we need to use the contexts
    # with an unfiltered repository to safely compute the diff
    leftdiff = leftctx._repo.unfiltered()[leftctx.rev()].diff(opts=diffopts)
    rightdiff = rightctx._repo.unfiltered()[rightctx.rev()].diff(opts=diffopts)

    # compare hunk by hunk until one side runs out; identical diffs end
    # with both sides exhausted (None) at the same time
    left = right = 0
    while None not in (left, right):
        left = _getdifflines(leftdiff)
        right = _getdifflines(rightdiff)
        if left != right:
            return False
    return True
415
416
def geteffectflag(source, successors):
    """ From an obs-marker relation, compute what changed between the
    predecessor and the successor.

    Returns a bitmask combining the *CHANGED effect-flag constants.
    """
    effects = 0

    for succctx in successors:
        # description change
        if succctx.description() != source.description():
            effects |= DESCCHANGED

        # user change
        if succctx.user() != source.user():
            effects |= USERCHANGED

        # date change
        if succctx.date() != source.date():
            effects |= DATECHANGED

        # branch change
        if succctx.branch() != source.branch():
            effects |= BRANCHCHANGED

        # at least one parent changed
        if succctx.parents() != source.parents():
            effects |= PARENTCHANGED

        # other meta (extra) change, ignoring blacklisted keys
        succmeta = list(filter(metanotblacklisted, succctx.extra().items()))
        srcmeta = list(filter(metanotblacklisted, source.extra().items()))
        if succmeta != srcmeta:
            effects |= METACHANGED

        # change of the diff introduced by the changeset
        if not _cmpdiff(source, succctx):
            effects |= DIFFCHANGED

    return effects
458
459
def getobsoleted(repo, tr):
    """return the set of pre-existing revisions obsoleted by a transaction"""
    # prefetch bound methods / values used in the loop
    torev = repo.unfiltered().changelog.nodemap.get
    phase = repo._phasecache.phase
    succsmarkers = repo.obsstore.successors.get
    public = phases.public

    addedmarkers = tr.changes['obsmarkers']
    origrepolen = tr.changes['origrepolen']

    seenrevs = set()
    obsoleted = set()
    for newmark in addedmarkers:
        node = newmark[0]
        rev = torev(node)
        # skip unknown nodes, already-handled revs, and revisions added
        # by this very transaction (only pre-existing ones count)
        if rev is None or rev in seenrevs or rev >= origrepolen:
            continue
        seenrevs.add(rev)
        # public changesets cannot become obsolete
        if phase(repo, rev) == public:
            continue
        # only report the rev if every marker obsoleting it is new
        if set(succsmarkers(node) or []).issubset(addedmarkers):
            obsoleted.add(rev)
    return obsoleted
480
481
class _succs(list):
    """A successors set (a list of nodes) carrying the markers walked to
    build it."""

    def __init__(self, *args, **kwargs):
        super(_succs, self).__init__(*args, **kwargs)
        # obsolescence markers traversed while building this set
        self.markers = set()

    def copy(self):
        clone = _succs(self)
        clone.markers = self.markers.copy()
        return clone

    @util.propertycache
    def _set(self):
        # immutable snapshot of the members, cached for fast subset tests
        return set(self)

    def canmerge(self, other):
        return self._set.issubset(other._set)
500
501
def successorssets(repo, initialnode, closest=False, cache=None):
    """Return set of all latest successors of initial nodes

    The successors set of a changeset A are the group of revisions that succeed
    A. It succeeds A as a consistent whole, each revision being only a partial
    replacement. By default, the successors set contains non-obsolete
    changesets only, walking the obsolescence graph until reaching a leaf. If
    'closest' is set to True, closest successors-sets are returned (the
    obsolescence walk stops on known changesets).

    This function returns the full list of successor sets which is why it
    returns a list of tuples and not just a single tuple. Each tuple is a valid
    successors set. Note that (A,) may be a valid successors set for changeset A
    (see below).

    In most cases, a changeset A will have a single element (e.g. the changeset
    A is replaced by A') in its successors set. Though, it is also common for a
    changeset A to have no elements in its successor set (e.g. the changeset
    has been pruned). Therefore, the returned list of successors sets will be
    [(A',)] or [], respectively.

    When a changeset A is split into A' and B', however, it will result in a
    successors set containing more than a single element, i.e. [(A',B')].
    Divergent changesets will result in multiple successors sets, i.e. [(A',),
    (A'')].

    If a changeset A is not obsolete, then it will conceptually have no
    successors set. To distinguish this from a pruned changeset, the successor
    set will contain itself only, i.e. [(A,)].

    Finally, final successors unknown locally are considered to be pruned
    (pruned: obsoleted without any successors). (Final: successors not affected
    by markers).

    The 'closest' mode respects the repoview filtering. For example, without
    filter it will stop at the first locally known changeset, with 'visible'
    filter it will stop on visible changesets).

    The optional `cache` parameter is a dictionary that may contain
    precomputed successors sets. It is meant to reuse the computation of a
    previous call to `successorssets` when multiple calls are made at the same
    time. The cache dictionary is updated in place. The caller is responsible
    for its life span. Code that makes multiple calls to `successorssets`
    *should* use this cache mechanism or risk a performance hit.

    Since results are different depending on the 'closest' mode, the same cache
    cannot be reused for both modes.
    """

    # mapping: precursor node -> markers that use it as a precursor
    succmarkers = repo.obsstore.successors

    # Stack of nodes we search successors sets for
    toproceed = [initialnode]
    # set version of above list for fast loop detection
    # element added to "toproceed" must be added here
    stackedset = set(toproceed)
    if cache is None:
        cache = {}

    # This while loop is the flattened version of a recursive search for
    # successors sets
    #
    # def successorssets(x):
    #    successors = directsuccessors(x)
    #    ss = [[]]
    #    for succ in directsuccessors(x):
    #        # product as in itertools cartesian product
    #        ss = product(ss, successorssets(succ))
    #    return ss
    #
    # But we can not use plain recursive calls here:
    # - that would blow the python call stack
    # - obsolescence markers may have cycles, we need to handle them.
    #
    # The `toproceed` list act as our call stack. Every node we search
    # successors set for are stacked there.
    #
    # The `stackedset` is set version of this stack used to check if a node is
    # already stacked. This check is used to detect cycles and prevent infinite
    # loop.
    #
    # successors set of all nodes are stored in the `cache` dictionary.
    #
    # After this while loop ends we use the cache to return the successors sets
    # for the node requested by the caller.
    while toproceed:
        # Every iteration tries to compute the successors sets of the topmost
        # node of the stack: CURRENT.
        #
        # There are four possible outcomes:
        #
        #    1) We already know the successors sets of CURRENT:
        #       -> mission accomplished, pop it from the stack.
        #    2) Stop the walk:
        #       default case: Node is not obsolete
        #       closest case: Node is known at this repo filter level
        #           -> the node is its own successors sets. Add it to the cache.
        #    3) We do not know successors set of direct successors of CURRENT:
        #       -> We add those successors to the stack.
        #    4) We know successors sets of all direct successors of CURRENT:
        #       -> We can compute CURRENT successors set and add it to the
        #          cache.
        #
        current = toproceed[-1]

        # case 2 condition is a bit hairy because of closest,
        # we compute it on its own
        case2condition = ((current not in succmarkers)
                          or (closest and current != initialnode
                              and current in repo))

        if current in cache:
            # case (1): We already know the successors sets
            stackedset.remove(toproceed.pop())
        elif case2condition:
            # case (2): end of walk.
            if current in repo:
                # We have a valid successors.
                cache[current] = [_succs((current,))]
            else:
                # Final obsolete version is unknown locally.
                # Do not count that as a valid successors
                cache[current] = []
        else:
            # cases (3) and (4)
            #
            # We proceed in two phases. Phase 1 aims to distinguish case (3)
            # from case (4):
            #
            #     For each direct successors of CURRENT, we check whether its
            #     successors sets are known. If they are not, we stack the
            #     unknown node and proceed to the next iteration of the while
            #     loop. (case 3)
            #
            #     During this step, we may detect obsolescence cycles: a node
            #     with unknown successors sets but already in the call stack.
            #     In such a situation, we arbitrary set the successors sets of
            #     the node to nothing (node pruned) to break the cycle.
            #
            #     If no break was encountered we proceed to phase 2.
            #
            # Phase 2 computes successors sets of CURRENT (case 4); see details
            # in phase 2 itself.
            #
            # Note the two levels of iteration in each phase.
            # - The first one handles obsolescence markers using CURRENT as
            #   precursor (successors markers of CURRENT).
            #
            #   Having multiple entry here means divergence.
            #
            # - The second one handles successors defined in each marker.
            #
            #   Having none means pruned node, multiple successors means split,
            #   single successors are standard replacement.
            #
            for mark in sorted(succmarkers[current]):
                for suc in mark[1]:
                    if suc not in cache:
                        if suc in stackedset:
                            # cycle breaking
                            cache[suc] = []
                        else:
                            # case (3) If we have not computed successors sets
                            # of one of those successors we add it to the
                            # `toproceed` stack and stop all work for this
                            # iteration.
                            toproceed.append(suc)
                            stackedset.add(suc)
                            break
                else:
                    continue
                break
            else:
                # case (4): we know all successors sets of all direct
                # successors
                #
                # Successors set contributed by each marker depends on the
                # successors sets of all its "successors" node.
                #
                # Each different marker is a divergence in the obsolescence
                # history. It contributes successors sets distinct from other
                # markers.
                #
                # Within a marker, a successor may have divergent successors
                # sets. In such a case, the marker will contribute multiple
                # divergent successors sets. If multiple successors have
                # divergent successors sets, a Cartesian product is used.
                #
                # At the end we post-process successors sets to remove
                # duplicated entry and successors set that are strict subset of
                # another one.
                succssets = []
                for mark in sorted(succmarkers[current]):
                    # successors sets contributed by this marker
                    base = _succs()
                    base.markers.add(mark)
                    markss = [base]
                    for suc in mark[1]:
                        # cardinal product with previous successors
                        productresult = []
                        for prefix in markss:
                            for suffix in cache[suc]:
                                newss = prefix.copy()
                                newss.markers.update(suffix.markers)
                                for part in suffix:
                                    # do not duplicated entry in successors set
                                    # first entry wins.
                                    if part not in newss:
                                        newss.append(part)
                                productresult.append(newss)
                        markss = productresult
                    succssets.extend(markss)
                # remove duplicated and subset
                seen = []
                final = []
                # longest candidates first, so shorter subsets merge into them
                candidates = sorted((s for s in succssets if s),
                                    key=len, reverse=True)
                for cand in candidates:
                    for seensuccs in seen:
                        if cand.canmerge(seensuccs):
                            seensuccs.markers.update(cand.markers)
                            break
                    else:
                        final.append(cand)
                        seen.append(cand)
                final.reverse() # put small successors set first
                cache[current] = final
    return cache[initialnode]
729
730
def successorsandmarkers(repo, ctx):
    """compute the raw data needed for computing obsfate
    Returns a list of dict, one dict per successors set
    """
    if not ctx.obsolete():
        return None

    ssets = successorssets(repo, ctx.node(), closest=True)

    # an empty list means ctx (or one of its successors) is pruned; normalize
    # it to a list holding one empty set so the loop below still runs once
    if ssets == []:
        ssets = [[]]

    succsmap = repo.obsstore.successors
    fullsuccessorsets = [] # successor set + markers
    for candidate in ssets:
        if candidate:
            fullsuccessorsets.append(candidate)
            continue
        # successorssets returned an empty set: ctx or one of its successors
        # was pruned. Walk the obs-markers starting at ctx again to find the
        # relevant pruning obs-markers (the ones without successors), so that
        # callers can still report who pruned this changeset and when.
        # XXX we do not catch all prune markers (eg rewritten then pruned)
        # (fix me later)
        prunemarkers = [m for m in succsmap.get(ctx.node(), ())
                        if not m[1]]
        for mark in prunemarkers:
            pruned = _succs()
            pruned.markers.add(mark)
            fullsuccessorsets.append(pruned)
        if not prunemarkers:
            fullsuccessorsets.append(_succs())

    return [{'successors': sset, 'markers': sset.markers}
            for sset in fullsuccessorsets]
776
777
777 def _getobsfate(successorssets):
778 def _getobsfate(successorssets):
778 """ Compute a changeset obsolescence fate based on its successorssets.
779 """ Compute a changeset obsolescence fate based on its successorssets.
779 Successors can be the tipmost ones or the immediate ones. This function
780 Successors can be the tipmost ones or the immediate ones. This function
780 return values are not meant to be shown directly to users, it is meant to
781 return values are not meant to be shown directly to users, it is meant to
781 be used by internal functions only.
782 be used by internal functions only.
782 Returns one fate from the following values:
783 Returns one fate from the following values:
783 - pruned
784 - pruned
784 - diverged
785 - diverged
785 - superseded
786 - superseded
786 - superseded_split
787 - superseded_split
787 """
788 """
788
789
789 if len(successorssets) == 0:
790 if len(successorssets) == 0:
790 # The commit has been pruned
791 # The commit has been pruned
791 return 'pruned'
792 return 'pruned'
792 elif len(successorssets) > 1:
793 elif len(successorssets) > 1:
793 return 'diverged'
794 return 'diverged'
794 else:
795 else:
795 # No divergence, only one set of successors
796 # No divergence, only one set of successors
796 successors = successorssets[0]
797 successors = successorssets[0]
797
798
798 if len(successors) == 1:
799 if len(successors) == 1:
799 return 'superseded'
800 return 'superseded'
800 else:
801 else:
801 return 'superseded_split'
802 return 'superseded_split'
802
803
def obsfateverb(successorset, markers):
    """ Return the verb summarizing the successorset and potentially using
    information from the markers
    """
    # NOTE: `markers` is currently unused, but kept in the signature so the
    # verb can later be refined with marker metadata
    if not successorset:
        return 'pruned'
    return 'rewritten' if len(successorset) == 1 else 'split'
814
815
def markersdates(markers):
    """returns the list of dates for a list of markers
    """
    # the date lives at index 4 of a marker tuple
    dates = []
    for marker in markers:
        dates.append(marker[4])
    return dates
819
820
def markersusers(markers):
    """ Returns a sorted list of markers users without duplicates
    """
    users = set()
    for marker in markers:
        # marker metadata is stored as a tuple of (key, value) pairs at
        # index 3; markers without a 'user' entry are skipped
        meta = dict(marker[3])
        if meta.get('user'):
            users.add(encoding.tolocal(meta['user']))
    return sorted(users)
828
829
def markersoperations(markers):
    """ Returns a sorted list of markers operations without duplicates
    """
    operations = set()
    for marker in markers:
        # marker metadata is stored as a tuple of (key, value) pairs at
        # index 3; markers without an 'operation' entry are skipped
        meta = dict(marker[3])
        if meta.get('operation'):
            operations.add(meta['operation'])
    return sorted(operations)
837
838
def obsfateprinter(ui, repo, successors, markers, formatctx):
    """ Build a obsfate string for a single successorset using all obsfate
    related function defined in obsutil
    """
    quiet = ui.quiet
    verbose = ui.verbose
    normal = not verbose and not quiet

    parts = []

    # start with the verb describing what happened to the changeset
    parts.append(obsfateverb(successors, markers))

    # mention the operations (amend, rebase, ...) recorded in the markers
    operations = markersoperations(markers)
    if operations:
        parts.append(" using %s" % ", ".join(operations))

    # list the successors, formatted through the caller-supplied callback
    if successors:
        formatted = [formatctx(repo[succ]) for succ in successors]
        parts.append(" as %s" % ", ".join(formatted))

    # list the users; outside verbose mode, drop the current user to reduce
    # the amount of information
    users = markersusers(markers)
    if not verbose:
        currentuser = ui.username(acceptempty=True)
        if len(users) == 1 and currentuser in users:
            users = None
    if (verbose or normal) and users:
        parts.append(" by %s" % ", ".join(users))

    # in verbose mode, also display when the rewrite(s) happened
    dates = markersdates(markers)
    if dates and verbose:
        earliest = min(dates)
        latest = max(dates)
        if earliest == latest:
            fmtdate = dateutil.datestr(earliest, '%Y-%m-%d %H:%M %1%2')
            parts.append(" (at %s)" % fmtdate)
        else:
            fmtearliest = dateutil.datestr(earliest, '%Y-%m-%d %H:%M %1%2')
            fmtlatest = dateutil.datestr(latest, '%Y-%m-%d %H:%M %1%2')
            parts.append(" (between %s and %s)" % (fmtearliest, fmtlatest))

    return "".join(parts)
889
890
890
891
# obsolescence fate -> localized message template explaining why a hidden
# revision is filtered out; keys match the values returned by _getobsfate()
filteredmsgtable = {
    "pruned": _("hidden revision '%s' is pruned"),
    "diverged": _("hidden revision '%s' has diverged"),
    "superseded": _("hidden revision '%s' was rewritten as: %s"),
    "superseded_split": _("hidden revision '%s' was split as: %s"),
    "superseded_split_several": _("hidden revision '%s' was split as: %s and "
                                  "%d more"),
}
899
900
def _getfilteredreason(repo, changeid, ctx):
    """return a human-friendly string on why a obsolete changeset is hidden
    """
    successors = successorssets(repo, ctx.node())
    fate = _getobsfate(successors)

    # Be more precise in case the revision is superseded
    if fate == 'pruned':
        return filteredmsgtable['pruned'] % changeid
    if fate == 'diverged':
        return filteredmsgtable['diverged'] % changeid
    if fate == 'superseded':
        successor = nodemod.short(successors[0][0])
        return filteredmsgtable['superseded'] % (changeid, successor)
    if fate == 'superseded_split':
        # show at most two successors and summarize the rest as a count
        shortened = [nodemod.short(node_id) for node_id in successors[0]]
        if len(shortened) <= 2:
            joined = ', '.join(shortened)
            return filteredmsgtable['superseded_split'] % (changeid, joined)
        args = (changeid, ', '.join(shortened[:2]), len(shortened) - 2)
        return filteredmsgtable['superseded_split_several'] % args
929
930
def divergentsets(repo, ctx):
    """Compute sets of commits divergent with a given one

    Returns a list of dicts with two keys:
    - 'divergentnodes': a successors set competing with ctx,
    - 'commonpredecessor': the latest common predecessor node from which
      the divergence originates.
    """
    cache = {}
    base = {}
    for n in allpredecessors(repo.obsstore, [ctx.node()]):
        if n == ctx.node():
            # a node can't be a base for divergence with itself
            continue
        # `cache` must be passed by keyword: the third positional parameter
        # of successorssets() is `closest`, not the cache. Passing it
        # positionally silently disabled memoization across iterations.
        nsuccsets = successorssets(repo, n, cache=cache)
        for nsuccset in nsuccsets:
            if ctx.node() in nsuccset:
                # we are only interested in *other* successor sets
                continue
            if tuple(nsuccset) in base:
                # we already know the latest base for this divergency
                continue
            base[tuple(nsuccset)] = n
    return [{'divergentnodes': divset, 'commonpredecessor': b}
            for divset, b in base.iteritems()]
949
950
def whyunstable(repo, ctx):
    """Return a list of dicts, one per instability affecting `ctx`.

    Each entry carries an 'instability' name, a 'reason' string and the hex
    'node' responsible for it ('divergentnodes' is added for the
    content-divergent case).
    """
    entries = []
    if ctx.orphan():
        # an orphan has at least one obsolete or orphan parent
        for parent in ctx.parents():
            if parent.orphan():
                kind = 'orphan'
            elif parent.obsolete():
                kind = 'obsolete'
            else:
                continue
            entries.append({'instability': 'orphan',
                            'reason': '%s parent' % kind,
                            'node': parent.hex()})
    if ctx.phasedivergent():
        # report every immutable predecessor, ignoring markers carrying the
        # bumpedfix flag
        predecessors = allpredecessors(repo.obsstore, [ctx.node()],
                                       ignoreflags=bumpedfix)
        for pred in predecessors:
            if pred not in repo or repo[pred].mutable():
                continue
            entries.append({'instability': 'phase-divergent',
                            'reason': 'immutable predecessor',
                            'node': repo[pred].hex()})
    if ctx.contentdivergent():
        for dset in divergentsets(repo, ctx):
            divnodes = [repo[n] for n in dset['divergentnodes']]
            entries.append({'instability': 'content-divergent',
                            'divergentnodes': divnodes,
                            'reason': 'predecessor',
                            'node': nodemod.hex(dset['commonpredecessor'])})
    return entries
General Comments 0
You need to be logged in to leave comments. Login now