##// END OF EJS Templates
obsutil: use public interface to access to repo.ui
Yuya Nishihara -
r38605:79d7d715 default
parent child Browse files
Show More
@@ -1,980 +1,980 b''
1 # obsutil.py - utility functions for obsolescence
1 # obsutil.py - utility functions for obsolescence
2 #
2 #
3 # Copyright 2017 Boris Feld <boris.feld@octobus.net>
3 # Copyright 2017 Boris Feld <boris.feld@octobus.net>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import re
10 import re
11
11
12 from .i18n import _
12 from .i18n import _
13 from . import (
13 from . import (
14 node as nodemod,
14 node as nodemod,
15 phases,
15 phases,
16 util,
16 util,
17 )
17 )
18 from .utils import (
18 from .utils import (
19 dateutil,
19 dateutil,
20 diffutil,
20 diffutil,
21 )
21 )
22
22
### obsolescence marker flag

## bumpedfix flag
#
# When a changeset A' succeed to a changeset A which became public, we call A'
# "bumped" because it's a successors of a public changesets
#
# o A' (bumped)
# |`:
# | o A
# |/
# o Z
#
# The way to solve this situation is to create a new changeset Ad as children
# of A. This changeset have the same content than A'. So the diff from A to A'
# is the same than the diff from A to Ad. Ad is marked as a successors of A'
#
# o Ad
# |`:
# | x A'
# |'|
# o | A
# |/
# o Z
#
# But by transitivity Ad is also a successors of A. To avoid having Ad marked
# as bumped too, we add the `bumpedfix` flag to the marker. <A', (Ad,)>.
# This flag mean that the successors express the changes between the public and
# bumped version and fix the situation, breaking the transitivity of
# "bumped" here.
# NOTE: these values are part of the on-disk marker format; do not renumber.
bumpedfix = 1  # marker flag: successor fixes the "bumped" situation above
usingsha256 = 2  # marker flag: nodes recorded as sha256 (per the flag name;
                 # format details live in the obsstore code -- confirm there)
55
55
class marker(object):
    """Convenience wrapper around the raw tuple of an obsolescence marker."""

    def __init__(self, repo, data):
        # the repo argument will be used to create changectx in later version
        self._repo = repo
        self._data = data
        self._decodedmeta = None

    def __hash__(self):
        return hash(self._data)

    def __eq__(self, other):
        # two wrappers are equal when they wrap the exact same raw data
        return type(other) == type(self) and self._data == other._data

    def prednode(self):
        """Predecessor changeset node identifier"""
        return self._data[0]

    def succnodes(self):
        """List of successor changesets node identifiers"""
        return self._data[1]

    def parentnodes(self):
        """Parents of the predecessors (None if not recorded)"""
        return self._data[5]

    def metadata(self):
        """Decoded metadata dictionary"""
        return dict(self._data[3])

    def date(self):
        """Creation date as (unixtime, offset)"""
        return self._data[4]

    def flags(self):
        """The flags field of the marker"""
        return self._data[2]
96
96
def getmarkers(repo, nodes=None, exclusive=False):
    """returns markers known in a repository

    If <nodes> is specified, only markers "relevant" to those nodes are
    returned"""
    if nodes is None:
        # no filtering requested: iterate over the whole obsstore
        rawmarkers = repo.obsstore
    elif exclusive:
        # markers relevant to <nodes> and to no other locally-known node
        rawmarkers = exclusivemarkers(repo, nodes)
    else:
        rawmarkers = repo.obsstore.relevantmarkers(nodes)

    # wrap each raw marker tuple in the convenience class above
    for markerdata in rawmarkers:
        yield marker(repo, markerdata)
111
111
def closestpredecessors(repo, nodeid):
    """yield the list of next predecessors pointing on visible changectx nodes

    This function respect the repoview filtering, filtered revision will be
    considered missing.
    """
    predecessors = repo.obsstore.predecessors
    pending = [nodeid]
    visited = set(pending)

    while pending:
        node = pending.pop()

        for markerdata in predecessors.get(node, ()):
            candidate = markerdata[0]

            # basic cycle protection
            if candidate in visited:
                continue
            visited.add(candidate)

            if candidate in repo:
                # visible in the (possibly filtered) repo: report it
                yield candidate
            else:
                # missing/filtered: keep walking through it
                pending.append(candidate)
139
139
def allpredecessors(obsstore, nodes, ignoreflags=0):
    """Yield node for every precursors of <nodes>.

    Some precursors may be unknown locally.

    This is a linear yield unsuited to detecting folded changesets. It includes
    initial nodes too."""
    queue = set(nodes)
    emitted = set(queue)
    while queue:
        node = queue.pop()
        yield node
        for markerdata in obsstore.predecessors.get(node, ()):
            # skip markers carrying any of the ignored flags
            if markerdata[2] & ignoreflags:
                continue
            pred = markerdata[0]
            if pred not in emitted:
                emitted.add(pred)
                queue.add(pred)
161
161
def allsuccessors(obsstore, nodes, ignoreflags=0):
    """Yield node for every successor of <nodes>.

    Some successors may be unknown locally.

    This is a linear yield unsuited to detecting split changesets. It includes
    initial nodes too."""
    queue = set(nodes)
    emitted = set(queue)
    while queue:
        node = queue.pop()
        yield node
        for markerdata in obsstore.successors.get(node, ()):
            # skip markers carrying any of the ignored flags
            if markerdata[2] & ignoreflags:
                continue
            for succ in markerdata[1]:
                if succ not in emitted:
                    emitted.add(succ)
                    queue.add(succ)
182
182
183 def _filterprunes(markers):
183 def _filterprunes(markers):
184 """return a set with no prune markers"""
184 """return a set with no prune markers"""
185 return set(m for m in markers if m[1])
185 return set(m for m in markers if m[1])
186
186
def exclusivemarkers(repo, nodes):
    """set of markers relevant to "nodes" but no other locally-known nodes

    This function compute the set of markers "exclusive" to a locally-known
    node. This means we walk the markers starting from <nodes> until we reach a
    locally-known precursors outside of <nodes>. Element of <nodes> with
    locally-known successors outside of <nodes> are ignored (since their
    precursors markers are also relevant to these successors).

    For example:

        # (A0 rewritten as A1)
        #
        # A0 <-1- A1 # Marker "1" is exclusive to A1

    or

        # (A0 rewritten as AX; AX rewritten as A1; AX is unknown locally)
        #
        # <-1- A0 <-2- AX <-3- A1 # Marker "2,3" are exclusive to A1

    or

        # (A0 has unknown precursors, A0 rewritten as A1 and A2 (divergence))
        #
        #  <-2- A1 # Marker "2" is exclusive to A0,A1
        #        /
        # <-1- A0
        #        \
        #         <-3- A2 # Marker "3" is exclusive to A0,A2
        #
        # in addition:
        #
        #  Markers "2,3" are exclusive to A1,A2
        #  Markers "1,2,3" are exclusive to A0,A1,A2

    See test/test-obsolete-bundle-strip.t for more examples.

    An example usage is strip. When stripping a changeset, we also want to
    strip the markers exclusive to this changeset. Otherwise we would have
    "dangling" obsolescence markers from its precursors: Obsolescence markers
    marking a node as obsolete without any successors available locally.

    As for relevant markers, the prune markers for children will be followed.
    Of course, they will only be followed if the pruned children is
    locally-known. Since the prune markers are relevant to the pruned node.
    However, while prune markers are considered relevant to the parent of the
    pruned changesets, prune markers for locally-known changeset (with no
    successors) are considered exclusive to the pruned nodes. This allows
    to strip the prune markers (with the rest of the exclusive chain) alongside
    the pruned changesets.
    """
    # running on a filtered repository would be dangerous as markers could be
    # reported as exclusive when they are relevant for other filtered nodes.
    unfi = repo.unfiltered()

    # shortcut to various useful item
    nm = unfi.changelog.nodemap
    precursorsmarkers = unfi.obsstore.predecessors
    successormarkers = unfi.obsstore.successors
    childrenmarkers = unfi.obsstore.children

    # exclusive markers (return of the function)
    exclmarkers = set()
    # we need fast membership testing
    nodes = set(nodes)
    # looking for head in the obshistory
    #
    # XXX we are ignoring all issues in regard with cycle for now.
    stack = [n for n in nodes if not _filterprunes(successormarkers.get(n, ()))]
    stack.sort()
    # nodes already stacked
    seennodes = set(stack)
    while stack:
        current = stack.pop()
        # fetch precursors markers
        markers = list(precursorsmarkers.get(current, ()))
        # extend the list with prune markers
        for mark in successormarkers.get(current, ()):
            if not mark[1]:
                markers.append(mark)
        # and markers from children (looking for prune)
        for mark in childrenmarkers.get(current, ()):
            if not mark[1]:
                markers.append(mark)
        # traverse the markers
        for mark in markers:
            if mark in exclmarkers:
                # markers already selected
                continue

            # If the markers is about the current node, select it
            #
            # (this delay the addition of markers from children)
            if mark[1] or mark[0] == current:
                exclmarkers.add(mark)

            # should we keep traversing through the precursors?
            prec = mark[0]

            # nodes in the stack or already processed
            if prec in seennodes:
                continue

            # is this a locally known node ?
            known = prec in nm
            # if locally-known and not in the <nodes> set the traversal
            # stop here.
            if known and prec not in nodes:
                continue

            # do not keep going if there are unselected markers pointing to this
            # nodes. If we end up traversing these unselected markers later the
            # node will be taken care of at that point.
            precmarkers = _filterprunes(successormarkers.get(prec))
            if precmarkers.issubset(exclmarkers):
                seennodes.add(prec)
                stack.append(prec)

    return exclmarkers
307
307
def foreground(repo, nodes):
    """return all nodes in the "foreground" of other node

    The foreground of a revision is anything reachable using parent -> children
    or precursor -> successor relation. It is very similar to "descendant" but
    augmented with obsolescence information.

    Beware that possible obsolescence cycle may result if complex situation.
    """
    # work on the unfiltered repo so hidden (obsolete) changesets are visible
    repo = repo.unfiltered()
    foreground = set(repo.set('%ln::', nodes))
    if repo.obsstore:
        # We only need this complicated logic if there is obsolescence
        # XXX will probably deserve an optimised revset.
        nm = repo.changelog.nodemap
        plen = -1
        # compute the whole set of successors or descendants
        # fixed-point iteration: alternate "add successors" and "add
        # descendants" until the foreground set stops growing
        while len(foreground) != plen:
            plen = len(foreground)
            succs = set(c.node() for c in foreground)
            # only mutable changesets can have successors
            mutable = [c.node() for c in foreground if c.mutable()]
            succs.update(allsuccessors(repo.obsstore, mutable))
            # successors may be unknown locally; keep only known nodes
            known = (n for n in succs if n in nm)
            foreground = set(repo.set('%ln::', known))
    return set(c.node() for c in foreground)
333
333
# effectflag field
#
# Effect-flag is a 1-byte bit field used to store what changed between a
# changeset and its successor(s).
#
# The effect flag is stored in obs-markers metadata while we iterate on the
# information design. That's why we have the EFFECTFLAGFIELD. If we come up
# with an incompatible design for effect flag, we can store a new design under
# another field name so we don't break readers. We plan to extend the existing
# obsmarkers bit-field when the effect flag design will be stabilized.
#
# The effect-flag is placed behind an experimental flag
# `effect-flags` set to off by default.
#

# metadata key under which the effect flag is stored in obs-markers
EFFECTFLAGFIELD = "ef1"

# NOTE: DIFFCHANGED (bit 3) is declared before PARENTCHANGED (bit 2); the
# declaration order does not follow bit order, but the values are stored in
# marker metadata, so do not renumber them.
DESCCHANGED = 1 << 0 # action changed the description
METACHANGED = 1 << 1 # action change the meta
DIFFCHANGED = 1 << 3 # action change diff introduced by the changeset
PARENTCHANGED = 1 << 2 # action change the parent
USERCHANGED = 1 << 4 # the user changed
DATECHANGED = 1 << 5 # the date changed
BRANCHCHANGED = 1 << 6 # the branch changed

# extra keys that must be ignored when comparing changeset metadata for the
# METACHANGED effect (rewrite tooling bookkeeping, not user-meaningful)
METABLACKLIST = [
    re.compile('^branch$'),
    re.compile('^.*-source$'),
    re.compile('^.*_source$'),
    re.compile('^source$'),
]
365
365
def metanotblacklisted(metaitem):
    """ Check that the key of a meta item (extrakey, extravalue) does not
    match at least one of the blacklist pattern
    """
    extrakey = metaitem[0]
    # reject the item as soon as one blacklist pattern matches its key
    for pattern in METABLACKLIST:
        if pattern.match(extrakey):
            return False
    return True
373
373
374 def _prepare_hunk(hunk):
374 def _prepare_hunk(hunk):
375 """Drop all information but the username and patch"""
375 """Drop all information but the username and patch"""
376 cleanhunk = []
376 cleanhunk = []
377 for line in hunk.splitlines():
377 for line in hunk.splitlines():
378 if line.startswith(b'# User') or not line.startswith(b'#'):
378 if line.startswith(b'# User') or not line.startswith(b'#'):
379 if line.startswith(b'@@'):
379 if line.startswith(b'@@'):
380 line = b'@@\n'
380 line = b'@@\n'
381 cleanhunk.append(line)
381 cleanhunk.append(line)
382 return cleanhunk
382 return cleanhunk
383
383
def _getdifflines(iterdiff):
    """return a cleaned up lines

    Pull the next hunk from the iterator and strip it down with
    _prepare_hunk; return None when the iterator is exhausted."""
    hunk = next(iterdiff, None)
    return None if hunk is None else _prepare_hunk(hunk)
392
392
def _cmpdiff(leftctx, rightctx):
    """return True if both ctx introduce the "same diff"

    This is a first and basic implementation, with many shortcoming.
    """
    # use the public ctx.repo() accessor consistently instead of mixing it
    # with the private leftctx._repo attribute
    diffopts = diffutil.diffopts(leftctx.repo().ui, {'git': True})
    # Leftctx or right ctx might be filtered, so we need to use the contexts
    # with an unfiltered repository to safely compute the diff
    leftunfi = leftctx.repo().unfiltered()[leftctx.rev()]
    leftdiff = leftunfi.diff(opts=diffopts)
    rightunfi = rightctx.repo().unfiltered()[rightctx.rev()]
    rightdiff = rightunfi.diff(opts=diffopts)

    left, right = (0, 0)
    # compare the two diffs hunk by hunk; stop at the first difference or
    # when either side is exhausted (None)
    while None not in (left, right):
        left = _getdifflines(leftdiff)
        right = _getdifflines(rightdiff)

        if left != right:
            return False
    return True
414
414
def geteffectflag(relation):
    """ From an obs-marker relation, compute what changed between the
    predecessor and the successor.
    """
    effects = 0

    source = relation[0]

    for succ in relation[1]:
        # description change
        if succ.description() != source.description():
            effects |= DESCCHANGED

        # user change
        if succ.user() != source.user():
            effects |= USERCHANGED

        # date change
        if succ.date() != source.date():
            effects |= DATECHANGED

        # branch change
        if succ.branch() != source.branch():
            effects |= BRANCHCHANGED

        # change of at least one parent
        if succ.parents() != source.parents():
            effects |= PARENTCHANGED

        # change in the remaining extras, ignoring blacklisted keys
        succmeta = list(filter(metanotblacklisted, succ.extra().items()))
        srcmeta = list(filter(metanotblacklisted, source.extra().items()))
        if succmeta != srcmeta:
            effects |= METACHANGED

        # change in the diff introduced by the changeset
        if not _cmpdiff(source, succ):
            effects |= DIFFCHANGED

    return effects
459
459
def getobsoleted(repo, tr):
    """return the set of pre-existing revisions obsoleted by a transaction"""
    # resolve nodes against the unfiltered changelog so hidden revisions
    # are found too
    torev = repo.unfiltered().changelog.nodemap.get
    phase = repo._phasecache.phase
    succsmarkers = repo.obsstore.successors.get
    public = phases.public
    addedmarkers = tr.changes.get('obsmarkers')
    addedrevs = tr.changes.get('revs')
    seenrevs = set()
    obsoleted = set()
    for mark in addedmarkers:
        node = mark[0]
        rev = torev(node)
        # skip unknown nodes, already-processed revs, and revisions added by
        # this same transaction (those are not "pre-existing")
        if rev is None or rev in seenrevs or rev in addedrevs:
            continue
        seenrevs.add(rev)
        # skip public changesets: they are not reported as obsoleted
        if phase(repo, rev) == public:
            continue
        # report the rev only if every successor marker about this node was
        # added by this transaction
        if set(succsmarkers(node) or []).issubset(addedmarkers):
            obsoleted.add(rev)
    return obsoleted
481
481
class _succs(list):
    """small class to represent a successors with some metadata about it"""

    def __init__(self, *args, **kwargs):
        super(_succs, self).__init__(*args, **kwargs)
        # obsolescence markers associated with this successors set
        # NOTE(review): populated externally, presumably by successorssets --
        # confirm against callers
        self.markers = set()

    def copy(self):
        # copy the list content and give the copy its own markers set
        new = _succs(self)
        new.markers = self.markers.copy()
        return new

    @util.propertycache
    def _set(self):
        # immutable; cached on first access, so the list content must not
        # change after _set has been computed
        return set(self)

    def canmerge(self, other):
        # True when every node of self also appears in other
        return self._set.issubset(other._set)
501
501
def successorssets(repo, initialnode, closest=False, cache=None):
    """Return set of all latest successors of initial nodes

    The successors set of a changeset A are the group of revisions that succeed
    A. It succeeds A as a consistent whole, each revision being only a partial
    replacement. By default, the successors set contains non-obsolete
    changesets only, walking the obsolescence graph until reaching a leaf. If
    'closest' is set to True, closest successors-sets are return (the
    obsolescence walk stops on known changesets).

    This function returns the full list of successor sets which is why it
    returns a list of tuples and not just a single tuple. Each tuple is a valid
    successors set. Note that (A,) may be a valid successors set for changeset A
    (see below).

    In most cases, a changeset A will have a single element (e.g. the changeset
    A is replaced by A') in its successors set. Though, it is also common for a
    changeset A to have no elements in its successor set (e.g. the changeset
    has been pruned). Therefore, the returned list of successors sets will be
    [(A',)] or [], respectively.

    When a changeset A is split into A' and B', however, it will result in a
    successors set containing more than a single element, i.e. [(A',B')].
    Divergent changesets will result in multiple successors sets, i.e. [(A',),
    (A'')].

    If a changeset A is not obsolete, then it will conceptually have no
    successors set. To distinguish this from a pruned changeset, the successor
    set will contain itself only, i.e. [(A,)].

    Finally, final successors unknown locally are considered to be pruned
    (pruned: obsoleted without any successors). (Final: successors not affected
    by markers).

    The 'closest' mode respect the repoview filtering. For example, without
    filter it will stop at the first locally known changeset, with 'visible'
    filter it will stop on visible changesets).

    The optional `cache` parameter is a dictionary that may contains
    precomputed successors sets. It is meant to reuse the computation of a
    previous call to `successorssets` when multiple calls are made at the same
    time. The cache dictionary is updated in place. The caller is responsible
    for its life span. Code that makes multiple calls to `successorssets`
    *should* use this cache mechanism or risk a performance hit.

    Since results are different depending of the 'closest' most, the same cache
    cannot be reused for both mode.
    """

    succmarkers = repo.obsstore.successors

    # Stack of nodes we search successors sets for
    toproceed = [initialnode]
    # set version of above list for fast loop detection
    # element added to "toproceed" must be added here
    stackedset = set(toproceed)
    if cache is None:
        cache = {}

    # This while loop is the flattened version of a recursive search for
    # successors sets
    #
    # def successorssets(x):
    #    successors = directsuccessors(x)
    #    ss = [[]]
    #    for succ in directsuccessors(x):
    #        # product as in itertools cartesian product
    #        ss = product(ss, successorssets(succ))
    #    return ss
    #
    # But we can not use plain recursive calls here:
    # - that would blow the python call stack
    # - obsolescence markers may have cycles, we need to handle them.
    #
    # The `toproceed` list act as our call stack. Every node we search
    # successors set for are stacked there.
    #
    # The `stackedset` is set version of this stack used to check if a node is
    # already stacked. This check is used to detect cycles and prevent infinite
    # loop.
    #
    # successors set of all nodes are stored in the `cache` dictionary.
    #
    # After this while loop ends we use the cache to return the successors sets
    # for the node requested by the caller.
    while toproceed:
        # Every iteration tries to compute the successors sets of the topmost
        # node of the stack: CURRENT.
        #
        # There are four possible outcomes:
        #
        # 1) We already know the successors sets of CURRENT:
        #    -> mission accomplished, pop it from the stack.
        # 2) Stop the walk:
        #    default case: Node is not obsolete
        #    closest case: Node is known at this repo filter level
        #    -> the node is its own successors sets. Add it to the cache.
        # 3) We do not know successors set of direct successors of CURRENT:
        #    -> We add those successors to the stack.
        # 4) We know successors sets of all direct successors of CURRENT:
        #    -> We can compute CURRENT successors set and add it to the
        #       cache.
        #
        current = toproceed[-1]

        # case 2 condition is a bit hairy because of closest,
        # we compute it on its own
        case2condition = ((current not in succmarkers)
                          or (closest and current != initialnode
                              and current in repo))

        if current in cache:
            # case (1): We already know the successors sets
            stackedset.remove(toproceed.pop())
        elif case2condition:
            # case (2): end of walk.
            if current in repo:
                # We have a valid successors.
                cache[current] = [_succs((current,))]
            else:
                # Final obsolete version is unknown locally.
                # Do not count that as a valid successors
                cache[current] = []
        else:
            # cases (3) and (4)
            #
            # We proceed in two phases. Phase 1 aims to distinguish case (3)
            # from case (4):
            #
            #     For each direct successors of CURRENT, we check whether its
            #     successors sets are known. If they are not, we stack the
            #     unknown node and proceed to the next iteration of the while
            #     loop. (case 3)
            #
            #     During this step, we may detect obsolescence cycles: a node
            #     with unknown successors sets but already in the call stack.
            #     In such a situation, we arbitrary set the successors sets of
            #     the node to nothing (node pruned) to break the cycle.
            #
            #     If no break was encountered we proceed to phase 2.
            #
            # Phase 2 computes successors sets of CURRENT (case 4); see details
            # in phase 2 itself.
            #
            # Note the two levels of iteration in each phase.
            # - The first one handles obsolescence markers using CURRENT as
            #   precursor (successors markers of CURRENT).
            #
            #   Having multiple entry here means divergence.
            #
            # - The second one handles successors defined in each marker.
            #
            #   Having none means pruned node, multiple successors means split,
            #   single successors are standard replacement.
            #
            for mark in sorted(succmarkers[current]):
                for suc in mark[1]:
                    if suc not in cache:
                        if suc in stackedset:
                            # cycle breaking
                            cache[suc] = []
                        else:
                            # case (3) If we have not computed successors sets
                            # of one of those successors we add it to the
                            # `toproceed` stack and stop all work for this
                            # iteration.
                            toproceed.append(suc)
                            stackedset.add(suc)
                            break
                else:
                    continue
                break
            else:
                # case (4): we know all successors sets of all direct
                # successors
                #
                # Successors set contributed by each marker depends on the
                # successors sets of all its "successors" node.
                #
                # Each different marker is a divergence in the obsolescence
                # history. It contributes successors sets distinct from other
                # markers.
                #
                # Within a marker, a successor may have divergent successors
                # sets. In such a case, the marker will contribute multiple
                # divergent successors sets. If multiple successors have
                # divergent successors sets, a Cartesian product is used.
                #
                # At the end we post-process successors sets to remove
                # duplicated entry and successors set that are strict subset of
                # another one.
                succssets = []
                for mark in sorted(succmarkers[current]):
                    # successors sets contributed by this marker
                    base = _succs()
                    base.markers.add(mark)
                    markss = [base]
                    for suc in mark[1]:
                        # cardinal product with previous successors
                        productresult = []
                        for prefix in markss:
                            for suffix in cache[suc]:
                                newss = prefix.copy()
                                newss.markers.update(suffix.markers)
                                for part in suffix:
                                    # do not duplicated entry in successors set
                                    # first entry wins.
                                    if part not in newss:
                                        newss.append(part)
                                productresult.append(newss)
                        markss = productresult
                    succssets.extend(markss)
                # remove duplicated and subset
                seen = []
                final = []
                candidates = sorted((s for s in succssets if s),
                                    key=len, reverse=True)
                for cand in candidates:
                    for seensuccs in seen:
                        if cand.canmerge(seensuccs):
                            seensuccs.markers.update(cand.markers)
                            break
                    else:
                        final.append(cand)
                        seen.append(cand)
                final.reverse()  # put small successors set first
                cache[current] = final
    return cache[initialnode]
730
730
def successorsandmarkers(repo, ctx):
    """compute the raw data needed for computing obsfate

    Returns None if `ctx` is not obsolete, otherwise a list of dicts, one per
    successors set, each of the form
    {'successors': _succs, 'markers': set-of-markers}.
    """
    if not ctx.obsolete():
        return None

    ssets = successorssets(repo, ctx.node(), closest=True)

    # closestsuccessors returns an empty list for pruned revisions, remap it
    # into a list containing an empty list for future processing
    if ssets == []:
        ssets = [[]]

    # Try to recover pruned markers
    succsmap = repo.obsstore.successors
    fullsuccessorsets = []  # successor set + markers
    for sset in ssets:
        if sset:
            fullsuccessorsets.append(sset)
        else:
            # successorsset return an empty set() when ctx or one of its
            # successors is pruned.
            # In this case, walk the obs-markers tree again starting with ctx
            # and find the relevant pruning obs-makers, the ones without
            # successors.
            # Having these markers allow us to compute some information about
            # its fate, like who pruned this changeset and when.

            # XXX we do not catch all prune markers (eg rewritten then pruned)
            # (fix me later)
            foundany = False
            for mark in succsmap.get(ctx.node(), ()):
                if not mark[1]:
                    # mark[1] is the marker's successors tuple; empty means a
                    # prune marker
                    foundany = True
                    sset = _succs()
                    sset.markers.add(mark)
                    fullsuccessorsets.append(sset)
            if not foundany:
                fullsuccessorsets.append(_succs())

    values = []
    for sset in fullsuccessorsets:
        values.append({'successors': sset, 'markers': sset.markers})

    return values
777
777
778 def _getobsfate(successorssets):
778 def _getobsfate(successorssets):
779 """ Compute a changeset obsolescence fate based on its successorssets.
779 """ Compute a changeset obsolescence fate based on its successorssets.
780 Successors can be the tipmost ones or the immediate ones. This function
780 Successors can be the tipmost ones or the immediate ones. This function
781 return values are not meant to be shown directly to users, it is meant to
781 return values are not meant to be shown directly to users, it is meant to
782 be used by internal functions only.
782 be used by internal functions only.
783 Returns one fate from the following values:
783 Returns one fate from the following values:
784 - pruned
784 - pruned
785 - diverged
785 - diverged
786 - superseded
786 - superseded
787 - superseded_split
787 - superseded_split
788 """
788 """
789
789
790 if len(successorssets) == 0:
790 if len(successorssets) == 0:
791 # The commit has been pruned
791 # The commit has been pruned
792 return 'pruned'
792 return 'pruned'
793 elif len(successorssets) > 1:
793 elif len(successorssets) > 1:
794 return 'diverged'
794 return 'diverged'
795 else:
795 else:
796 # No divergence, only one set of successors
796 # No divergence, only one set of successors
797 successors = successorssets[0]
797 successors = successorssets[0]
798
798
799 if len(successors) == 1:
799 if len(successors) == 1:
800 return 'superseded'
800 return 'superseded'
801 else:
801 else:
802 return 'superseded_split'
802 return 'superseded_split'
803
803
def obsfateverb(successorset, markers):
    """ Return the verb summarizing the successorset and potentially using
    information from the markers

    `markers` is currently unused but kept for interface stability with other
    obsfate helpers that take (successorset, markers).
    """
    if not successorset:
        verb = 'pruned'
    elif len(successorset) == 1:
        verb = 'rewritten'
    else:
        verb = 'split'
    return verb
815
815
def markersdates(markers):
    """returns the list of dates for a list of markers

    A marker's date lives at index 4 of the marker tuple.
    """
    return [m[4] for m in markers]
820
820
def markersusers(markers):
    """ Returns a sorted list of markers users without duplicates
    """
    # m[3] is the marker's metadata, a list of (key, value) pairs
    markersmeta = [dict(m[3]) for m in markers]
    # drop entries with no 'user' key or an empty user value
    users = set(meta.get('user') for meta in markersmeta if meta.get('user'))

    return sorted(users)
828
828
def markersoperations(markers):
    """ Returns a sorted list of markers operations without duplicates
    """
    # m[3] is the marker's metadata, a list of (key, value) pairs
    markersmeta = [dict(m[3]) for m in markers]
    # drop entries with no 'operation' key or an empty operation value
    operations = set(meta.get('operation') for meta in markersmeta
                     if meta.get('operation'))

    return sorted(operations)
837
837
def obsfateprinter(ui, repo, successors, markers, formatctx):
    """ Build a obsfate string for a single successorset using all obsfate
    related function defined in obsutil

    `formatctx` is a callable turning a changectx into its display string.
    """
    quiet = ui.quiet
    verbose = ui.verbose
    normal = not verbose and not quiet

    line = []

    # Verb
    line.append(obsfateverb(successors, markers))

    # Operations
    operations = markersoperations(markers)
    if operations:
        line.append(" using %s" % ", ".join(operations))

    # Successors
    if successors:
        fmtsuccessors = [formatctx(repo[succ]) for succ in successors]
        line.append(" as %s" % ", ".join(fmtsuccessors))

    # Users
    users = markersusers(markers)
    # Filter out current user in not verbose mode to reduce amount of
    # information
    if not verbose:
        currentuser = ui.username(acceptempty=True)
        if len(users) == 1 and currentuser in users:
            users = None

    if (verbose or normal) and users:
        line.append(" by %s" % ", ".join(users))

    # Date
    dates = markersdates(markers)

    if dates and verbose:
        min_date = min(dates)
        max_date = max(dates)

        if min_date == max_date:
            fmtmin_date = dateutil.datestr(min_date, '%Y-%m-%d %H:%M %1%2')
            line.append(" (at %s)" % fmtmin_date)
        else:
            fmtmin_date = dateutil.datestr(min_date, '%Y-%m-%d %H:%M %1%2')
            fmtmax_date = dateutil.datestr(max_date, '%Y-%m-%d %H:%M %1%2')
            line.append(" (between %s and %s)" % (fmtmin_date, fmtmax_date))

    return "".join(line)
889
889
890
890
# Map an obsolescence fate (see _getobsfate) to the user-facing message
# explaining why a hidden revision is filtered out.
filteredmsgtable = {
    "pruned": _("hidden revision '%s' is pruned"),
    "diverged": _("hidden revision '%s' has diverged"),
    "superseded": _("hidden revision '%s' was rewritten as: %s"),
    "superseded_split": _("hidden revision '%s' was split as: %s"),
    "superseded_split_several": _("hidden revision '%s' was split as: %s and "
                                  "%d more"),
}
899
899
def _getfilteredreason(repo, changeid, ctx):
    """return a human-friendly string on why a obsolete changeset is hidden
    """
    successors = successorssets(repo, ctx.node())
    fate = _getobsfate(successors)

    # Be more precise in case the revision is superseded
    if fate == 'pruned':
        return filteredmsgtable['pruned'] % changeid
    elif fate == 'diverged':
        return filteredmsgtable['diverged'] % changeid
    elif fate == 'superseded':
        single_successor = nodemod.short(successors[0][0])
        return filteredmsgtable['superseded'] % (changeid, single_successor)
    elif fate == 'superseded_split':

        succs = []
        for node_id in successors[0]:
            succs.append(nodemod.short(node_id))

        if len(succs) <= 2:
            fmtsuccs = ', '.join(succs)
            return filteredmsgtable['superseded_split'] % (changeid, fmtsuccs)
        else:
            # show only the first two successors and count the rest
            firstsuccessors = ', '.join(succs[:2])
            remainingnumber = len(succs) - 2

            args = (changeid, firstsuccessors, remainingnumber)
            return filteredmsgtable['superseded_split_several'] % args
929
929
def divergentsets(repo, ctx):
    """Compute sets of commits divergent with a given one

    Returns a list of dicts: {'divergentnodes': successors-set,
    'commonpredecessor': node} for every successors set of a predecessor of
    `ctx` that does not contain `ctx` itself.
    """
    cache = {}
    base = {}
    for n in allpredecessors(repo.obsstore, [ctx.node()]):
        if n == ctx.node():
            # a node can't be a base for divergence with itself
            continue
        # NOTE: pass the shared cache as a keyword argument; the third
        # positional parameter of successorssets() is `closest`, not `cache`,
        # so passing it positionally would silently enable closest-mode and
        # never reuse the cache.
        nsuccsets = successorssets(repo, n, cache=cache)
        for nsuccset in nsuccsets:
            if ctx.node() in nsuccset:
                # we are only interested in *other* successor sets
                continue
            if tuple(nsuccset) in base:
                # we already know the latest base for this divergency
                continue
            base[tuple(nsuccset)] = n
    return [{'divergentnodes': divset, 'commonpredecessor': b}
            for divset, b in base.iteritems()]
949
949
def whyunstable(repo, ctx):
    """return a list of dicts explaining each instability affecting `ctx`

    Each dict carries an 'instability' kind ('orphan', 'phase-divergent' or
    'content-divergent'), a human-readable 'reason' and the hex 'node' of the
    changeset causing it.
    """
    result = []
    if ctx.orphan():
        for parent in ctx.parents():
            kind = None
            if parent.orphan():
                kind = 'orphan'
            elif parent.obsolete():
                kind = 'obsolete'
            if kind is not None:
                result.append({'instability': 'orphan',
                               'reason': '%s parent' % kind,
                               'node': parent.hex()})
    if ctx.phasedivergent():
        # ignore bumpedfix markers: they record the fix, not the divergence
        predecessors = allpredecessors(repo.obsstore, [ctx.node()],
                                       ignoreflags=bumpedfix)
        immutable = [repo[p] for p in predecessors
                     if p in repo and not repo[p].mutable()]
        for predecessor in immutable:
            result.append({'instability': 'phase-divergent',
                           'reason': 'immutable predecessor',
                           'node': predecessor.hex()})
    if ctx.contentdivergent():
        dsets = divergentsets(repo, ctx)
        for dset in dsets:
            divnodes = [repo[n] for n in dset['divergentnodes']]
            result.append({'instability': 'content-divergent',
                           'divergentnodes': divnodes,
                           'reason': 'predecessor',
                           'node': nodemod.hex(dset['commonpredecessor'])})
    return result
General Comments 0
You need to be logged in to leave comments. Login now