##// END OF EJS Templates
obsutil: don't assume leftctx and rightctx repo as same...
Pulkit Goyal -
r41862:9de6c4f6 default
parent child Browse files
Show More
@@ -1,984 +1,987 b''
1 # obsutil.py - utility functions for obsolescence
1 # obsutil.py - utility functions for obsolescence
2 #
2 #
3 # Copyright 2017 Boris Feld <boris.feld@octobus.net>
3 # Copyright 2017 Boris Feld <boris.feld@octobus.net>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import re
10 import re
11
11
12 from .i18n import _
12 from .i18n import _
13 from . import (
13 from . import (
14 diffutil,
14 diffutil,
15 encoding,
15 encoding,
16 node as nodemod,
16 node as nodemod,
17 phases,
17 phases,
18 util,
18 util,
19 )
19 )
20 from .utils import (
20 from .utils import (
21 dateutil,
21 dateutil,
22 )
22 )
23
23
### obsolescence marker flag

## bumpedfix flag
#
# When a changeset A' succeeds a changeset A which became public, we call A'
# "bumped" because it's a successor of a public changeset
#
# o A' (bumped)
# |`:
# | o A
# |/
# o Z
#
# The way to solve this situation is to create a new changeset Ad as a child
# of A. This changeset has the same content as A'. So the diff from A to A'
# is the same as the diff from A to Ad. Ad is marked as a successor of A'
#
# o Ad
# |`:
# | x A'
# |'|
# o | A
# |/
# o Z
#
# But by transitivity Ad is also a successor of A. To avoid having Ad marked
# as bumped too, we add the `bumpedfix` flag to the marker. <A', (Ad,)>.
# This flag means that the successors express the changes between the public
# and bumped version and fix the situation, breaking the transitivity of
# "bumped" here.
bumpedfix = 1
usingsha256 = 2
class marker(object):
    """Wrap obsolete marker raw data

    The raw marker tuple layout is:
    (prednode, succnodes, flags, metadata, date, parents)
    """

    def __init__(self, repo, data):
        # the repo argument will be used to create changectx in later version
        self._repo = repo
        self._data = data
        # lazily-decoded metadata cache (unused here, kept for compatibility)
        self._decodedmeta = None

    def __hash__(self):
        return hash(self._data)

    def __eq__(self, other):
        # only markers of the same class with identical raw data are equal
        if type(other) != type(self):
            return False
        return self._data == other._data

    def prednode(self):
        """Predecessor changeset node identifier"""
        return self._data[0]

    def succnodes(self):
        """List of successor changesets node identifiers"""
        return self._data[1]

    def parentnodes(self):
        """Parents of the predecessors (None if not recorded)"""
        return self._data[5]

    def metadata(self):
        """Decoded metadata dictionary"""
        return dict(self._data[3])

    def date(self):
        """Creation date as (unixtime, offset)"""
        return self._data[4]

    def flags(self):
        """The flags field of the marker"""
        return self._data[2]
def getmarkers(repo, nodes=None, exclusive=False):
    """returns markers known in a repository

    If <nodes> is specified, only markers "relevant" to those nodes are
    returned"""
    if nodes is None:
        # no filtering requested: walk the whole store
        rawmarkers = repo.obsstore
    elif exclusive:
        rawmarkers = exclusivemarkers(repo, nodes)
    else:
        rawmarkers = repo.obsstore.relevantmarkers(nodes)

    for markerdata in rawmarkers:
        yield marker(repo, markerdata)
def closestpredecessors(repo, nodeid):
    """yield the list of next predecessors pointing on visible changectx nodes

    This function respect the repoview filtering, filtered revision will be
    considered missing.
    """

    precursors = repo.obsstore.predecessors
    stack = [nodeid]
    seen = set(stack)

    while stack:
        current = stack.pop()
        currentpreccs = precursors.get(current, ())

        for prec in currentpreccs:
            precnodeid = prec[0]

            # Basic cycle protection
            if precnodeid in seen:
                continue
            seen.add(precnodeid)

            if precnodeid in repo:
                # locally-known and visible: this is a closest predecessor
                yield precnodeid
            else:
                # unknown/filtered: keep walking further back
                stack.append(precnodeid)
def allpredecessors(obsstore, nodes, ignoreflags=0):
    """Yield node for every precursors of <nodes>.

    Some precursors may be unknown locally.

    This is a linear yield unsuited to detecting folded changesets. It includes
    initial nodes too."""

    remaining = set(nodes)
    seen = set(remaining)
    prec = obsstore.predecessors.get
    while remaining:
        current = remaining.pop()
        yield current
        for mark in prec(current, ()):
            # ignore marker flagged with specified flag
            if mark[2] & ignoreflags:
                continue
            # mark[0] is the predecessor node of this marker
            suc = mark[0]
            if suc not in seen:
                seen.add(suc)
                remaining.add(suc)
def allsuccessors(obsstore, nodes, ignoreflags=0):
    """Yield node for every successor of <nodes>.

    Some successors may be unknown locally.

    This is a linear yield unsuited to detecting split changesets. It includes
    initial nodes too."""
    remaining = set(nodes)
    seen = set(remaining)
    while remaining:
        current = remaining.pop()
        yield current
        for mark in obsstore.successors.get(current, ()):
            # ignore marker flagged with specified flag
            if mark[2] & ignoreflags:
                continue
            # mark[1] is the tuple of successor nodes
            for suc in mark[1]:
                if suc not in seen:
                    seen.add(suc)
                    remaining.add(suc)
185 def _filterprunes(markers):
185 def _filterprunes(markers):
186 """return a set with no prune markers"""
186 """return a set with no prune markers"""
187 return set(m for m in markers if m[1])
187 return set(m for m in markers if m[1])
188
188
def exclusivemarkers(repo, nodes):
    """set of markers relevant to "nodes" but no other locally-known nodes

    This function compute the set of markers "exclusive" to a locally-known
    node. This means we walk the markers starting from <nodes> until we reach a
    locally-known precursors outside of <nodes>. Element of <nodes> with
    locally-known successors outside of <nodes> are ignored (since their
    precursors markers are also relevant to these successors).

    For example:

        # (A0 rewritten as A1)
        #
        # A0 <-1- A1 # Marker "1" is exclusive to A1

    or

        # (A0 rewritten as AX; AX rewritten as A1; AX is unknown locally)
        #
        # <-1- A0 <-2- AX <-3- A1 # Marker "2,3" are exclusive to A1

    or

        # (A0 has unknown precursors, A0 rewritten as A1 and A2 (divergence))
        #
        #          <-2- A1 # Marker "2" is exclusive to A0,A1
        #        /
        # <-1- A0
        #        \
        #         <-3- A2 # Marker "3" is exclusive to A0,A2
        #
        # in addition:
        #
        #  Markers "2,3" are exclusive to A1,A2
        #  Markers "1,2,3" are exclusive to A0,A1,A2

    See test/test-obsolete-bundle-strip.t for more examples.

    An example usage is strip. When stripping a changeset, we also want to
    strip the markers exclusive to this changeset. Otherwise we would have
    "dangling" obsolescence markers from its precursors: Obsolescence markers
    marking a node as obsolete without any successors available locally.

    As for relevant markers, the prune markers for children will be followed.
    Of course, they will only be followed if the pruned children is
    locally-known. Since the prune markers are relevant to the pruned node.
    However, while prune markers are considered relevant to the parent of the
    pruned changesets, prune markers for locally-known changeset (with no
    successors) are considered exclusive to the pruned nodes. This allows
    to strip the prune markers (with the rest of the exclusive chain) alongside
    the pruned changesets.
    """
    # running on a filtered repository would be dangerous as markers could be
    # reported as exclusive when they are relevant for other filtered nodes.
    unfi = repo.unfiltered()

    # shortcut to various useful item
    nm = unfi.changelog.nodemap
    precursorsmarkers = unfi.obsstore.predecessors
    successormarkers = unfi.obsstore.successors
    childrenmarkers = unfi.obsstore.children

    # exclusive markers (return of the function)
    exclmarkers = set()
    # we need fast membership testing
    nodes = set(nodes)
    # looking for head in the obshistory
    #
    # XXX we are ignoring all issues in regard with cycle for now.
    stack = [n for n in nodes if not _filterprunes(successormarkers.get(n, ()))]
    stack.sort()
    # nodes already stacked
    seennodes = set(stack)
    while stack:
        current = stack.pop()
        # fetch precursors markers
        markers = list(precursorsmarkers.get(current, ()))
        # extend the list with prune markers
        for mark in successormarkers.get(current, ()):
            if not mark[1]:
                markers.append(mark)
        # and markers from children (looking for prune)
        for mark in childrenmarkers.get(current, ()):
            if not mark[1]:
                markers.append(mark)
        # traverse the markers
        for mark in markers:
            if mark in exclmarkers:
                # markers already selected
                continue

            # If the markers is about the current node, select it
            #
            # (this delay the addition of markers from children)
            if mark[1] or mark[0] == current:
                exclmarkers.add(mark)

            # should we keep traversing through the precursors?
            prec = mark[0]

            # nodes in the stack or already processed
            if prec in seennodes:
                continue

            # is this a locally known node ?
            known = prec in nm
            # if locally-known and not in the <nodes> set the traversal
            # stop here.
            if known and prec not in nodes:
                continue

            # do not keep going if there are unselected markers pointing to this
            # nodes. If we end up traversing these unselected markers later the
            # node will be taken care of at that point.
            precmarkers = _filterprunes(successormarkers.get(prec, ()))
            if precmarkers.issubset(exclmarkers):
                seennodes.add(prec)
                stack.append(prec)

    return exclmarkers
def foreground(repo, nodes):
    """return all nodes in the "foreground" of other node

    The foreground of a revision is anything reachable using parent -> children
    or precursor -> successor relation. It is very similar to "descendant" but
    augmented with obsolescence information.

    Beware that possible obsolescence cycle may result if complex situation.
    """
    repo = repo.unfiltered()
    foreground = set(repo.set('%ln::', nodes))
    if repo.obsstore:
        # We only need this complicated logic if there is obsolescence
        # XXX will probably deserve an optimised revset.
        nm = repo.changelog.nodemap
        plen = -1
        # compute the whole set of successors or descendants until the set
        # reaches a fixed point
        while len(foreground) != plen:
            plen = len(foreground)
            succs = set(c.node() for c in foreground)
            mutable = [c.node() for c in foreground if c.mutable()]
            succs.update(allsuccessors(repo.obsstore, mutable))
            known = (n for n in succs if n in nm)
            foreground = set(repo.set('%ln::', known))
    return set(c.node() for c in foreground)
# effectflag field
#
# Effect-flag is a 1-byte bit field used to store what changed between a
# changeset and its successor(s).
#
# The effect flag is stored in obs-markers metadata while we iterate on the
# information design. That's why we have the EFFECTFLAGFIELD. If we come up
# with an incompatible design for effect flag, we can store a new design under
# another field name so we don't break readers. We plan to extend the existing
# obsmarkers bit-field when the effect flag design will be stabilized.
#
# The effect-flag is placed behind an experimental flag
# `effect-flags` set to off by default.
#

EFFECTFLAGFIELD = "ef1"

DESCCHANGED = 1 << 0 # action changed the description
METACHANGED = 1 << 1 # action change the meta
PARENTCHANGED = 1 << 2 # action change the parent
DIFFCHANGED = 1 << 3 # action change diff introduced by the changeset
USERCHANGED = 1 << 4 # the user changed
DATECHANGED = 1 << 5 # the date changed
BRANCHCHANGED = 1 << 6 # the branch changed

# extra keys that must not be compared when computing METACHANGED (they are
# rewrite bookkeeping, not user-meaningful metadata)
METABLACKLIST = [
    re.compile('^branch$'),
    re.compile('^.*-source$'),
    re.compile('^.*_source$'),
    re.compile('^source$'),
]
def metanotblacklisted(metaitem):
    """ Check that the key of a meta item (extrakey, extravalue) does not
    match at least one of the blacklist pattern
    """
    metakey = metaitem[0]

    return not any(pattern.match(metakey) for pattern in METABLACKLIST)
376 def _prepare_hunk(hunk):
376 def _prepare_hunk(hunk):
377 """Drop all information but the username and patch"""
377 """Drop all information but the username and patch"""
378 cleanhunk = []
378 cleanhunk = []
379 for line in hunk.splitlines():
379 for line in hunk.splitlines():
380 if line.startswith(b'# User') or not line.startswith(b'#'):
380 if line.startswith(b'# User') or not line.startswith(b'#'):
381 if line.startswith(b'@@'):
381 if line.startswith(b'@@'):
382 line = b'@@\n'
382 line = b'@@\n'
383 cleanhunk.append(line)
383 cleanhunk.append(line)
384 return cleanhunk
384 return cleanhunk
385
385
def _getdifflines(iterdiff):
    """return a cleaned up lines

    Returns None once the diff iterator is exhausted."""
    lines = next(iterdiff, None)

    if lines is None:
        return lines

    return _prepare_hunk(lines)
def _cmpdiff(leftctx, rightctx):
    """return True if both ctx introduce the "same diff"

    This is a first and basic implementation, with many shortcoming.
    """
    diffopts = diffutil.diffallopts(leftctx.repo().ui, {'git': True})

    # Leftctx or right ctx might be filtered, so we need to use the contexts
    # with an unfiltered repository to safely compute the diff

    # leftctx and rightctx can be from different repository views in case of
    # hgsubversion, so don't try to access them from same repository
    # rightctx.repo() and leftctx.repo() are not always the same
    leftunfi = leftctx._repo.unfiltered()[leftctx.rev()]
    leftdiff = leftunfi.diff(opts=diffopts)
    rightunfi = rightctx._repo.unfiltered()[rightctx.rev()]
    rightdiff = rightunfi.diff(opts=diffopts)

    left, right = (0, 0)
    while None not in (left, right):
        left = _getdifflines(leftdiff)
        right = _getdifflines(rightdiff)

        if left != right:
            return False
    return True
def geteffectflag(source, successors):
    """ From an obs-marker relation, compute what changed between the
    predecessor and the successor.

    Returns a bitmask of the *CHANGED effect-flag constants."""
    effects = 0

    for changectx in successors:
        # Check if description has changed
        if changectx.description() != source.description():
            effects |= DESCCHANGED

        # Check if user has changed
        if changectx.user() != source.user():
            effects |= USERCHANGED

        # Check if date has changed
        if changectx.date() != source.date():
            effects |= DATECHANGED

        # Check if branch has changed
        if changectx.branch() != source.branch():
            effects |= BRANCHCHANGED

        # Check if at least one of the parent has changed
        if changectx.parents() != source.parents():
            effects |= PARENTCHANGED

        # Check if other meta has changed (ignoring blacklisted rewrite
        # bookkeeping keys)
        changeextra = changectx.extra().items()
        ctxmeta = list(filter(metanotblacklisted, changeextra))

        sourceextra = source.extra().items()
        srcmeta = list(filter(metanotblacklisted, sourceextra))

        if ctxmeta != srcmeta:
            effects |= METACHANGED

        # Check if the diff has changed
        if not _cmpdiff(source, changectx):
            effects |= DIFFCHANGED

    return effects
def getobsoleted(repo, tr):
    """return the set of pre-existing revisions obsoleted by a transaction"""
    torev = repo.unfiltered().changelog.nodemap.get
    phase = repo._phasecache.phase
    succsmarkers = repo.obsstore.successors.get
    public = phases.public
    addedmarkers = tr.changes['obsmarkers']
    origrepolen = tr.changes['origrepolen']
    seenrevs = set()
    obsoleted = set()
    for mark in addedmarkers:
        node = mark[0]
        rev = torev(node)
        # skip unknown nodes, duplicates, and revisions added by this very
        # transaction (only *pre-existing* revisions count)
        if rev is None or rev in seenrevs or rev >= origrepolen:
            continue
        seenrevs.add(rev)
        # public changesets cannot be obsoleted
        if phase(repo, rev) == public:
            continue
        # only report the node obsoleted if every marker making it obsolete
        # was added by this transaction
        if set(succsmarkers(node) or []).issubset(addedmarkers):
            obsoleted.add(rev)
    return obsoleted
class _succs(list):
    """small class to represent a successors with some metadata about it"""

    def __init__(self, *args, **kwargs):
        super(_succs, self).__init__(*args, **kwargs)
        # markers that justify this successors set
        self.markers = set()

    def copy(self):
        new = _succs(self)
        new.markers = self.markers.copy()
        return new

    @util.propertycache
    def _set(self):
        # immutable set view of the nodes, cached for repeated canmerge calls
        return set(self)

    def canmerge(self, other):
        return self._set.issubset(other._set)
def successorssets(repo, initialnode, closest=False, cache=None):
    """Return set of all latest successors of initial nodes

    The successors set of a changeset A are the group of revisions that succeed
    A. It succeeds A as a consistent whole, each revision being only a partial
    replacement. By default, the successors set contains non-obsolete
    changesets only, walking the obsolescence graph until reaching a leaf. If
    'closest' is set to True, closest successors-sets are returned (the
    obsolescence walk stops on known changesets).

    This function returns the full list of successor sets which is why it
    returns a list of tuples and not just a single tuple. Each tuple is a valid
    successors set. Note that (A,) may be a valid successors set for changeset A
    (see below).

    In most cases, a changeset A will have a single element (e.g. the changeset
    A is replaced by A') in its successors set. Though, it is also common for a
    changeset A to have no elements in its successor set (e.g. the changeset
    has been pruned). Therefore, the returned list of successors sets will be
    [(A',)] or [], respectively.

    When a changeset A is split into A' and B', however, it will result in a
    successors set containing more than a single element, i.e. [(A',B')].
    Divergent changesets will result in multiple successors sets, i.e. [(A',),
    (A'')].

    If a changeset A is not obsolete, then it will conceptually have no
    successors set. To distinguish this from a pruned changeset, the successor
    set will contain itself only, i.e. [(A,)].

    Finally, final successors unknown locally are considered to be pruned
    (pruned: obsoleted without any successors). (Final: successors not affected
    by markers).

    The 'closest' mode respects the repoview filtering. For example, without
    filter it will stop at the first locally known changeset, with 'visible'
    filter it will stop on visible changesets).

    The optional `cache` parameter is a dictionary that may contain
    precomputed successors sets. It is meant to reuse the computation of a
    previous call to `successorssets` when multiple calls are made at the same
    time. The cache dictionary is updated in place. The caller is responsible
    for its life span. Code that makes multiple calls to `successorssets`
    *should* use this cache mechanism or risk a performance hit.

    Since results are different depending on the 'closest' mode, the same
    cache cannot be reused for both modes.
    """

    succmarkers = repo.obsstore.successors

    # Stack of nodes we search successors sets for
    toproceed = [initialnode]
    # set version of above list for fast loop detection
    # element added to "toproceed" must be added here
    stackedset = set(toproceed)
    if cache is None:
        cache = {}

    # This while loop is the flattened version of a recursive search for
    # successors sets
    #
    # def successorssets(x):
    #     successors = directsuccessors(x)
    #     ss = [[]]
    #     for succ in directsuccessors(x):
    #         # product as in itertools cartesian product
    #         ss = product(ss, successorssets(succ))
    #     return ss
    #
    # But we can not use plain recursive calls here:
    # - that would blow the python call stack
    # - obsolescence markers may have cycles, we need to handle them.
    #
    # The `toproceed` list acts as our call stack. Every node we search
    # successors set for is stacked there.
    #
    # The `stackedset` is a set version of this stack used to check if a node
    # is already stacked. This check is used to detect cycles and prevent
    # infinite loops.
    #
    # successors sets of all nodes are stored in the `cache` dictionary.
    #
    # After this while loop ends we use the cache to return the successors sets
    # for the node requested by the caller.
    while toproceed:
        # Every iteration tries to compute the successors sets of the topmost
        # node of the stack: CURRENT.
        #
        # There are four possible outcomes:
        #
        # 1) We already know the successors sets of CURRENT:
        #    -> mission accomplished, pop it from the stack.
        # 2) Stop the walk:
        #    default case: Node is not obsolete
        #    closest case: Node is known at this repo filter level
        #    -> the node is its own successors sets. Add it to the cache.
        # 3) We do not know successors set of direct successors of CURRENT:
        #    -> We add those successors to the stack.
        # 4) We know successors sets of all direct successors of CURRENT:
        #    -> We can compute CURRENT successors set and add it to the
        #       cache.
        #
        current = toproceed[-1]

        # case 2 condition is a bit hairy because of closest,
        # we compute it on its own
        case2condition = ((current not in succmarkers)
                          or (closest and current != initialnode
                              and current in repo))

        if current in cache:
            # case (1): We already know the successors sets
            stackedset.remove(toproceed.pop())
        elif case2condition:
            # case (2): end of walk.
            if current in repo:
                # We have a valid successors.
                cache[current] = [_succs((current,))]
            else:
                # Final obsolete version is unknown locally.
                # Do not count that as a valid successors
                cache[current] = []
        else:
            # cases (3) and (4)
            #
            # We proceed in two phases. Phase 1 aims to distinguish case (3)
            # from case (4):
            #
            # For each direct successors of CURRENT, we check whether its
            # successors sets are known. If they are not, we stack the
            # unknown node and proceed to the next iteration of the while
            # loop. (case 3)
            #
            # During this step, we may detect obsolescence cycles: a node
            # with unknown successors sets but already in the call stack.
            # In such a situation, we arbitrarily set the successors sets of
            # the node to nothing (node pruned) to break the cycle.
            #
            # If no break was encountered we proceed to phase 2.
            #
            # Phase 2 computes successors sets of CURRENT (case 4); see details
            # in phase 2 itself.
            #
            # Note the two levels of iteration in each phase.
            # - The first one handles obsolescence markers using CURRENT as
            #   precursor (successors markers of CURRENT).
            #
            #   Having multiple entries here means divergence.
            #
            # - The second one handles successors defined in each marker.
            #
            #   Having none means pruned node, multiple successors means split,
            #   single successors are standard replacement.
            #
            for mark in sorted(succmarkers[current]):
                for suc in mark[1]:
                    if suc not in cache:
                        if suc in stackedset:
                            # cycle breaking
                            cache[suc] = []
                        else:
                            # case (3) If we have not computed successors sets
                            # of one of those successors we add it to the
                            # `toproceed` stack and stop all work for this
                            # iteration.
                            toproceed.append(suc)
                            stackedset.add(suc)
                            break
                else:
                    continue
                break
            else:
                # case (4): we know all successors sets of all direct
                # successors
                #
                # Successors set contributed by each marker depends on the
                # successors sets of all its "successors" node.
                #
                # Each different marker is a divergence in the obsolescence
                # history. It contributes successors sets distinct from other
                # markers.
                #
                # Within a marker, a successor may have divergent successors
                # sets. In such a case, the marker will contribute multiple
                # divergent successors sets. If multiple successors have
                # divergent successors sets, a Cartesian product is used.
                #
                # At the end we post-process successors sets to remove
                # duplicated entry and successors set that are strict subset of
                # another one.
                succssets = []
                for mark in sorted(succmarkers[current]):
                    # successors sets contributed by this marker
                    base = _succs()
                    base.markers.add(mark)
                    markss = [base]
                    for suc in mark[1]:
                        # cardinal product with previous successors
                        productresult = []
                        for prefix in markss:
                            for suffix in cache[suc]:
                                newss = prefix.copy()
                                newss.markers.update(suffix.markers)
                                for part in suffix:
                                    # do not duplicate entry in successors set
                                    # first entry wins.
                                    if part not in newss:
                                        newss.append(part)
                                productresult.append(newss)
                        if productresult:
                            markss = productresult
                    succssets.extend(markss)
                # remove duplicated and subset
                seen = []
                final = []
                candidates = sorted((s for s in succssets if s),
                                    key=len, reverse=True)
                for cand in candidates:
                    for seensuccs in seen:
                        if cand.canmerge(seensuccs):
                            seensuccs.markers.update(cand.markers)
                            break
                    else:
                        final.append(cand)
                        seen.append(cand)
                final.reverse()  # put small successors set first
                cache[current] = final
    return cache[initialnode]
733
736
def successorsandmarkers(repo, ctx):
    """compute the raw data needed for computing obsfate
    Returns a list of dict, one dict per successors set
    """
    if not ctx.obsolete():
        return None

    ssets = successorssets(repo, ctx.node(), closest=True)

    # closestsuccessors returns an empty list for pruned revisions; remap it
    # into a list containing one empty set so the loop below still runs once
    if ssets == []:
        ssets = [[]]

    # Try to recover pruned markers
    succsmap = repo.obsstore.successors
    complete = []  # successor sets, each carrying its markers
    for candidate in ssets:
        if candidate:
            complete.append(candidate)
            continue
        # successorsset returns an empty set() when ctx or one of its
        # successors is pruned.
        # In this case, walk the obs-markers tree again starting with ctx
        # and find the relevant pruning obs-markers, the ones without
        # successors.
        # Having these markers allows us to compute some information about
        # its fate, like who pruned this changeset and when.

        # XXX we do not catch all prune markers (eg rewritten then pruned)
        # (fix me later)
        prunemarkers = [m for m in succsmap.get(ctx.node(), ()) if not m[1]]
        for mark in prunemarkers:
            pruned = _succs()
            pruned.markers.add(mark)
            complete.append(pruned)
        if not prunemarkers:
            complete.append(_succs())

    return [{'successors': sset, 'markers': sset.markers}
            for sset in complete]
780
783
781 def _getobsfate(successorssets):
784 def _getobsfate(successorssets):
782 """ Compute a changeset obsolescence fate based on its successorssets.
785 """ Compute a changeset obsolescence fate based on its successorssets.
783 Successors can be the tipmost ones or the immediate ones. This function
786 Successors can be the tipmost ones or the immediate ones. This function
784 return values are not meant to be shown directly to users, it is meant to
787 return values are not meant to be shown directly to users, it is meant to
785 be used by internal functions only.
788 be used by internal functions only.
786 Returns one fate from the following values:
789 Returns one fate from the following values:
787 - pruned
790 - pruned
788 - diverged
791 - diverged
789 - superseded
792 - superseded
790 - superseded_split
793 - superseded_split
791 """
794 """
792
795
793 if len(successorssets) == 0:
796 if len(successorssets) == 0:
794 # The commit has been pruned
797 # The commit has been pruned
795 return 'pruned'
798 return 'pruned'
796 elif len(successorssets) > 1:
799 elif len(successorssets) > 1:
797 return 'diverged'
800 return 'diverged'
798 else:
801 else:
799 # No divergence, only one set of successors
802 # No divergence, only one set of successors
800 successors = successorssets[0]
803 successors = successorssets[0]
801
804
802 if len(successors) == 1:
805 if len(successors) == 1:
803 return 'superseded'
806 return 'superseded'
804 else:
807 else:
805 return 'superseded_split'
808 return 'superseded_split'
806
809
def obsfateverb(successorset, markers):
    """ Return the verb summarizing the successorset and potentially using
    information from the markers
    """
    # NOTE: `markers` is currently unused but kept for interface symmetry
    # with the other obsfate helpers
    if not successorset:
        return 'pruned'
    if len(successorset) == 1:
        return 'rewritten'
    return 'split'
818
821
def markersdates(markers):
    """returns the list of dates for a list of markers
    """
    # index 4 of a marker tuple holds its date
    return [marker[4] for marker in markers]
823
826
def markersusers(markers):
    """ Returns a sorted list of markers users without duplicates
    """
    users = set()
    for marker in markers:
        meta = dict(marker[3])
        # skip markers recorded without a user
        if meta.get('user'):
            users.add(encoding.tolocal(meta['user']))
    return sorted(users)
832
835
def markersoperations(markers):
    """ Returns a sorted list of markers operations without duplicates
    """
    operations = set()
    for marker in markers:
        meta = dict(marker[3])
        operation = meta.get('operation')
        # markers recorded without an operation are skipped
        if operation:
            operations.add(operation)
    return sorted(operations)
841
844
def obsfateprinter(ui, repo, successors, markers, formatctx):
    """ Build a obsfate string for a single successorset using all obsfate
    related function defined in obsutil
    """
    quiet = ui.quiet
    verbose = ui.verbose
    normal = not verbose and not quiet

    parts = []

    # verb summarizing the rewrite (pruned/rewritten/split)
    parts.append(obsfateverb(successors, markers))

    # operations recorded in the markers (amend, rebase, ...)
    operations = markersoperations(markers)
    if operations:
        parts.append(" using %s" % ", ".join(operations))

    # formatted successor changesets
    if successors:
        formatted = [formatctx(repo[succ]) for succ in successors]
        parts.append(" as %s" % ", ".join(formatted))

    # users who rewrote the changeset
    users = markersusers(markers)
    if not verbose:
        # filter out the current user in non-verbose mode to reduce the
        # amount of information
        currentuser = ui.username(acceptempty=True)
        if len(users) == 1 and currentuser in users:
            users = None
    if (verbose or normal) and users:
        parts.append(" by %s" % ", ".join(users))

    # dates, only shown in verbose mode
    dates = markersdates(markers)
    if dates and verbose:
        # compare raw dates (not formatted strings) to decide between the
        # single-date and the date-range renderings
        earliest, latest = min(dates), max(dates)
        datefmt = '%Y-%m-%d %H:%M %1%2'
        if earliest == latest:
            parts.append(" (at %s)" % dateutil.datestr(earliest, datefmt))
        else:
            parts.append(" (between %s and %s)"
                         % (dateutil.datestr(earliest, datefmt),
                            dateutil.datestr(latest, datefmt)))

    return "".join(parts)
893
896
894
897
# map of a changeset's obsolescence "fate" (as computed by _getobsfate) to
# the message template shown when someone tries to access that hidden
# revision; '%s'/'%d' placeholders receive the changeid and successor info
filteredmsgtable = {
    "pruned": _("hidden revision '%s' is pruned"),
    "diverged": _("hidden revision '%s' has diverged"),
    "superseded": _("hidden revision '%s' was rewritten as: %s"),
    "superseded_split": _("hidden revision '%s' was split as: %s"),
    "superseded_split_several": _("hidden revision '%s' was split as: %s and "
                                  "%d more"),
}
903
906
def _getfilteredreason(repo, changeid, ctx):
    """return a human-friendly string on why a obsolete changeset is hidden
    """
    successors = successorssets(repo, ctx.node())
    fate = _getobsfate(successors)

    # be more precise in case the revision is superseded
    if fate == 'pruned':
        return filteredmsgtable['pruned'] % changeid
    if fate == 'diverged':
        return filteredmsgtable['diverged'] % changeid
    if fate == 'superseded':
        successor = nodemod.short(successors[0][0])
        return filteredmsgtable['superseded'] % (changeid, successor)
    if fate == 'superseded_split':
        shortened = [nodemod.short(node_id) for node_id in successors[0]]
        if len(shortened) <= 2:
            # all successors fit in the message
            fmtsuccs = ', '.join(shortened)
            return filteredmsgtable['superseded_split'] % (changeid, fmtsuccs)
        # only show the first two successors and the count of the rest
        args = (changeid, ', '.join(shortened[:2]), len(shortened) - 2)
        return filteredmsgtable['superseded_split_several'] % args
933
936
def divergentsets(repo, ctx):
    """Compute sets of commits divergent with a given one

    Returns a list of dicts with two keys:
    - 'divergentnodes': a successors set competing with ctx
    - 'commonpredecessor': the latest common predecessor node that both
      ctx and that set descend from (obsolescence-wise)
    """
    cache = {}
    base = {}
    for n in allpredecessors(repo.obsstore, [ctx.node()]):
        if n == ctx.node():
            # a node can't be a base for divergence with itself
            continue
        # Pass the memoization dict as a keyword argument: the third
        # positional parameter of successorssets() is `closest`, not
        # `cache`, so passing it positionally left the cache unused.
        nsuccsets = successorssets(repo, n, cache=cache)
        for nsuccset in nsuccsets:
            if ctx.node() in nsuccset:
                # we are only interested in *other* successor sets
                continue
            if tuple(nsuccset) in base:
                # we already know the latest base for this divergency
                continue
            base[tuple(nsuccset)] = n
    return [{'divergentnodes': divset, 'commonpredecessor': b}
            for divset, b in base.iteritems()]
953
956
def whyunstable(repo, ctx):
    """list the reasons making a changeset unstable

    Returns a list of dicts, one per instability affecting ctx, each with
    at least an 'instability' kind, a 'reason' string and the hex 'node'
    involved.
    """
    entries = []
    if ctx.orphan():
        # an orphan changeset has at least one obsolete or orphan parent
        for parent in ctx.parents():
            if parent.orphan():
                kind = 'orphan'
            elif parent.obsolete():
                kind = 'obsolete'
            else:
                kind = None
            if kind is not None:
                entries.append({'instability': 'orphan',
                                'reason': '%s parent' % kind,
                                'node': parent.hex()})
    if ctx.phasedivergent():
        predecessors = allpredecessors(repo.obsstore, [ctx.node()],
                                       ignoreflags=bumpedfix)
        # only locally known, immutable predecessors matter here
        immutable = [repo[p] for p in predecessors
                     if p in repo and not repo[p].mutable()]
        for predecessor in immutable:
            entries.append({'instability': 'phase-divergent',
                            'reason': 'immutable predecessor',
                            'node': predecessor.hex()})
    if ctx.contentdivergent():
        for dset in divergentsets(repo, ctx):
            divnodes = [repo[n] for n in dset['divergentnodes']]
            entries.append({'instability': 'content-divergent',
                            'divergentnodes': divnodes,
                            'reason': 'predecessor',
                            'node': nodemod.hex(dset['commonpredecessor'])})
    return entries
General Comments 0
You need to be logged in to leave comments. Login now