##// END OF EJS Templates
obsutil: clarify the access to "repo"...
Boris Feld -
r40564:520514af default
parent child Browse files
Show More
@@ -1,981 +1,983 b''
1 # obsutil.py - utility functions for obsolescence
1 # obsutil.py - utility functions for obsolescence
2 #
2 #
3 # Copyright 2017 Boris Feld <boris.feld@octobus.net>
3 # Copyright 2017 Boris Feld <boris.feld@octobus.net>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import re
10 import re
11
11
12 from .i18n import _
12 from .i18n import _
13 from . import (
13 from . import (
14 diffutil,
14 diffutil,
15 encoding,
15 encoding,
16 node as nodemod,
16 node as nodemod,
17 phases,
17 phases,
18 util,
18 util,
19 )
19 )
20 from .utils import (
20 from .utils import (
21 dateutil,
21 dateutil,
22 )
22 )
23
23
### obsolescence marker flag

## bumpedfix flag
#
# When a changeset A' succeed to a changeset A which became public, we call A'
# "bumped" because it's a successors of a public changesets
#
#     o    A'  (bumped)
#     |`:
#     | o  A
#     |/
#     o    Z
#
# The way to solve this situation is to create a new changeset Ad as children
# of A. This changeset have the same content than A'. So the diff from A to A'
# is the same than the diff from A to Ad. Ad is marked as a successors of A'
#
#     o   Ad
#     |`:
#     | x A'
#     |'|
#     o | A
#     |/
#     o Z
#
# But by transitivity Ad is also a successors of A. To avoid having Ad marked
# as bumped too, we add the `bumpedfix` flag to the marker. <A', (Ad,)>.
# This flag mean that the successors express the changes between the public and
# bumped version and fix the situation, breaking the transitivity of
# "bumped" here.
bumpedfix = 1
usingsha256 = 2
56
56
class marker(object):
    """Wrap obsolete marker raw data"""

    def __init__(self, repo, data):
        # the repo argument will be used to create changectx in later version
        self._repo = repo
        self._data = data
        self._decodedmeta = None

    def __hash__(self):
        return hash(self._data)

    def __eq__(self, other):
        # only markers of the same class with equal raw data compare equal
        if type(other) != type(self):
            return False
        return self._data == other._data

    def prednode(self):
        """Predecessor changeset node identifier"""
        return self._data[0]

    def succnodes(self):
        """List of successor changesets node identifiers"""
        return self._data[1]

    def parentnodes(self):
        """Parents of the predecessors (None if not recorded)"""
        return self._data[5]

    def metadata(self):
        """Decoded metadata dictionary"""
        return dict(self._data[3])

    def date(self):
        """Creation date as (unixtime, offset)"""
        return self._data[4]

    def flags(self):
        """The flags field of the marker"""
        return self._data[2]
97
97
def getmarkers(repo, nodes=None, exclusive=False):
    """returns markers known in a repository

    If <nodes> is specified, only markers "relevant" to those nodes are
    returned"""
    if nodes is None:
        rawmarkers = repo.obsstore
    elif exclusive:
        rawmarkers = exclusivemarkers(repo, nodes)
    else:
        rawmarkers = repo.obsstore.relevantmarkers(nodes)

    for markerdata in rawmarkers:
        yield marker(repo, markerdata)
112
112
def closestpredecessors(repo, nodeid):
    """yield the list of next predecessors pointing on visible changectx nodes

    This function respect the repoview filtering, filtered revision will be
    considered missing.
    """

    precursors = repo.obsstore.predecessors
    stack = [nodeid]
    seen = set(stack)

    while stack:
        current = stack.pop()
        currentpreccs = precursors.get(current, ())

        for prec in currentpreccs:
            precnodeid = prec[0]

            # Basic cycle protection
            if precnodeid in seen:
                continue
            seen.add(precnodeid)

            if precnodeid in repo:
                yield precnodeid
            else:
                stack.append(precnodeid)
140
140
def allpredecessors(obsstore, nodes, ignoreflags=0):
    """Yield node for every precursors of <nodes>.

    Some precursors may be unknown locally.

    This is a linear yield unsuited to detecting folded changesets. It includes
    initial nodes too."""

    remaining = set(nodes)
    seen = set(remaining)
    prec = obsstore.predecessors.get
    while remaining:
        current = remaining.pop()
        yield current
        for mark in prec(current, ()):
            # ignore marker flagged with specified flag
            if mark[2] & ignoreflags:
                continue
            suc = mark[0]
            if suc not in seen:
                seen.add(suc)
                remaining.add(suc)
163
163
def allsuccessors(obsstore, nodes, ignoreflags=0):
    """Yield node for every successor of <nodes>.

    Some successors may be unknown locally.

    This is a linear yield unsuited to detecting split changesets. It includes
    initial nodes too."""
    remaining = set(nodes)
    seen = set(remaining)
    while remaining:
        current = remaining.pop()
        yield current
        for mark in obsstore.successors.get(current, ()):
            # ignore marker flagged with specified flag
            if mark[2] & ignoreflags:
                continue
            for suc in mark[1]:
                if suc not in seen:
                    seen.add(suc)
                    remaining.add(suc)
184
184
185 def _filterprunes(markers):
185 def _filterprunes(markers):
186 """return a set with no prune markers"""
186 """return a set with no prune markers"""
187 return set(m for m in markers if m[1])
187 return set(m for m in markers if m[1])
188
188
def exclusivemarkers(repo, nodes):
    """set of markers relevant to "nodes" but no other locally-known nodes

    This function compute the set of markers "exclusive" to a locally-known
    node. This means we walk the markers starting from <nodes> until we reach a
    locally-known precursors outside of <nodes>. Element of <nodes> with
    locally-known successors outside of <nodes> are ignored (since their
    precursors markers are also relevant to these successors).

    For example:

        # (A0 rewritten as A1)
        #
        # A0 <-1- A1 # Marker "1" is exclusive to A1

    or

        # (A0 rewritten as AX; AX rewritten as A1; AX is unkown locally)
        #
        # <-1- A0 <-2- AX <-3- A1 # Marker "2,3" are exclusive to A1

    or

        # (A0 has unknown precursors, A0 rewritten as A1 and A2 (divergence))
        #
        #          <-2- A1 # Marker "2" is exclusive to A0,A1
        #        /
        # <-1- A0
        #        \
        #         <-3- A2 # Marker "3" is exclusive to A0,A2
        #
        # in addition:
        #
        #  Markers "2,3" are exclusive to A1,A2
        #  Markers "1,2,3" are exclusive to A0,A1,A2

    See test/test-obsolete-bundle-strip.t for more examples.

    An example usage is strip. When stripping a changeset, we also want to
    strip the markers exclusive to this changeset. Otherwise we would have
    "dangling"" obsolescence markers from its precursors: Obsolescence markers
    marking a node as obsolete without any successors available locally.

    As for relevant markers, the prune markers for children will be followed.
    Of course, they will only be followed if the pruned children is
    locally-known. Since the prune markers are relevant to the pruned node.
    However, while prune markers are considered relevant to the parent of the
    pruned changesets, prune markers for locally-known changeset (with no
    successors) are considered exclusive to the pruned nodes. This allows
    to strip the prune markers (with the rest of the exclusive chain) alongside
    the pruned changesets.
    """
    # running on a filtered repository would be dangerous as markers could be
    # reported as exclusive when they are relevant for other filtered nodes.
    unfi = repo.unfiltered()

    # shortcut to various useful item
    nm = unfi.changelog.nodemap
    precursorsmarkers = unfi.obsstore.predecessors
    successormarkers = unfi.obsstore.successors
    childrenmarkers = unfi.obsstore.children

    # exclusive markers (return of the function)
    exclmarkers = set()
    # we need fast membership testing
    nodes = set(nodes)
    # looking for head in the obshistory
    #
    # XXX we are ignoring all issues in regard with cycle for now.
    stack = [n for n in nodes if not _filterprunes(successormarkers.get(n, ()))]
    stack.sort()
    # nodes already stacked
    seennodes = set(stack)
    while stack:
        current = stack.pop()
        # fetch precursors markers
        markers = list(precursorsmarkers.get(current, ()))
        # extend the list with prune markers
        for mark in successormarkers.get(current, ()):
            if not mark[1]:
                markers.append(mark)
        # and markers from children (looking for prune)
        for mark in childrenmarkers.get(current, ()):
            if not mark[1]:
                markers.append(mark)
        # traverse the markers
        for mark in markers:
            if mark in exclmarkers:
                # markers already selected
                continue

            # If the markers is about the current node, select it
            #
            # (this delay the addition of markers from children)
            if mark[1] or mark[0] == current:
                exclmarkers.add(mark)

            # should we keep traversing through the precursors?
            prec = mark[0]

            # nodes in the stack or already processed
            if prec in seennodes:
                continue

            # is this a locally known node ?
            known = prec in nm
            # if locally-known and not in the <nodes> set the traversal
            # stop here.
            if known and prec not in nodes:
                continue

            # do not keep going if there are unselected markers pointing to this
            # nodes. If we end up traversing these unselected markers later the
            # node will be taken care of at that point.
            precmarkers = _filterprunes(successormarkers.get(prec))
            if precmarkers.issubset(exclmarkers):
                seennodes.add(prec)
                stack.append(prec)

    return exclmarkers
309
309
def foreground(repo, nodes):
    """return all nodes in the "foreground" of other node

    The foreground of a revision is anything reachable using parent -> children
    or precursor -> successor relation. It is very similar to "descendant" but
    augmented with obsolescence information.

    Beware that possible obsolescence cycle may result if complex situation.
    """
    repo = repo.unfiltered()
    foreground = set(repo.set('%ln::', nodes))
    if repo.obsstore:
        # We only need this complicated logic if there is obsolescence
        # XXX will probably deserve an optimised revset.
        nm = repo.changelog.nodemap
        plen = -1
        # compute the whole set of successors or descendants
        while len(foreground) != plen:
            plen = len(foreground)
            succs = set(c.node() for c in foreground)
            mutable = [c.node() for c in foreground if c.mutable()]
            succs.update(allsuccessors(repo.obsstore, mutable))
            known = (n for n in succs if n in nm)
            foreground = set(repo.set('%ln::', known))
    return set(c.node() for c in foreground)
335
335
# effectflag field
#
# Effect-flag is a 1-byte bit field used to store what changed between a
# changeset and its successor(s).
#
# The effect flag is stored in obs-markers metadata while we iterate on the
# information design. That's why we have the EFFECTFLAGFIELD. If we come up
# with an incompatible design for effect flag, we can store a new design under
# another field name so we don't break readers. We plan to extend the existing
# obsmarkers bit-field when the effect flag design will be stabilized.
#
# The effect-flag is placed behind an experimental flag
# `effect-flags` set to off by default.
#

EFFECTFLAGFIELD = "ef1"

DESCCHANGED = 1 << 0 # action changed the description
METACHANGED = 1 << 1 # action change the meta
DIFFCHANGED = 1 << 3 # action change diff introduced by the changeset
PARENTCHANGED = 1 << 2 # action change the parent
USERCHANGED = 1 << 4 # the user changed
DATECHANGED = 1 << 5 # the date changed
BRANCHCHANGED = 1 << 6 # the branch changed

METABLACKLIST = [
    re.compile('^branch$'),
    re.compile('^.*-source$'),
    re.compile('^.*_source$'),
    re.compile('^source$'),
]

def metanotblacklisted(metaitem):
    """ Check that the key of a meta item (extrakey, extravalue) does not
    match at least one of the blacklist pattern
    """
    metakey = metaitem[0]

    return not any(pattern.match(metakey) for pattern in METABLACKLIST)
375
375
376 def _prepare_hunk(hunk):
376 def _prepare_hunk(hunk):
377 """Drop all information but the username and patch"""
377 """Drop all information but the username and patch"""
378 cleanhunk = []
378 cleanhunk = []
379 for line in hunk.splitlines():
379 for line in hunk.splitlines():
380 if line.startswith(b'# User') or not line.startswith(b'#'):
380 if line.startswith(b'# User') or not line.startswith(b'#'):
381 if line.startswith(b'@@'):
381 if line.startswith(b'@@'):
382 line = b'@@\n'
382 line = b'@@\n'
383 cleanhunk.append(line)
383 cleanhunk.append(line)
384 return cleanhunk
384 return cleanhunk
385
385
386 def _getdifflines(iterdiff):
386 def _getdifflines(iterdiff):
387 """return a cleaned up lines"""
387 """return a cleaned up lines"""
388 lines = next(iterdiff, None)
388 lines = next(iterdiff, None)
389
389
390 if lines is None:
390 if lines is None:
391 return lines
391 return lines
392
392
393 return _prepare_hunk(lines)
393 return _prepare_hunk(lines)
394
394
def _cmpdiff(leftctx, rightctx):
    """return True if both ctx introduce the "same diff"

    This is a first and basic implementation, with many shortcoming.
    """
    # leftctx.repo() and rightctx.repo() are the same here
    repo = leftctx.repo()
    diffopts = diffutil.diffallopts(repo.ui, {'git': True})
    # Leftctx or right ctx might be filtered, so we need to use the contexts
    # with an unfiltered repository to safely compute the diff
    leftunfi = repo.unfiltered()[leftctx.rev()]
    leftdiff = leftunfi.diff(opts=diffopts)
    rightunfi = repo.unfiltered()[rightctx.rev()]
    rightdiff = rightunfi.diff(opts=diffopts)

    left, right = (0, 0)
    while None not in (left, right):
        left = _getdifflines(leftdiff)
        right = _getdifflines(rightdiff)

        if left != right:
            return False
    return True
416
418
def geteffectflag(source, successors):
    """ From an obs-marker relation, compute what changed between the
    predecessor and the successor.
    """
    effects = 0

    for changectx in successors:
        # Check if description has changed
        if changectx.description() != source.description():
            effects |= DESCCHANGED

        # Check if user has changed
        if changectx.user() != source.user():
            effects |= USERCHANGED

        # Check if date has changed
        if changectx.date() != source.date():
            effects |= DATECHANGED

        # Check if branch has changed
        if changectx.branch() != source.branch():
            effects |= BRANCHCHANGED

        # Check if at least one of the parent has changed
        if changectx.parents() != source.parents():
            effects |= PARENTCHANGED

        # Check if other meta has changed
        changeextra = changectx.extra().items()
        ctxmeta = list(filter(metanotblacklisted, changeextra))

        sourceextra = source.extra().items()
        srcmeta = list(filter(metanotblacklisted, sourceextra))

        if ctxmeta != srcmeta:
            effects |= METACHANGED

        # Check if the diff has changed
        if not _cmpdiff(source, changectx):
            effects |= DIFFCHANGED

    return effects
459
461
def getobsoleted(repo, tr):
    """return the set of pre-existing revisions obsoleted by a transaction"""
    torev = repo.unfiltered().changelog.nodemap.get
    phase = repo._phasecache.phase
    succsmarkers = repo.obsstore.successors.get
    public = phases.public
    addedmarkers = tr.changes['obsmarkers']
    origrepolen = tr.changes['origrepolen']
    seenrevs = set()
    obsoleted = set()
    for mark in addedmarkers:
        node = mark[0]
        rev = torev(node)
        # unknown nodes, already-seen revs and revs created by this very
        # transaction (rev >= origrepolen) are not "pre-existing obsoleted"
        if rev is None or rev in seenrevs or rev >= origrepolen:
            continue
        seenrevs.add(rev)
        # public changesets cannot be obsoleted
        if phase(repo, rev) == public:
            continue
        # only count it if all of its successor markers come from this
        # transaction
        if set(succsmarkers(node) or []).issubset(addedmarkers):
            obsoleted.add(rev)
    return obsoleted
481
483
class _succs(list):
    """small class to represent a successors with some metadata about it"""

    def __init__(self, *args, **kwargs):
        super(_succs, self).__init__(*args, **kwargs)
        self.markers = set()

    def copy(self):
        new = _succs(self)
        new.markers = self.markers.copy()
        return new

    @util.propertycache
    def _set(self):
        # immutable
        return set(self)

    def canmerge(self, other):
        return self._set.issubset(other._set)
501
503
502 def successorssets(repo, initialnode, closest=False, cache=None):
504 def successorssets(repo, initialnode, closest=False, cache=None):
503 """Return set of all latest successors of initial nodes
505 """Return set of all latest successors of initial nodes
504
506
505 The successors set of a changeset A are the group of revisions that succeed
507 The successors set of a changeset A are the group of revisions that succeed
506 A. It succeeds A as a consistent whole, each revision being only a partial
508 A. It succeeds A as a consistent whole, each revision being only a partial
507 replacement. By default, the successors set contains non-obsolete
509 replacement. By default, the successors set contains non-obsolete
508 changesets only, walking the obsolescence graph until reaching a leaf. If
510 changesets only, walking the obsolescence graph until reaching a leaf. If
509 'closest' is set to True, closest successors-sets are return (the
511 'closest' is set to True, closest successors-sets are return (the
510 obsolescence walk stops on known changesets).
512 obsolescence walk stops on known changesets).
511
513
512 This function returns the full list of successor sets which is why it
514 This function returns the full list of successor sets which is why it
513 returns a list of tuples and not just a single tuple. Each tuple is a valid
515 returns a list of tuples and not just a single tuple. Each tuple is a valid
514 successors set. Note that (A,) may be a valid successors set for changeset A
516 successors set. Note that (A,) may be a valid successors set for changeset A
515 (see below).
517 (see below).
516
518
517 In most cases, a changeset A will have a single element (e.g. the changeset
519 In most cases, a changeset A will have a single element (e.g. the changeset
518 A is replaced by A') in its successors set. Though, it is also common for a
520 A is replaced by A') in its successors set. Though, it is also common for a
519 changeset A to have no elements in its successor set (e.g. the changeset
521 changeset A to have no elements in its successor set (e.g. the changeset
520 has been pruned). Therefore, the returned list of successors sets will be
522 has been pruned). Therefore, the returned list of successors sets will be
521 [(A',)] or [], respectively.
523 [(A',)] or [], respectively.
522
524
523 When a changeset A is split into A' and B', however, it will result in a
525 When a changeset A is split into A' and B', however, it will result in a
524 successors set containing more than a single element, i.e. [(A',B')].
526 successors set containing more than a single element, i.e. [(A',B')].
525 Divergent changesets will result in multiple successors sets, i.e. [(A',),
527 Divergent changesets will result in multiple successors sets, i.e. [(A',),
526 (A'')].
528 (A'')].
527
529
528 If a changeset A is not obsolete, then it will conceptually have no
530 If a changeset A is not obsolete, then it will conceptually have no
529 successors set. To distinguish this from a pruned changeset, the successor
531 successors set. To distinguish this from a pruned changeset, the successor
530 set will contain itself only, i.e. [(A,)].
532 set will contain itself only, i.e. [(A,)].
531
533
532 Finally, final successors unknown locally are considered to be pruned
534 Finally, final successors unknown locally are considered to be pruned
533 (pruned: obsoleted without any successors). (Final: successors not affected
535 (pruned: obsoleted without any successors). (Final: successors not affected
534 by markers).
536 by markers).
535
537
536 The 'closest' mode respect the repoview filtering. For example, without
538 The 'closest' mode respect the repoview filtering. For example, without
537 filter it will stop at the first locally known changeset, with 'visible'
539 filter it will stop at the first locally known changeset, with 'visible'
538 filter it will stop on visible changesets).
540 filter it will stop on visible changesets).
539
541
540 The optional `cache` parameter is a dictionary that may contains
542 The optional `cache` parameter is a dictionary that may contains
541 precomputed successors sets. It is meant to reuse the computation of a
543 precomputed successors sets. It is meant to reuse the computation of a
542 previous call to `successorssets` when multiple calls are made at the same
544 previous call to `successorssets` when multiple calls are made at the same
543 time. The cache dictionary is updated in place. The caller is responsible
545 time. The cache dictionary is updated in place. The caller is responsible
544 for its life span. Code that makes multiple calls to `successorssets`
546 for its life span. Code that makes multiple calls to `successorssets`
545 *should* use this cache mechanism or risk a performance hit.
547 *should* use this cache mechanism or risk a performance hit.
546
548
547 Since results are different depending of the 'closest' most, the same cache
549 Since results are different depending of the 'closest' most, the same cache
548 cannot be reused for both mode.
550 cannot be reused for both mode.
549 """
551 """
550
552
551 succmarkers = repo.obsstore.successors
553 succmarkers = repo.obsstore.successors
552
554
553 # Stack of nodes we search successors sets for
555 # Stack of nodes we search successors sets for
554 toproceed = [initialnode]
556 toproceed = [initialnode]
555 # set version of above list for fast loop detection
557 # set version of above list for fast loop detection
556 # element added to "toproceed" must be added here
558 # element added to "toproceed" must be added here
557 stackedset = set(toproceed)
559 stackedset = set(toproceed)
558 if cache is None:
560 if cache is None:
559 cache = {}
561 cache = {}
560
562
561 # This while loop is the flattened version of a recursive search for
563 # This while loop is the flattened version of a recursive search for
562 # successors sets
564 # successors sets
563 #
565 #
564 # def successorssets(x):
566 # def successorssets(x):
565 # successors = directsuccessors(x)
567 # successors = directsuccessors(x)
566 # ss = [[]]
568 # ss = [[]]
567 # for succ in directsuccessors(x):
569 # for succ in directsuccessors(x):
568 # # product as in itertools cartesian product
570 # # product as in itertools cartesian product
569 # ss = product(ss, successorssets(succ))
571 # ss = product(ss, successorssets(succ))
570 # return ss
572 # return ss
571 #
573 #
572 # But we can not use plain recursive calls here:
574 # But we can not use plain recursive calls here:
573 # - that would blow the python call stack
575 # - that would blow the python call stack
574 # - obsolescence markers may have cycles, we need to handle them.
576 # - obsolescence markers may have cycles, we need to handle them.
575 #
577 #
576 # The `toproceed` list act as our call stack. Every node we search
578 # The `toproceed` list act as our call stack. Every node we search
577 # successors set for are stacked there.
579 # successors set for are stacked there.
578 #
580 #
579 # The `stackedset` is set version of this stack used to check if a node is
581 # The `stackedset` is set version of this stack used to check if a node is
580 # already stacked. This check is used to detect cycles and prevent infinite
582 # already stacked. This check is used to detect cycles and prevent infinite
581 # loop.
583 # loop.
582 #
584 #
583 # successors set of all nodes are stored in the `cache` dictionary.
585 # successors set of all nodes are stored in the `cache` dictionary.
584 #
586 #
585 # After this while loop ends we use the cache to return the successors sets
587 # After this while loop ends we use the cache to return the successors sets
586 # for the node requested by the caller.
588 # for the node requested by the caller.
587 while toproceed:
589 while toproceed:
588 # Every iteration tries to compute the successors sets of the topmost
590 # Every iteration tries to compute the successors sets of the topmost
589 # node of the stack: CURRENT.
591 # node of the stack: CURRENT.
590 #
592 #
591 # There are four possible outcomes:
593 # There are four possible outcomes:
592 #
594 #
593 # 1) We already know the successors sets of CURRENT:
595 # 1) We already know the successors sets of CURRENT:
594 # -> mission accomplished, pop it from the stack.
596 # -> mission accomplished, pop it from the stack.
595 # 2) Stop the walk:
597 # 2) Stop the walk:
596 # default case: Node is not obsolete
598 # default case: Node is not obsolete
597 # closest case: Node is known at this repo filter level
599 # closest case: Node is known at this repo filter level
598 # -> the node is its own successors sets. Add it to the cache.
600 # -> the node is its own successors sets. Add it to the cache.
599 # 3) We do not know successors set of direct successors of CURRENT:
601 # 3) We do not know successors set of direct successors of CURRENT:
600 # -> We add those successors to the stack.
602 # -> We add those successors to the stack.
601 # 4) We know successors sets of all direct successors of CURRENT:
603 # 4) We know successors sets of all direct successors of CURRENT:
602 # -> We can compute CURRENT successors set and add it to the
604 # -> We can compute CURRENT successors set and add it to the
603 # cache.
605 # cache.
604 #
606 #
605 current = toproceed[-1]
607 current = toproceed[-1]
606
608
607 # case 2 condition is a bit hairy because of closest,
609 # case 2 condition is a bit hairy because of closest,
608 # we compute it on its own
610 # we compute it on its own
609 case2condition = ((current not in succmarkers)
611 case2condition = ((current not in succmarkers)
610 or (closest and current != initialnode
612 or (closest and current != initialnode
611 and current in repo))
613 and current in repo))
612
614
613 if current in cache:
615 if current in cache:
614 # case (1): We already know the successors sets
616 # case (1): We already know the successors sets
615 stackedset.remove(toproceed.pop())
617 stackedset.remove(toproceed.pop())
616 elif case2condition:
618 elif case2condition:
617 # case (2): end of walk.
619 # case (2): end of walk.
618 if current in repo:
620 if current in repo:
619 # We have a valid successors.
621 # We have a valid successors.
620 cache[current] = [_succs((current,))]
622 cache[current] = [_succs((current,))]
621 else:
623 else:
622 # Final obsolete version is unknown locally.
624 # Final obsolete version is unknown locally.
623 # Do not count that as a valid successors
625 # Do not count that as a valid successors
624 cache[current] = []
626 cache[current] = []
625 else:
627 else:
626 # cases (3) and (4)
628 # cases (3) and (4)
627 #
629 #
628 # We proceed in two phases. Phase 1 aims to distinguish case (3)
630 # We proceed in two phases. Phase 1 aims to distinguish case (3)
629 # from case (4):
631 # from case (4):
630 #
632 #
631 # For each direct successors of CURRENT, we check whether its
633 # For each direct successors of CURRENT, we check whether its
632 # successors sets are known. If they are not, we stack the
634 # successors sets are known. If they are not, we stack the
633 # unknown node and proceed to the next iteration of the while
635 # unknown node and proceed to the next iteration of the while
634 # loop. (case 3)
636 # loop. (case 3)
635 #
637 #
636 # During this step, we may detect obsolescence cycles: a node
638 # During this step, we may detect obsolescence cycles: a node
637 # with unknown successors sets but already in the call stack.
639 # with unknown successors sets but already in the call stack.
638 # In such a situation, we arbitrary set the successors sets of
640 # In such a situation, we arbitrary set the successors sets of
639 # the node to nothing (node pruned) to break the cycle.
641 # the node to nothing (node pruned) to break the cycle.
640 #
642 #
641 # If no break was encountered we proceed to phase 2.
643 # If no break was encountered we proceed to phase 2.
642 #
644 #
643 # Phase 2 computes successors sets of CURRENT (case 4); see details
645 # Phase 2 computes successors sets of CURRENT (case 4); see details
644 # in phase 2 itself.
646 # in phase 2 itself.
645 #
647 #
646 # Note the two levels of iteration in each phase.
648 # Note the two levels of iteration in each phase.
647 # - The first one handles obsolescence markers using CURRENT as
649 # - The first one handles obsolescence markers using CURRENT as
648 # precursor (successors markers of CURRENT).
650 # precursor (successors markers of CURRENT).
649 #
651 #
650 # Having multiple entry here means divergence.
652 # Having multiple entry here means divergence.
651 #
653 #
652 # - The second one handles successors defined in each marker.
654 # - The second one handles successors defined in each marker.
653 #
655 #
654 # Having none means pruned node, multiple successors means split,
656 # Having none means pruned node, multiple successors means split,
655 # single successors are standard replacement.
657 # single successors are standard replacement.
656 #
658 #
657 for mark in sorted(succmarkers[current]):
659 for mark in sorted(succmarkers[current]):
658 for suc in mark[1]:
660 for suc in mark[1]:
659 if suc not in cache:
661 if suc not in cache:
660 if suc in stackedset:
662 if suc in stackedset:
661 # cycle breaking
663 # cycle breaking
662 cache[suc] = []
664 cache[suc] = []
663 else:
665 else:
664 # case (3) If we have not computed successors sets
666 # case (3) If we have not computed successors sets
665 # of one of those successors we add it to the
667 # of one of those successors we add it to the
666 # `toproceed` stack and stop all work for this
668 # `toproceed` stack and stop all work for this
667 # iteration.
669 # iteration.
668 toproceed.append(suc)
670 toproceed.append(suc)
669 stackedset.add(suc)
671 stackedset.add(suc)
670 break
672 break
671 else:
673 else:
672 continue
674 continue
673 break
675 break
674 else:
676 else:
675 # case (4): we know all successors sets of all direct
677 # case (4): we know all successors sets of all direct
676 # successors
678 # successors
677 #
679 #
678 # Successors set contributed by each marker depends on the
680 # Successors set contributed by each marker depends on the
679 # successors sets of all its "successors" node.
681 # successors sets of all its "successors" node.
680 #
682 #
681 # Each different marker is a divergence in the obsolescence
683 # Each different marker is a divergence in the obsolescence
682 # history. It contributes successors sets distinct from other
684 # history. It contributes successors sets distinct from other
683 # markers.
685 # markers.
684 #
686 #
685 # Within a marker, a successor may have divergent successors
687 # Within a marker, a successor may have divergent successors
686 # sets. In such a case, the marker will contribute multiple
688 # sets. In such a case, the marker will contribute multiple
687 # divergent successors sets. If multiple successors have
689 # divergent successors sets. If multiple successors have
688 # divergent successors sets, a Cartesian product is used.
690 # divergent successors sets, a Cartesian product is used.
689 #
691 #
690 # At the end we post-process successors sets to remove
692 # At the end we post-process successors sets to remove
691 # duplicated entry and successors set that are strict subset of
693 # duplicated entry and successors set that are strict subset of
692 # another one.
694 # another one.
693 succssets = []
695 succssets = []
694 for mark in sorted(succmarkers[current]):
696 for mark in sorted(succmarkers[current]):
695 # successors sets contributed by this marker
697 # successors sets contributed by this marker
696 base = _succs()
698 base = _succs()
697 base.markers.add(mark)
699 base.markers.add(mark)
698 markss = [base]
700 markss = [base]
699 for suc in mark[1]:
701 for suc in mark[1]:
700 # cardinal product with previous successors
702 # cardinal product with previous successors
701 productresult = []
703 productresult = []
702 for prefix in markss:
704 for prefix in markss:
703 for suffix in cache[suc]:
705 for suffix in cache[suc]:
704 newss = prefix.copy()
706 newss = prefix.copy()
705 newss.markers.update(suffix.markers)
707 newss.markers.update(suffix.markers)
706 for part in suffix:
708 for part in suffix:
707 # do not duplicated entry in successors set
709 # do not duplicated entry in successors set
708 # first entry wins.
710 # first entry wins.
709 if part not in newss:
711 if part not in newss:
710 newss.append(part)
712 newss.append(part)
711 productresult.append(newss)
713 productresult.append(newss)
712 markss = productresult
714 markss = productresult
713 succssets.extend(markss)
715 succssets.extend(markss)
714 # remove duplicated and subset
716 # remove duplicated and subset
715 seen = []
717 seen = []
716 final = []
718 final = []
717 candidates = sorted((s for s in succssets if s),
719 candidates = sorted((s for s in succssets if s),
718 key=len, reverse=True)
720 key=len, reverse=True)
719 for cand in candidates:
721 for cand in candidates:
720 for seensuccs in seen:
722 for seensuccs in seen:
721 if cand.canmerge(seensuccs):
723 if cand.canmerge(seensuccs):
722 seensuccs.markers.update(cand.markers)
724 seensuccs.markers.update(cand.markers)
723 break
725 break
724 else:
726 else:
725 final.append(cand)
727 final.append(cand)
726 seen.append(cand)
728 seen.append(cand)
727 final.reverse() # put small successors set first
729 final.reverse() # put small successors set first
728 cache[current] = final
730 cache[current] = final
729 return cache[initialnode]
731 return cache[initialnode]
730
732
731 def successorsandmarkers(repo, ctx):
733 def successorsandmarkers(repo, ctx):
732 """compute the raw data needed for computing obsfate
734 """compute the raw data needed for computing obsfate
733 Returns a list of dict, one dict per successors set
735 Returns a list of dict, one dict per successors set
734 """
736 """
735 if not ctx.obsolete():
737 if not ctx.obsolete():
736 return None
738 return None
737
739
738 ssets = successorssets(repo, ctx.node(), closest=True)
740 ssets = successorssets(repo, ctx.node(), closest=True)
739
741
740 # closestsuccessors returns an empty list for pruned revisions, remap it
742 # closestsuccessors returns an empty list for pruned revisions, remap it
741 # into a list containing an empty list for future processing
743 # into a list containing an empty list for future processing
742 if ssets == []:
744 if ssets == []:
743 ssets = [[]]
745 ssets = [[]]
744
746
745 # Try to recover pruned markers
747 # Try to recover pruned markers
746 succsmap = repo.obsstore.successors
748 succsmap = repo.obsstore.successors
747 fullsuccessorsets = [] # successor set + markers
749 fullsuccessorsets = [] # successor set + markers
748 for sset in ssets:
750 for sset in ssets:
749 if sset:
751 if sset:
750 fullsuccessorsets.append(sset)
752 fullsuccessorsets.append(sset)
751 else:
753 else:
752 # successorsset return an empty set() when ctx or one of its
754 # successorsset return an empty set() when ctx or one of its
753 # successors is pruned.
755 # successors is pruned.
754 # In this case, walk the obs-markers tree again starting with ctx
756 # In this case, walk the obs-markers tree again starting with ctx
755 # and find the relevant pruning obs-makers, the ones without
757 # and find the relevant pruning obs-makers, the ones without
756 # successors.
758 # successors.
757 # Having these markers allow us to compute some information about
759 # Having these markers allow us to compute some information about
758 # its fate, like who pruned this changeset and when.
760 # its fate, like who pruned this changeset and when.
759
761
760 # XXX we do not catch all prune markers (eg rewritten then pruned)
762 # XXX we do not catch all prune markers (eg rewritten then pruned)
761 # (fix me later)
763 # (fix me later)
762 foundany = False
764 foundany = False
763 for mark in succsmap.get(ctx.node(), ()):
765 for mark in succsmap.get(ctx.node(), ()):
764 if not mark[1]:
766 if not mark[1]:
765 foundany = True
767 foundany = True
766 sset = _succs()
768 sset = _succs()
767 sset.markers.add(mark)
769 sset.markers.add(mark)
768 fullsuccessorsets.append(sset)
770 fullsuccessorsets.append(sset)
769 if not foundany:
771 if not foundany:
770 fullsuccessorsets.append(_succs())
772 fullsuccessorsets.append(_succs())
771
773
772 values = []
774 values = []
773 for sset in fullsuccessorsets:
775 for sset in fullsuccessorsets:
774 values.append({'successors': sset, 'markers': sset.markers})
776 values.append({'successors': sset, 'markers': sset.markers})
775
777
776 return values
778 return values
777
779
778 def _getobsfate(successorssets):
780 def _getobsfate(successorssets):
779 """ Compute a changeset obsolescence fate based on its successorssets.
781 """ Compute a changeset obsolescence fate based on its successorssets.
780 Successors can be the tipmost ones or the immediate ones. This function
782 Successors can be the tipmost ones or the immediate ones. This function
781 return values are not meant to be shown directly to users, it is meant to
783 return values are not meant to be shown directly to users, it is meant to
782 be used by internal functions only.
784 be used by internal functions only.
783 Returns one fate from the following values:
785 Returns one fate from the following values:
784 - pruned
786 - pruned
785 - diverged
787 - diverged
786 - superseded
788 - superseded
787 - superseded_split
789 - superseded_split
788 """
790 """
789
791
790 if len(successorssets) == 0:
792 if len(successorssets) == 0:
791 # The commit has been pruned
793 # The commit has been pruned
792 return 'pruned'
794 return 'pruned'
793 elif len(successorssets) > 1:
795 elif len(successorssets) > 1:
794 return 'diverged'
796 return 'diverged'
795 else:
797 else:
796 # No divergence, only one set of successors
798 # No divergence, only one set of successors
797 successors = successorssets[0]
799 successors = successorssets[0]
798
800
799 if len(successors) == 1:
801 if len(successors) == 1:
800 return 'superseded'
802 return 'superseded'
801 else:
803 else:
802 return 'superseded_split'
804 return 'superseded_split'
803
805
804 def obsfateverb(successorset, markers):
806 def obsfateverb(successorset, markers):
805 """ Return the verb summarizing the successorset and potentially using
807 """ Return the verb summarizing the successorset and potentially using
806 information from the markers
808 information from the markers
807 """
809 """
808 if not successorset:
810 if not successorset:
809 verb = 'pruned'
811 verb = 'pruned'
810 elif len(successorset) == 1:
812 elif len(successorset) == 1:
811 verb = 'rewritten'
813 verb = 'rewritten'
812 else:
814 else:
813 verb = 'split'
815 verb = 'split'
814 return verb
816 return verb
815
817
816 def markersdates(markers):
818 def markersdates(markers):
817 """returns the list of dates for a list of markers
819 """returns the list of dates for a list of markers
818 """
820 """
819 return [m[4] for m in markers]
821 return [m[4] for m in markers]
820
822
821 def markersusers(markers):
823 def markersusers(markers):
822 """ Returns a sorted list of markers users without duplicates
824 """ Returns a sorted list of markers users without duplicates
823 """
825 """
824 markersmeta = [dict(m[3]) for m in markers]
826 markersmeta = [dict(m[3]) for m in markers]
825 users = set(encoding.tolocal(meta['user']) for meta in markersmeta
827 users = set(encoding.tolocal(meta['user']) for meta in markersmeta
826 if meta.get('user'))
828 if meta.get('user'))
827
829
828 return sorted(users)
830 return sorted(users)
829
831
830 def markersoperations(markers):
832 def markersoperations(markers):
831 """ Returns a sorted list of markers operations without duplicates
833 """ Returns a sorted list of markers operations without duplicates
832 """
834 """
833 markersmeta = [dict(m[3]) for m in markers]
835 markersmeta = [dict(m[3]) for m in markers]
834 operations = set(meta.get('operation') for meta in markersmeta
836 operations = set(meta.get('operation') for meta in markersmeta
835 if meta.get('operation'))
837 if meta.get('operation'))
836
838
837 return sorted(operations)
839 return sorted(operations)
838
840
839 def obsfateprinter(ui, repo, successors, markers, formatctx):
841 def obsfateprinter(ui, repo, successors, markers, formatctx):
840 """ Build a obsfate string for a single successorset using all obsfate
842 """ Build a obsfate string for a single successorset using all obsfate
841 related function defined in obsutil
843 related function defined in obsutil
842 """
844 """
843 quiet = ui.quiet
845 quiet = ui.quiet
844 verbose = ui.verbose
846 verbose = ui.verbose
845 normal = not verbose and not quiet
847 normal = not verbose and not quiet
846
848
847 line = []
849 line = []
848
850
849 # Verb
851 # Verb
850 line.append(obsfateverb(successors, markers))
852 line.append(obsfateverb(successors, markers))
851
853
852 # Operations
854 # Operations
853 operations = markersoperations(markers)
855 operations = markersoperations(markers)
854 if operations:
856 if operations:
855 line.append(" using %s" % ", ".join(operations))
857 line.append(" using %s" % ", ".join(operations))
856
858
857 # Successors
859 # Successors
858 if successors:
860 if successors:
859 fmtsuccessors = [formatctx(repo[succ]) for succ in successors]
861 fmtsuccessors = [formatctx(repo[succ]) for succ in successors]
860 line.append(" as %s" % ", ".join(fmtsuccessors))
862 line.append(" as %s" % ", ".join(fmtsuccessors))
861
863
862 # Users
864 # Users
863 users = markersusers(markers)
865 users = markersusers(markers)
864 # Filter out current user in not verbose mode to reduce amount of
866 # Filter out current user in not verbose mode to reduce amount of
865 # information
867 # information
866 if not verbose:
868 if not verbose:
867 currentuser = ui.username(acceptempty=True)
869 currentuser = ui.username(acceptempty=True)
868 if len(users) == 1 and currentuser in users:
870 if len(users) == 1 and currentuser in users:
869 users = None
871 users = None
870
872
871 if (verbose or normal) and users:
873 if (verbose or normal) and users:
872 line.append(" by %s" % ", ".join(users))
874 line.append(" by %s" % ", ".join(users))
873
875
874 # Date
876 # Date
875 dates = markersdates(markers)
877 dates = markersdates(markers)
876
878
877 if dates and verbose:
879 if dates and verbose:
878 min_date = min(dates)
880 min_date = min(dates)
879 max_date = max(dates)
881 max_date = max(dates)
880
882
881 if min_date == max_date:
883 if min_date == max_date:
882 fmtmin_date = dateutil.datestr(min_date, '%Y-%m-%d %H:%M %1%2')
884 fmtmin_date = dateutil.datestr(min_date, '%Y-%m-%d %H:%M %1%2')
883 line.append(" (at %s)" % fmtmin_date)
885 line.append(" (at %s)" % fmtmin_date)
884 else:
886 else:
885 fmtmin_date = dateutil.datestr(min_date, '%Y-%m-%d %H:%M %1%2')
887 fmtmin_date = dateutil.datestr(min_date, '%Y-%m-%d %H:%M %1%2')
886 fmtmax_date = dateutil.datestr(max_date, '%Y-%m-%d %H:%M %1%2')
888 fmtmax_date = dateutil.datestr(max_date, '%Y-%m-%d %H:%M %1%2')
887 line.append(" (between %s and %s)" % (fmtmin_date, fmtmax_date))
889 line.append(" (between %s and %s)" % (fmtmin_date, fmtmax_date))
888
890
889 return "".join(line)
891 return "".join(line)
890
892
891
893
892 filteredmsgtable = {
894 filteredmsgtable = {
893 "pruned": _("hidden revision '%s' is pruned"),
895 "pruned": _("hidden revision '%s' is pruned"),
894 "diverged": _("hidden revision '%s' has diverged"),
896 "diverged": _("hidden revision '%s' has diverged"),
895 "superseded": _("hidden revision '%s' was rewritten as: %s"),
897 "superseded": _("hidden revision '%s' was rewritten as: %s"),
896 "superseded_split": _("hidden revision '%s' was split as: %s"),
898 "superseded_split": _("hidden revision '%s' was split as: %s"),
897 "superseded_split_several": _("hidden revision '%s' was split as: %s and "
899 "superseded_split_several": _("hidden revision '%s' was split as: %s and "
898 "%d more"),
900 "%d more"),
899 }
901 }
900
902
901 def _getfilteredreason(repo, changeid, ctx):
903 def _getfilteredreason(repo, changeid, ctx):
902 """return a human-friendly string on why a obsolete changeset is hidden
904 """return a human-friendly string on why a obsolete changeset is hidden
903 """
905 """
904 successors = successorssets(repo, ctx.node())
906 successors = successorssets(repo, ctx.node())
905 fate = _getobsfate(successors)
907 fate = _getobsfate(successors)
906
908
907 # Be more precise in case the revision is superseded
909 # Be more precise in case the revision is superseded
908 if fate == 'pruned':
910 if fate == 'pruned':
909 return filteredmsgtable['pruned'] % changeid
911 return filteredmsgtable['pruned'] % changeid
910 elif fate == 'diverged':
912 elif fate == 'diverged':
911 return filteredmsgtable['diverged'] % changeid
913 return filteredmsgtable['diverged'] % changeid
912 elif fate == 'superseded':
914 elif fate == 'superseded':
913 single_successor = nodemod.short(successors[0][0])
915 single_successor = nodemod.short(successors[0][0])
914 return filteredmsgtable['superseded'] % (changeid, single_successor)
916 return filteredmsgtable['superseded'] % (changeid, single_successor)
915 elif fate == 'superseded_split':
917 elif fate == 'superseded_split':
916
918
917 succs = []
919 succs = []
918 for node_id in successors[0]:
920 for node_id in successors[0]:
919 succs.append(nodemod.short(node_id))
921 succs.append(nodemod.short(node_id))
920
922
921 if len(succs) <= 2:
923 if len(succs) <= 2:
922 fmtsuccs = ', '.join(succs)
924 fmtsuccs = ', '.join(succs)
923 return filteredmsgtable['superseded_split'] % (changeid, fmtsuccs)
925 return filteredmsgtable['superseded_split'] % (changeid, fmtsuccs)
924 else:
926 else:
925 firstsuccessors = ', '.join(succs[:2])
927 firstsuccessors = ', '.join(succs[:2])
926 remainingnumber = len(succs) - 2
928 remainingnumber = len(succs) - 2
927
929
928 args = (changeid, firstsuccessors, remainingnumber)
930 args = (changeid, firstsuccessors, remainingnumber)
929 return filteredmsgtable['superseded_split_several'] % args
931 return filteredmsgtable['superseded_split_several'] % args
930
932
931 def divergentsets(repo, ctx):
933 def divergentsets(repo, ctx):
932 """Compute sets of commits divergent with a given one"""
934 """Compute sets of commits divergent with a given one"""
933 cache = {}
935 cache = {}
934 base = {}
936 base = {}
935 for n in allpredecessors(repo.obsstore, [ctx.node()]):
937 for n in allpredecessors(repo.obsstore, [ctx.node()]):
936 if n == ctx.node():
938 if n == ctx.node():
937 # a node can't be a base for divergence with itself
939 # a node can't be a base for divergence with itself
938 continue
940 continue
939 nsuccsets = successorssets(repo, n, cache)
941 nsuccsets = successorssets(repo, n, cache)
940 for nsuccset in nsuccsets:
942 for nsuccset in nsuccsets:
941 if ctx.node() in nsuccset:
943 if ctx.node() in nsuccset:
942 # we are only interested in *other* successor sets
944 # we are only interested in *other* successor sets
943 continue
945 continue
944 if tuple(nsuccset) in base:
946 if tuple(nsuccset) in base:
945 # we already know the latest base for this divergency
947 # we already know the latest base for this divergency
946 continue
948 continue
947 base[tuple(nsuccset)] = n
949 base[tuple(nsuccset)] = n
948 return [{'divergentnodes': divset, 'commonpredecessor': b}
950 return [{'divergentnodes': divset, 'commonpredecessor': b}
949 for divset, b in base.iteritems()]
951 for divset, b in base.iteritems()]
950
952
951 def whyunstable(repo, ctx):
953 def whyunstable(repo, ctx):
952 result = []
954 result = []
953 if ctx.orphan():
955 if ctx.orphan():
954 for parent in ctx.parents():
956 for parent in ctx.parents():
955 kind = None
957 kind = None
956 if parent.orphan():
958 if parent.orphan():
957 kind = 'orphan'
959 kind = 'orphan'
958 elif parent.obsolete():
960 elif parent.obsolete():
959 kind = 'obsolete'
961 kind = 'obsolete'
960 if kind is not None:
962 if kind is not None:
961 result.append({'instability': 'orphan',
963 result.append({'instability': 'orphan',
962 'reason': '%s parent' % kind,
964 'reason': '%s parent' % kind,
963 'node': parent.hex()})
965 'node': parent.hex()})
964 if ctx.phasedivergent():
966 if ctx.phasedivergent():
965 predecessors = allpredecessors(repo.obsstore, [ctx.node()],
967 predecessors = allpredecessors(repo.obsstore, [ctx.node()],
966 ignoreflags=bumpedfix)
968 ignoreflags=bumpedfix)
967 immutable = [repo[p] for p in predecessors
969 immutable = [repo[p] for p in predecessors
968 if p in repo and not repo[p].mutable()]
970 if p in repo and not repo[p].mutable()]
969 for predecessor in immutable:
971 for predecessor in immutable:
970 result.append({'instability': 'phase-divergent',
972 result.append({'instability': 'phase-divergent',
971 'reason': 'immutable predecessor',
973 'reason': 'immutable predecessor',
972 'node': predecessor.hex()})
974 'node': predecessor.hex()})
973 if ctx.contentdivergent():
975 if ctx.contentdivergent():
974 dsets = divergentsets(repo, ctx)
976 dsets = divergentsets(repo, ctx)
975 for dset in dsets:
977 for dset in dsets:
976 divnodes = [repo[n] for n in dset['divergentnodes']]
978 divnodes = [repo[n] for n in dset['divergentnodes']]
977 result.append({'instability': 'content-divergent',
979 result.append({'instability': 'content-divergent',
978 'divergentnodes': divnodes,
980 'divergentnodes': divnodes,
979 'reason': 'predecessor',
981 'reason': 'predecessor',
980 'node': nodemod.hex(dset['commonpredecessor'])})
982 'node': nodemod.hex(dset['commonpredecessor'])})
981 return result
983 return result
General Comments 0
You need to be logged in to leave comments. Login now