##// END OF EJS Templates
obsfate: fix obsfate_printer with empty date list...
Boris Feld -
r34874:aa849cf5 default
parent child Browse files
Show More
@@ -1,837 +1,837
1 # obsutil.py - utility functions for obsolescence
1 # obsutil.py - utility functions for obsolescence
2 #
2 #
3 # Copyright 2017 Boris Feld <boris.feld@octobus.net>
3 # Copyright 2017 Boris Feld <boris.feld@octobus.net>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import re
10 import re
11
11
12 from . import (
12 from . import (
13 phases,
13 phases,
14 util
14 util
15 )
15 )
16
16
class marker(object):
    """Wrap obsolete marker raw data.

    The raw data tuple layout (by index) is:
      0: predecessor node, 1: successor nodes, 2: flags,
      3: encoded metadata pairs, 4: date, 5: parent nodes.
    """

    def __init__(self, repo, data):
        # the repo argument will be used to create changectx in later version
        self._repo = repo
        self._data = data
        self._decodedmeta = None

    def __hash__(self):
        return hash(self._data)

    def __eq__(self, other):
        # strict type check: a marker only equals another marker
        if type(other) != type(self):
            return False
        return self._data == other._data

    def precnode(self):
        """(DEPRECATED) Predecessor changeset node identifier."""
        msg = ("'marker.precnode' is deprecated, "
               "use 'marker.prednode'")
        util.nouideprecwarn(msg, '4.4')
        return self.prednode()

    def prednode(self):
        """Predecessor changeset node identifier"""
        return self._data[0]

    def succnodes(self):
        """List of successor changesets node identifiers"""
        return self._data[1]

    def parentnodes(self):
        """Parents of the predecessors (None if not recorded)"""
        return self._data[5]

    def metadata(self):
        """Decoded metadata dictionary"""
        return dict(self._data[3])

    def date(self):
        """Creation date as (unixtime, offset)"""
        return self._data[4]

    def flags(self):
        """The flags field of the marker"""
        return self._data[2]
63
63
def getmarkers(repo, nodes=None, exclusive=False):
    """returns markers known in a repository

    If <nodes> is specified, only markers "relevant" to those nodes are are
    returned.  With <exclusive> also set, only markers exclusive to those
    nodes are returned."""
    if nodes is None:
        # no filtering: iterate the whole store
        rawmarkers = repo.obsstore
    elif exclusive:
        rawmarkers = exclusivemarkers(repo, nodes)
    else:
        rawmarkers = repo.obsstore.relevantmarkers(nodes)

    for markerdata in rawmarkers:
        yield marker(repo, markerdata)
78
78
def closestpredecessors(repo, nodeid):
    """yield the list of next predecessors pointing on visible changectx nodes

    This function respect the repoview filtering, filtered revision will be
    considered missing.
    """

    precursors = repo.obsstore.predecessors
    stack = [nodeid]
    seen = set(stack)

    while stack:
        current = stack.pop()
        currentpreccs = precursors.get(current, ())

        for prec in currentpreccs:
            # each entry is a raw marker tuple; index 0 is the predecessor
            precnodeid = prec[0]

            # Basic cycle protection
            if precnodeid in seen:
                continue
            seen.add(precnodeid)

            if precnodeid in repo:
                # locally known (and visible): this is a closest predecessor
                yield precnodeid
            else:
                # unknown/filtered: keep walking further back
                stack.append(precnodeid)
106
106
def allprecursors(*args, **kwargs):
    """ (DEPRECATED) alias kept for backward compatibility;
    forwards to allpredecessors().
    """
    msg = ("'obsutil.allprecursors' is deprecated, "
           "use 'obsutil.allpredecessors'")
    util.nouideprecwarn(msg, '4.4')

    return allpredecessors(*args, **kwargs)
115
115
def allpredecessors(obsstore, nodes, ignoreflags=0):
    """Yield node for every precursors of <nodes>.

    Some precursors may be unknown locally.

    This is a linear yield unsuited to detecting folded changesets. It includes
    initial nodes too."""

    remaining = set(nodes)
    seen = set(remaining)
    while remaining:
        current = remaining.pop()
        yield current
        for mark in obsstore.predecessors.get(current, ()):
            # ignore marker flagged with specified flag
            if mark[2] & ignoreflags:
                continue
            # mark[0] is the predecessor node of this marker
            suc = mark[0]
            if suc not in seen:
                seen.add(suc)
                remaining.add(suc)
137
137
def allsuccessors(obsstore, nodes, ignoreflags=0):
    """Yield node for every successor of <nodes>.

    Some successors may be unknown locally.

    This is a linear yield unsuited to detecting split changesets. It includes
    initial nodes too."""
    remaining = set(nodes)
    seen = set(remaining)
    while remaining:
        current = remaining.pop()
        yield current
        for mark in obsstore.successors.get(current, ()):
            # ignore marker flagged with specified flag
            if mark[2] & ignoreflags:
                continue
            # mark[1] is the tuple of successor nodes
            for suc in mark[1]:
                if suc not in seen:
                    seen.add(suc)
                    remaining.add(suc)
158
158
159 def _filterprunes(markers):
159 def _filterprunes(markers):
160 """return a set with no prune markers"""
160 """return a set with no prune markers"""
161 return set(m for m in markers if m[1])
161 return set(m for m in markers if m[1])
162
162
def exclusivemarkers(repo, nodes):
    """set of markers relevant to "nodes" but no other locally-known nodes

    This function compute the set of markers "exclusive" to a locally-known
    node. This means we walk the markers starting from <nodes> until we reach a
    locally-known precursors outside of <nodes>. Element of <nodes> with
    locally-known successors outside of <nodes> are ignored (since their
    precursors markers are also relevant to these successors).

    For example:

        # (A0 rewritten as A1)
        #
        # A0 <-1- A1 # Marker "1" is exclusive to A1

    or

        # (A0 rewritten as AX; AX rewritten as A1; AX is unkown locally)
        #
        # <-1- A0 <-2- AX <-3- A1 # Marker "2,3" are exclusive to A1

    or

        # (A0 has unknown precursors, A0 rewritten as A1 and A2 (divergence))
        #
        #          <-2- A1 # Marker "2" is exclusive to A0,A1
        #        /
        # <-1- A0
        #        \
        #         <-3- A2 # Marker "3" is exclusive to A0,A2
        #
        # in addition:
        #
        #  Markers "2,3" are exclusive to A1,A2
        #  Markers "1,2,3" are exclusive to A0,A1,A2

    See test/test-obsolete-bundle-strip.t for more examples.

    An example usage is strip. When stripping a changeset, we also want to
    strip the markers exclusive to this changeset. Otherwise we would have
    "dangling"" obsolescence markers from its precursors: Obsolescence markers
    marking a node as obsolete without any successors available locally.

    As for relevant markers, the prune markers for children will be followed.
    Of course, they will only be followed if the pruned children is
    locally-known. Since the prune markers are relevant to the pruned node.
    However, while prune markers are considered relevant to the parent of the
    pruned changesets, prune markers for locally-known changeset (with no
    successors) are considered exclusive to the pruned nodes. This allows
    to strip the prune markers (with the rest of the exclusive chain) alongside
    the pruned changesets.
    """
    # running on a filtered repository would be dangerous as markers could be
    # reported as exclusive when they are relevant for other filtered nodes.
    unfi = repo.unfiltered()

    # shortcut to various useful item
    nm = unfi.changelog.nodemap
    precursorsmarkers = unfi.obsstore.predecessors
    successormarkers = unfi.obsstore.successors
    childrenmarkers = unfi.obsstore.children

    # exclusive markers (return of the function)
    exclmarkers = set()
    # we need fast membership testing
    nodes = set(nodes)
    # looking for head in the obshistory
    #
    # XXX we are ignoring all issues in regard with cycle for now.
    stack = [n for n in nodes if not _filterprunes(successormarkers.get(n, ()))]
    stack.sort()
    # nodes already stacked
    seennodes = set(stack)
    while stack:
        current = stack.pop()
        # fetch precursors markers
        markers = list(precursorsmarkers.get(current, ()))
        # extend the list with prune markers
        for mark in successormarkers.get(current, ()):
            if not mark[1]:
                markers.append(mark)
        # and markers from children (looking for prune)
        for mark in childrenmarkers.get(current, ()):
            if not mark[1]:
                markers.append(mark)
        # traverse the markers
        for mark in markers:
            if mark in exclmarkers:
                # markers already selected
                continue

            # If the markers is about the current node, select it
            #
            # (this delay the addition of markers from children)
            if mark[1] or mark[0] == current:
                exclmarkers.add(mark)

            # should we keep traversing through the precursors?
            prec = mark[0]

            # nodes in the stack or already processed
            if prec in seennodes:
                continue

            # is this a locally known node ?
            known = prec in nm
            # if locally-known and not in the <nodes> set the traversal
            # stop here.
            if known and prec not in nodes:
                continue

            # do not keep going if there are unselected markers pointing to this
            # nodes. If we end up traversing these unselected markers later the
            # node will be taken care of at that point.
            #
            # default to () so a node without any successor marker entry does
            # not hand None to _filterprunes (which would raise TypeError).
            precmarkers = _filterprunes(successormarkers.get(prec, ()))
            if precmarkers.issubset(exclmarkers):
                seennodes.add(prec)
                stack.append(prec)

    return exclmarkers
283
283
def foreground(repo, nodes):
    """return all nodes in the "foreground" of other node

    The foreground of a revision is anything reachable using parent -> children
    or precursor -> successor relation. It is very similar to "descendant" but
    augmented with obsolescence information.

    Beware that possible obsolescence cycle may result if complex situation.
    """
    repo = repo.unfiltered()
    foreground = set(repo.set('%ln::', nodes))
    if repo.obsstore:
        # We only need this complicated logic if there is obsolescence
        # XXX will probably deserve an optimised revset.
        nm = repo.changelog.nodemap
        plen = -1
        # compute the whole set of successors or descendants
        while len(foreground) != plen:
            plen = len(foreground)
            succs = set(c.node() for c in foreground)
            mutable = [c.node() for c in foreground if c.mutable()]
            succs.update(allsuccessors(repo.obsstore, mutable))
            # only keep successors that exist in the local changelog
            known = (n for n in succs if n in nm)
            foreground = set(repo.set('%ln::', known))
    return set(c.node() for c in foreground)
309
309
# effectflag field
#
# Effect-flag is a 1-byte bit field used to store what changed between a
# changeset and its successor(s).
#
# The effect flag is stored in obs-markers metadata while we iterate on the
# information design. That's why we have the EFFECTFLAGFIELD. If we come up
# with an incompatible design for effect flag, we can store a new design under
# another field name so we don't break readers. We plan to extend the existing
# obsmarkers bit-field when the effect flag design will be stabilized.
#
# The effect-flag is placed behind an experimental flag
# `effect-flags` set to off by default.
#

EFFECTFLAGFIELD = "ef1"

DESCCHANGED = 1 << 0 # action changed the description
METACHANGED = 1 << 1 # action change the meta
DIFFCHANGED = 1 << 3 # action change diff introduced by the changeset
PARENTCHANGED = 1 << 2 # action change the parent
USERCHANGED = 1 << 4 # the user changed
DATECHANGED = 1 << 5 # the date changed
BRANCHCHANGED = 1 << 6 # the branch changed

# extra keys whose differences should NOT count as a metadata change
METABLACKLIST = [
    re.compile('^branch$'),
    re.compile('^.*-source$'),
    re.compile('^.*_source$'),
    re.compile('^source$'),
]
341
341
def metanotblacklisted(metaitem):
    """ Check that the key of a meta item (extrakey, extravalue) does not
    match at least one of the blacklist pattern
    """
    metakey = metaitem[0]

    return not any(pattern.match(metakey) for pattern in METABLACKLIST)
349
349
350 def _prepare_hunk(hunk):
350 def _prepare_hunk(hunk):
351 """Drop all information but the username and patch"""
351 """Drop all information but the username and patch"""
352 cleanhunk = []
352 cleanhunk = []
353 for line in hunk.splitlines():
353 for line in hunk.splitlines():
354 if line.startswith(b'# User') or not line.startswith(b'#'):
354 if line.startswith(b'# User') or not line.startswith(b'#'):
355 if line.startswith(b'@@'):
355 if line.startswith(b'@@'):
356 line = b'@@\n'
356 line = b'@@\n'
357 cleanhunk.append(line)
357 cleanhunk.append(line)
358 return cleanhunk
358 return cleanhunk
359
359
def _getdifflines(iterdiff):
    """return a cleaned up lines

    Pulls the next hunk from the diff iterator and normalizes it with
    _prepare_hunk; returns None when the iterator is exhausted."""
    lines = next(iterdiff, None)

    if lines is None:
        return lines

    return _prepare_hunk(lines)
368
368
def _cmpdiff(leftctx, rightctx):
    """return True if both ctx introduce the "same diff"

    This is a first and basic implementation, with many shortcoming.
    """

    # Leftctx or right ctx might be filtered, so we need to use the contexts
    # with an unfiltered repository to safely compute the diff
    leftunfi = leftctx._repo.unfiltered()[leftctx.rev()]
    leftdiff = leftunfi.diff(git=1)
    rightunfi = rightctx._repo.unfiltered()[rightctx.rev()]
    rightdiff = rightunfi.diff(git=1)

    # compare hunk by hunk; loop ends when either side is exhausted (None)
    left, right = (0, 0)
    while None not in (left, right):
        left = _getdifflines(leftdiff)
        right = _getdifflines(rightdiff)

        if left != right:
            return False
    return True
390
390
def geteffectflag(relation):
    """ From an obs-marker relation, compute what changed between the
    predecessor and the successor.

    <relation> is a (source, successors) pair; the returned int is a
    bitwise-OR of the *CHANGED flags defined above.
    """
    effects = 0

    source = relation[0]

    for changectx in relation[1]:
        # Check if description has changed
        if changectx.description() != source.description():
            effects |= DESCCHANGED

        # Check if user has changed
        if changectx.user() != source.user():
            effects |= USERCHANGED

        # Check if date has changed
        if changectx.date() != source.date():
            effects |= DATECHANGED

        # Check if branch has changed
        if changectx.branch() != source.branch():
            effects |= BRANCHCHANGED

        # Check if at least one of the parent has changed
        if changectx.parents() != source.parents():
            effects |= PARENTCHANGED

        # Check if other meta has changed
        changeextra = changectx.extra().items()
        # materialize: on Python 3 filter() returns a lazy iterator and
        # comparing two of them with != is an identity check that is always
        # True, which would wrongly set METACHANGED every time
        ctxmeta = list(filter(metanotblacklisted, changeextra))

        sourceextra = source.extra().items()
        srcmeta = list(filter(metanotblacklisted, sourceextra))

        if ctxmeta != srcmeta:
            effects |= METACHANGED

        # Check if the diff has changed
        if not _cmpdiff(source, changectx):
            effects |= DIFFCHANGED

    return effects
435
435
def getobsoleted(repo, tr):
    """return the set of pre-existing revisions obsoleted by a transaction"""
    torev = repo.unfiltered().changelog.nodemap.get
    phase = repo._phasecache.phase
    succsmarkers = repo.obsstore.successors.get
    public = phases.public
    addedmarkers = tr.changes.get('obsmarkers')
    addedrevs = tr.changes.get('revs')
    seenrevs = set(addedrevs)
    obsoleted = set()
    for mark in addedmarkers:
        node = mark[0]
        rev = torev(node)
        # skip nodes unknown locally and revs added by this transaction
        if rev is None or rev in seenrevs:
            continue
        seenrevs.add(rev)
        # public changesets cannot be obsoleted
        if phase(repo, rev) == public:
            continue
        # only count it when every successor marker comes from this
        # transaction (i.e. it was not already obsolete before)
        if set(succsmarkers(node) or []).issubset(addedmarkers):
            obsoleted.add(rev)
    return obsoleted
457
457
class _succs(list):
    """small class to represent a successors with some metadata about it"""

    def __init__(self, *args, **kwargs):
        super(_succs, self).__init__(*args, **kwargs)
        # markers that produced this successors set
        self.markers = set()

    def copy(self):
        new = _succs(self)
        new.markers = self.markers.copy()
        return new

    @util.propertycache
    def _set(self):
        # immutable
        return set(self)

    def canmerge(self, other):
        # mergeable when our elements are a subset of the other set's
        return self._set.issubset(other._set)
477
477
478 def successorssets(repo, initialnode, closest=False, cache=None):
478 def successorssets(repo, initialnode, closest=False, cache=None):
479 """Return set of all latest successors of initial nodes
479 """Return set of all latest successors of initial nodes
480
480
481 The successors set of a changeset A are the group of revisions that succeed
481 The successors set of a changeset A are the group of revisions that succeed
482 A. It succeeds A as a consistent whole, each revision being only a partial
482 A. It succeeds A as a consistent whole, each revision being only a partial
483 replacement. By default, the successors set contains non-obsolete
483 replacement. By default, the successors set contains non-obsolete
484 changesets only, walking the obsolescence graph until reaching a leaf. If
484 changesets only, walking the obsolescence graph until reaching a leaf. If
485 'closest' is set to True, closest successors-sets are return (the
485 'closest' is set to True, closest successors-sets are return (the
486 obsolescence walk stops on known changesets).
486 obsolescence walk stops on known changesets).
487
487
488 This function returns the full list of successor sets which is why it
488 This function returns the full list of successor sets which is why it
489 returns a list of tuples and not just a single tuple. Each tuple is a valid
489 returns a list of tuples and not just a single tuple. Each tuple is a valid
490 successors set. Note that (A,) may be a valid successors set for changeset A
490 successors set. Note that (A,) may be a valid successors set for changeset A
491 (see below).
491 (see below).
492
492
493 In most cases, a changeset A will have a single element (e.g. the changeset
493 In most cases, a changeset A will have a single element (e.g. the changeset
494 A is replaced by A') in its successors set. Though, it is also common for a
494 A is replaced by A') in its successors set. Though, it is also common for a
495 changeset A to have no elements in its successor set (e.g. the changeset
495 changeset A to have no elements in its successor set (e.g. the changeset
496 has been pruned). Therefore, the returned list of successors sets will be
496 has been pruned). Therefore, the returned list of successors sets will be
497 [(A',)] or [], respectively.
497 [(A',)] or [], respectively.
498
498
499 When a changeset A is split into A' and B', however, it will result in a
499 When a changeset A is split into A' and B', however, it will result in a
500 successors set containing more than a single element, i.e. [(A',B')].
500 successors set containing more than a single element, i.e. [(A',B')].
501 Divergent changesets will result in multiple successors sets, i.e. [(A',),
501 Divergent changesets will result in multiple successors sets, i.e. [(A',),
502 (A'')].
502 (A'')].
503
503
504 If a changeset A is not obsolete, then it will conceptually have no
504 If a changeset A is not obsolete, then it will conceptually have no
505 successors set. To distinguish this from a pruned changeset, the successor
505 successors set. To distinguish this from a pruned changeset, the successor
506 set will contain itself only, i.e. [(A,)].
506 set will contain itself only, i.e. [(A,)].
507
507
508 Finally, final successors unknown locally are considered to be pruned
508 Finally, final successors unknown locally are considered to be pruned
509 (pruned: obsoleted without any successors). (Final: successors not affected
509 (pruned: obsoleted without any successors). (Final: successors not affected
510 by markers).
510 by markers).
511
511
512 The 'closest' mode respect the repoview filtering. For example, without
512 The 'closest' mode respect the repoview filtering. For example, without
513 filter it will stop at the first locally known changeset, with 'visible'
513 filter it will stop at the first locally known changeset, with 'visible'
514 filter it will stop on visible changesets).
514 filter it will stop on visible changesets).
515
515
516 The optional `cache` parameter is a dictionary that may contains
516 The optional `cache` parameter is a dictionary that may contains
517 precomputed successors sets. It is meant to reuse the computation of a
517 precomputed successors sets. It is meant to reuse the computation of a
518 previous call to `successorssets` when multiple calls are made at the same
518 previous call to `successorssets` when multiple calls are made at the same
519 time. The cache dictionary is updated in place. The caller is responsible
519 time. The cache dictionary is updated in place. The caller is responsible
520 for its life span. Code that makes multiple calls to `successorssets`
520 for its life span. Code that makes multiple calls to `successorssets`
521 *should* use this cache mechanism or risk a performance hit.
521 *should* use this cache mechanism or risk a performance hit.
522
522
523 Since results are different depending of the 'closest' most, the same cache
523 Since results are different depending of the 'closest' most, the same cache
524 cannot be reused for both mode.
524 cannot be reused for both mode.
525 """
525 """
526
526
527 succmarkers = repo.obsstore.successors
527 succmarkers = repo.obsstore.successors
528
528
529 # Stack of nodes we search successors sets for
529 # Stack of nodes we search successors sets for
530 toproceed = [initialnode]
530 toproceed = [initialnode]
531 # set version of above list for fast loop detection
531 # set version of above list for fast loop detection
532 # element added to "toproceed" must be added here
532 # element added to "toproceed" must be added here
533 stackedset = set(toproceed)
533 stackedset = set(toproceed)
534 if cache is None:
534 if cache is None:
535 cache = {}
535 cache = {}
536
536
537 # This while loop is the flattened version of a recursive search for
537 # This while loop is the flattened version of a recursive search for
538 # successors sets
538 # successors sets
539 #
539 #
540 # def successorssets(x):
540 # def successorssets(x):
541 # successors = directsuccessors(x)
541 # successors = directsuccessors(x)
542 # ss = [[]]
542 # ss = [[]]
543 # for succ in directsuccessors(x):
543 # for succ in directsuccessors(x):
544 # # product as in itertools cartesian product
544 # # product as in itertools cartesian product
545 # ss = product(ss, successorssets(succ))
545 # ss = product(ss, successorssets(succ))
546 # return ss
546 # return ss
547 #
547 #
548 # But we can not use plain recursive calls here:
548 # But we can not use plain recursive calls here:
549 # - that would blow the python call stack
549 # - that would blow the python call stack
550 # - obsolescence markers may have cycles, we need to handle them.
550 # - obsolescence markers may have cycles, we need to handle them.
551 #
551 #
552 # The `toproceed` list act as our call stack. Every node we search
552 # The `toproceed` list act as our call stack. Every node we search
553 # successors set for are stacked there.
553 # successors set for are stacked there.
554 #
554 #
555 # The `stackedset` is set version of this stack used to check if a node is
555 # The `stackedset` is set version of this stack used to check if a node is
556 # already stacked. This check is used to detect cycles and prevent infinite
556 # already stacked. This check is used to detect cycles and prevent infinite
557 # loop.
557 # loop.
558 #
558 #
559 # successors set of all nodes are stored in the `cache` dictionary.
559 # successors set of all nodes are stored in the `cache` dictionary.
560 #
560 #
561 # After this while loop ends we use the cache to return the successors sets
561 # After this while loop ends we use the cache to return the successors sets
562 # for the node requested by the caller.
562 # for the node requested by the caller.
563 while toproceed:
563 while toproceed:
564 # Every iteration tries to compute the successors sets of the topmost
564 # Every iteration tries to compute the successors sets of the topmost
565 # node of the stack: CURRENT.
565 # node of the stack: CURRENT.
566 #
566 #
567 # There are four possible outcomes:
567 # There are four possible outcomes:
568 #
568 #
569 # 1) We already know the successors sets of CURRENT:
569 # 1) We already know the successors sets of CURRENT:
570 # -> mission accomplished, pop it from the stack.
570 # -> mission accomplished, pop it from the stack.
571 # 2) Stop the walk:
571 # 2) Stop the walk:
572 # default case: Node is not obsolete
572 # default case: Node is not obsolete
573 # closest case: Node is known at this repo filter level
573 # closest case: Node is known at this repo filter level
574 # -> the node is its own successors sets. Add it to the cache.
574 # -> the node is its own successors sets. Add it to the cache.
575 # 3) We do not know successors set of direct successors of CURRENT:
575 # 3) We do not know successors set of direct successors of CURRENT:
576 # -> We add those successors to the stack.
576 # -> We add those successors to the stack.
577 # 4) We know successors sets of all direct successors of CURRENT:
577 # 4) We know successors sets of all direct successors of CURRENT:
578 # -> We can compute CURRENT successors set and add it to the
578 # -> We can compute CURRENT successors set and add it to the
579 # cache.
579 # cache.
580 #
580 #
581 current = toproceed[-1]
581 current = toproceed[-1]
582
582
583 # case 2 condition is a bit hairy because of closest,
583 # case 2 condition is a bit hairy because of closest,
584 # we compute it on its own
584 # we compute it on its own
585 case2condition = ((current not in succmarkers)
585 case2condition = ((current not in succmarkers)
586 or (closest and current != initialnode
586 or (closest and current != initialnode
587 and current in repo))
587 and current in repo))
588
588
589 if current in cache:
589 if current in cache:
590 # case (1): We already know the successors sets
590 # case (1): We already know the successors sets
591 stackedset.remove(toproceed.pop())
591 stackedset.remove(toproceed.pop())
592 elif case2condition:
592 elif case2condition:
593 # case (2): end of walk.
593 # case (2): end of walk.
594 if current in repo:
594 if current in repo:
595 # We have a valid successors.
595 # We have a valid successors.
596 cache[current] = [_succs((current,))]
596 cache[current] = [_succs((current,))]
597 else:
597 else:
598 # Final obsolete version is unknown locally.
598 # Final obsolete version is unknown locally.
599 # Do not count that as a valid successors
599 # Do not count that as a valid successors
600 cache[current] = []
600 cache[current] = []
601 else:
601 else:
602 # cases (3) and (4)
602 # cases (3) and (4)
603 #
603 #
604 # We proceed in two phases. Phase 1 aims to distinguish case (3)
604 # We proceed in two phases. Phase 1 aims to distinguish case (3)
605 # from case (4):
605 # from case (4):
606 #
606 #
607 # For each direct successors of CURRENT, we check whether its
607 # For each direct successors of CURRENT, we check whether its
608 # successors sets are known. If they are not, we stack the
608 # successors sets are known. If they are not, we stack the
609 # unknown node and proceed to the next iteration of the while
609 # unknown node and proceed to the next iteration of the while
610 # loop. (case 3)
610 # loop. (case 3)
611 #
611 #
612 # During this step, we may detect obsolescence cycles: a node
612 # During this step, we may detect obsolescence cycles: a node
613 # with unknown successors sets but already in the call stack.
613 # with unknown successors sets but already in the call stack.
614 # In such a situation, we arbitrary set the successors sets of
614 # In such a situation, we arbitrary set the successors sets of
615 # the node to nothing (node pruned) to break the cycle.
615 # the node to nothing (node pruned) to break the cycle.
616 #
616 #
617 # If no break was encountered we proceed to phase 2.
617 # If no break was encountered we proceed to phase 2.
618 #
618 #
619 # Phase 2 computes successors sets of CURRENT (case 4); see details
619 # Phase 2 computes successors sets of CURRENT (case 4); see details
620 # in phase 2 itself.
620 # in phase 2 itself.
621 #
621 #
622 # Note the two levels of iteration in each phase.
622 # Note the two levels of iteration in each phase.
623 # - The first one handles obsolescence markers using CURRENT as
623 # - The first one handles obsolescence markers using CURRENT as
624 # precursor (successors markers of CURRENT).
624 # precursor (successors markers of CURRENT).
625 #
625 #
626 # Having multiple entry here means divergence.
626 # Having multiple entry here means divergence.
627 #
627 #
628 # - The second one handles successors defined in each marker.
628 # - The second one handles successors defined in each marker.
629 #
629 #
630 # Having none means pruned node, multiple successors means split,
630 # Having none means pruned node, multiple successors means split,
631 # single successors are standard replacement.
631 # single successors are standard replacement.
632 #
632 #
633 for mark in sorted(succmarkers[current]):
633 for mark in sorted(succmarkers[current]):
634 for suc in mark[1]:
634 for suc in mark[1]:
635 if suc not in cache:
635 if suc not in cache:
636 if suc in stackedset:
636 if suc in stackedset:
637 # cycle breaking
637 # cycle breaking
638 cache[suc] = []
638 cache[suc] = []
639 else:
639 else:
640 # case (3) If we have not computed successors sets
640 # case (3) If we have not computed successors sets
641 # of one of those successors we add it to the
641 # of one of those successors we add it to the
642 # `toproceed` stack and stop all work for this
642 # `toproceed` stack and stop all work for this
643 # iteration.
643 # iteration.
644 toproceed.append(suc)
644 toproceed.append(suc)
645 stackedset.add(suc)
645 stackedset.add(suc)
646 break
646 break
647 else:
647 else:
648 continue
648 continue
649 break
649 break
650 else:
650 else:
651 # case (4): we know all successors sets of all direct
651 # case (4): we know all successors sets of all direct
652 # successors
652 # successors
653 #
653 #
654 # Successors set contributed by each marker depends on the
654 # Successors set contributed by each marker depends on the
655 # successors sets of all its "successors" node.
655 # successors sets of all its "successors" node.
656 #
656 #
657 # Each different marker is a divergence in the obsolescence
657 # Each different marker is a divergence in the obsolescence
658 # history. It contributes successors sets distinct from other
658 # history. It contributes successors sets distinct from other
659 # markers.
659 # markers.
660 #
660 #
661 # Within a marker, a successor may have divergent successors
661 # Within a marker, a successor may have divergent successors
662 # sets. In such a case, the marker will contribute multiple
662 # sets. In such a case, the marker will contribute multiple
663 # divergent successors sets. If multiple successors have
663 # divergent successors sets. If multiple successors have
664 # divergent successors sets, a Cartesian product is used.
664 # divergent successors sets, a Cartesian product is used.
665 #
665 #
666 # At the end we post-process successors sets to remove
666 # At the end we post-process successors sets to remove
667 # duplicated entry and successors set that are strict subset of
667 # duplicated entry and successors set that are strict subset of
668 # another one.
668 # another one.
669 succssets = []
669 succssets = []
670 for mark in sorted(succmarkers[current]):
670 for mark in sorted(succmarkers[current]):
671 # successors sets contributed by this marker
671 # successors sets contributed by this marker
672 base = _succs()
672 base = _succs()
673 base.markers.add(mark)
673 base.markers.add(mark)
674 markss = [base]
674 markss = [base]
675 for suc in mark[1]:
675 for suc in mark[1]:
676 # cardinal product with previous successors
676 # cardinal product with previous successors
677 productresult = []
677 productresult = []
678 for prefix in markss:
678 for prefix in markss:
679 for suffix in cache[suc]:
679 for suffix in cache[suc]:
680 newss = prefix.copy()
680 newss = prefix.copy()
681 newss.markers.update(suffix.markers)
681 newss.markers.update(suffix.markers)
682 for part in suffix:
682 for part in suffix:
683 # do not duplicated entry in successors set
683 # do not duplicated entry in successors set
684 # first entry wins.
684 # first entry wins.
685 if part not in newss:
685 if part not in newss:
686 newss.append(part)
686 newss.append(part)
687 productresult.append(newss)
687 productresult.append(newss)
688 markss = productresult
688 markss = productresult
689 succssets.extend(markss)
689 succssets.extend(markss)
690 # remove duplicated and subset
690 # remove duplicated and subset
691 seen = []
691 seen = []
692 final = []
692 final = []
693 candidates = sorted((s for s in succssets if s),
693 candidates = sorted((s for s in succssets if s),
694 key=len, reverse=True)
694 key=len, reverse=True)
695 for cand in candidates:
695 for cand in candidates:
696 for seensuccs in seen:
696 for seensuccs in seen:
697 if cand.canmerge(seensuccs):
697 if cand.canmerge(seensuccs):
698 seensuccs.markers.update(cand.markers)
698 seensuccs.markers.update(cand.markers)
699 break
699 break
700 else:
700 else:
701 final.append(cand)
701 final.append(cand)
702 seen.append(cand)
702 seen.append(cand)
703 final.reverse() # put small successors set first
703 final.reverse() # put small successors set first
704 cache[current] = final
704 cache[current] = final
705 return cache[initialnode]
705 return cache[initialnode]
706
706
def successorsandmarkers(repo, ctx):
    """Compute the raw data needed for computing obsfate.

    Returns a list of dicts, one per successors set, each carrying the
    'successors' set itself and its 'markers'. Returns None when ctx is
    not obsolete.
    """
    if not ctx.obsolete():
        return None

    ssets = successorssets(repo, ctx.node(), closest=True)

    # successorssets() returns an empty list for pruned revisions; remap
    # it into a list containing one empty set so the loop below can still
    # attach prune-marker information to it.
    if not ssets:
        ssets = [[]]

    # Try to recover pruned markers
    succsmap = repo.obsstore.successors
    fullsuccessorsets = []  # successor set + markers
    for sset in ssets:
        if sset:
            fullsuccessorsets.append(sset)
            continue

        # successorssets() returns an empty set() when ctx or one of its
        # successors is pruned. In this case, walk the obs-markers tree
        # again starting with ctx and collect the relevant pruning
        # obs-markers: the ones without successors. Having those markers
        # lets us report details about the fate, like who pruned this
        # changeset and when.
        #
        # XXX we do not catch all prune markers (eg rewritten then pruned)
        # (fix me later)
        foundany = False
        for mark in succsmap.get(ctx.node(), ()):
            if not mark[1]:
                foundany = True
                pruned = _succs()
                pruned.markers.add(mark)
                fullsuccessorsets.append(pruned)
        if not foundany:
            fullsuccessorsets.append(_succs())

    return [{'successors': sset, 'markers': sset.markers}
            for sset in fullsuccessorsets]
753
753
def successorsetverb(successorset):
    """Return the verb summarizing the successorset."""
    # Empty set: the changeset was removed without replacement.
    if not successorset:
        return 'pruned'
    # Exactly one successor: a plain rewrite.
    if len(successorset) == 1:
        return 'rewritten'
    # Several successors: the changeset was split.
    return 'split'
764
764
def markersdates(markers):
    """Return the list of dates for a list of markers.

    A marker's date lives at index 4 of the raw marker tuple.
    """
    return [mark[4] for mark in markers]
769
769
def markersusers(markers):
    """Return a sorted list of the markers' users, without duplicates.

    A marker's metadata lives at index 3 of the raw marker tuple as a
    sequence of (key, value) pairs; entries without a 'user' key (or with
    an empty value) are skipped.
    """
    users = set()
    for mark in markers:
        user = dict(mark[3]).get('user')
        if user:
            users.add(user)
    return sorted(users)
777
777
def markersoperations(markers):
    """Return a sorted list of the markers' operations, without duplicates.

    A marker's metadata lives at index 3 of the raw marker tuple as a
    sequence of (key, value) pairs; entries without an 'operation' key
    (or with an empty value) are skipped.
    """
    operations = set()
    for mark in markers:
        operation = dict(mark[3]).get('operation')
        if operation:
            operations.add(operation)
    return sorted(operations)
786
786
def obsfateprinter(successors, markers, ui):
    """Build an obsfate string for a single successorset using all the
    obsfate-related functions defined in obsutil.

    The resulting line has the shape:
        "<verb>[ using <ops>][ as <succs>][ by <users>][ (<dates>)]"
    where each optional part is emitted depending on the available marker
    metadata and the ui verbosity level.
    """
    quiet = ui.quiet
    verbose = ui.verbose
    normal = not verbose and not quiet

    line = []

    # Verb
    line.append(successorsetverb(successors))

    # Operations
    operations = markersoperations(markers)
    if operations:
        line.append(" using %s" % ", ".join(operations))

    # Successors
    if successors:
        fmtsuccessors = [successors.joinfmt(succ) for succ in successors]
        line.append(" as %s" % ", ".join(fmtsuccessors))

    # Users
    users = markersusers(markers)
    # Filter out current user in not verbose mode to reduce amount of
    # information
    if not verbose:
        currentuser = ui.username(acceptempty=True)
        if len(users) == 1 and currentuser in users:
            users = None

    if (verbose or normal) and users:
        line.append(" by %s" % ", ".join(users))

    # Date
    dates = markersdates(markers)

    # Guard on `dates`: markers may carry no date information, and
    # min()/max() raise ValueError on an empty sequence.
    if dates and verbose:
        min_date = min(dates)
        max_date = max(dates)

        if min_date == max_date:
            fmtmin_date = util.datestr(min_date, '%Y-%m-%d %H:%M %1%2')
            line.append(" (at %s)" % fmtmin_date)
        else:
            fmtmin_date = util.datestr(min_date, '%Y-%m-%d %H:%M %1%2')
            fmtmax_date = util.datestr(max_date, '%Y-%m-%d %H:%M %1%2')
            line.append(" (between %s and %s)" % (fmtmin_date, fmtmax_date))

    return "".join(line)
General Comments 0
You need to be logged in to leave comments. Login now