##// END OF EJS Templates
effectflag: document effect flag...
Boris Feld -
r34423:2fd06499 default
parent child Browse files
Show More
@@ -1,771 +1,785 b''
1 # obsutil.py - utility functions for obsolescence
1 # obsutil.py - utility functions for obsolescence
2 #
2 #
3 # Copyright 2017 Boris Feld <boris.feld@octobus.net>
3 # Copyright 2017 Boris Feld <boris.feld@octobus.net>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import re
10 import re
11
11
12 from . import (
12 from . import (
13 phases,
13 phases,
14 util
14 util
15 )
15 )
16
16
class marker(object):
    """Wrap obsolete marker raw data.

    The raw data is the tuple stored in the obsstore:
    (predecessor, successors, flags, metadata, date, parents).
    """

    def __init__(self, repo, data):
        # the repo argument will be used to create changectx in later version
        self._repo = repo
        self._data = data
        self._decodedmeta = None

    def __hash__(self):
        # hash of the raw tuple, so equal markers hash alike
        return hash(self._data)

    def __eq__(self, other):
        # only markers of the exact same class compare equal
        return type(self) is type(other) and self._data == other._data

    def precnode(self):
        """(DEPRECATED) renamed to prednode"""
        util.nouideprecwarn("'marker.precnode' is deprecated, "
                            "use 'marker.prednode'", '4.4')
        return self.prednode()

    def prednode(self):
        """Predecessor changeset node identifier"""
        return self._data[0]

    def succnodes(self):
        """List of successor changesets node identifiers"""
        return self._data[1]

    def flags(self):
        """The flags field of the marker"""
        return self._data[2]

    def metadata(self):
        """Decoded metadata dictionary"""
        return dict(self._data[3])

    def date(self):
        """Creation date as (unixtime, offset)"""
        return self._data[4]

    def parentnodes(self):
        """Parents of the predecessors (None if not recorded)"""
        return self._data[5]
63
63
def getmarkers(repo, nodes=None, exclusive=False):
    """Yield ``marker`` objects known to a repository.

    Without <nodes>, every marker in the obsstore is returned. With <nodes>,
    only markers "relevant" to those nodes are yielded; with exclusive=True,
    only the markers exclusive to those nodes."""
    if nodes is None:
        raw = repo.obsstore
    else:
        raw = (exclusivemarkers(repo, nodes) if exclusive
               else repo.obsstore.relevantmarkers(nodes))

    for rawdata in raw:
        yield marker(repo, rawdata)
78
78
def closestpredecessors(repo, nodeid):
    """yield the list of next predecessors pointing on visible changectx nodes

    This function respects the repoview filtering: filtered revisions are
    considered missing.
    """
    predmarkers = repo.obsstore.predecessors
    pending = [nodeid]
    visited = set(pending)

    while pending:
        node = pending.pop()

        for mark in predmarkers.get(node, ()):
            candidate = mark[0]

            # basic cycle protection
            if candidate in visited:
                continue
            visited.add(candidate)

            if candidate in repo:
                # locally known (and visible): this is a closest predecessor
                yield candidate
            else:
                # unknown or filtered: keep walking further back
                pending.append(candidate)
106
106
def allprecursors(*args, **kwargs):
    """(DEPRECATED) renamed to allpredecessors"""
    util.nouideprecwarn("'obsutil.allprecursors' is deprecated, "
                        "use 'obsutil.allpredecessors'", '4.4')
    return allpredecessors(*args, **kwargs)
115
115
def allpredecessors(obsstore, nodes, ignoreflags=0):
    """Yield every predecessor node of <nodes>, initial nodes included.

    Some predecessors may be unknown locally.

    This is a linear yield unsuited to detecting folded changesets."""

    pending = set(nodes)
    emitted = set(pending)
    while pending:
        node = pending.pop()
        yield node
        for mark in obsstore.predecessors.get(node, ()):
            # skip markers carrying one of the ignored flags
            if mark[2] & ignoreflags:
                continue
            pred = mark[0]
            if pred not in emitted:
                emitted.add(pred)
                pending.add(pred)
137
137
def allsuccessors(obsstore, nodes, ignoreflags=0):
    """Yield every successor node of <nodes>, initial nodes included.

    Some successors may be unknown locally.

    This is a linear yield unsuited to detecting split changesets."""
    pending = set(nodes)
    emitted = set(pending)
    while pending:
        node = pending.pop()
        yield node
        for mark in obsstore.successors.get(node, ()):
            # skip markers carrying one of the ignored flags
            if mark[2] & ignoreflags:
                continue
            for successor in mark[1]:
                if successor not in emitted:
                    emitted.add(successor)
                    pending.add(successor)
158
158
159 def _filterprunes(markers):
159 def _filterprunes(markers):
160 """return a set with no prune markers"""
160 """return a set with no prune markers"""
161 return set(m for m in markers if m[1])
161 return set(m for m in markers if m[1])
162
162
def exclusivemarkers(repo, nodes):
    """set of markers relevant to "nodes" but no other locally-known nodes

    This function compute the set of markers "exclusive" to a locally-known
    node. This means we walk the markers starting from <nodes> until we reach a
    locally-known precursors outside of <nodes>. Element of <nodes> with
    locally-known successors outside of <nodes> are ignored (since their
    precursors markers are also relevant to these successors).

    For example:

        # (A0 rewritten as A1)
        #
        # A0 <-1- A1 # Marker "1" is exclusive to A1

    or

        # (A0 rewritten as AX; AX rewritten as A1; AX is unknown locally)
        #
        # <-1- A0 <-2- AX <-3- A1 # Marker "2,3" are exclusive to A1

    or

        # (A0 has unknown precursors, A0 rewritten as A1 and A2 (divergence))
        #
        # <-2- A1 # Marker "2" is exclusive to A0,A1
        #        /
        # <-1- A0
        #        \
        # <-3- A2 # Marker "3" is exclusive to A0,A2
        #
        # in addition:
        #
        #  Markers "2,3" are exclusive to A1,A2
        #  Markers "1,2,3" are exclusive to A0,A1,A2

    See test/test-obsolete-bundle-strip.t for more examples.

    An example usage is strip. When stripping a changeset, we also want to
    strip the markers exclusive to this changeset. Otherwise we would have
    "dangling" obsolescence markers from its precursors: Obsolescence markers
    marking a node as obsolete without any successors available locally.

    As for relevant markers, the prune markers for children will be followed.
    Of course, they will only be followed if the pruned children is
    locally-known. Since the prune markers are relevant to the pruned node.
    However, while prune markers are considered relevant to the parent of the
    pruned changesets, prune markers for locally-known changeset (with no
    successors) are considered exclusive to the pruned nodes. This allows
    to strip the prune markers (with the rest of the exclusive chain) alongside
    the pruned changesets.
    """
    # running on a filtered repository would be dangerous as markers could be
    # reported as exclusive when they are relevant for other filtered nodes.
    unfi = repo.unfiltered()

    # shortcut to various useful item
    nm = unfi.changelog.nodemap
    precursorsmarkers = unfi.obsstore.predecessors
    successormarkers = unfi.obsstore.successors
    childrenmarkers = unfi.obsstore.children

    # exclusive markers (return of the function)
    exclmarkers = set()
    # we need fast membership testing
    nodes = set(nodes)
    # looking for head in the obshistory
    #
    # XXX we are ignoring all issues in regard with cycle for now.
    stack = [n for n in nodes if not _filterprunes(successormarkers.get(n, ()))]
    stack.sort()
    # nodes already stacked
    seennodes = set(stack)
    while stack:
        current = stack.pop()
        # fetch precursors markers
        markers = list(precursorsmarkers.get(current, ()))
        # extend the list with prune markers
        for mark in successormarkers.get(current, ()):
            if not mark[1]:
                markers.append(mark)
        # and markers from children (looking for prune)
        for mark in childrenmarkers.get(current, ()):
            if not mark[1]:
                markers.append(mark)
        # traverse the markers
        for mark in markers:
            if mark in exclmarkers:
                # markers already selected
                continue

            # If the markers is about the current node, select it
            #
            # (this delay the addition of markers from children)
            if mark[1] or mark[0] == current:
                exclmarkers.add(mark)

            # should we keep traversing through the precursors?
            prec = mark[0]

            # nodes in the stack or already processed
            if prec in seennodes:
                continue

            # is this a locally known node ?
            known = prec in nm
            # if locally-known and not in the <nodes> set the traversal
            # stop here.
            if known and prec not in nodes:
                continue

            # do not keep going if there are unselected markers pointing to this
            # nodes. If we end up traversing these unselected markers later the
            # node will be taken care of at that point.
            #
            # NOTE(review): .get(prec) has no default here; this assumes every
            # predecessor reached through a marker has a successors entry in
            # the obsstore index — confirm, otherwise this raises on None.
            precmarkers = _filterprunes(successormarkers.get(prec))
            if precmarkers.issubset(exclmarkers):
                seennodes.add(prec)
                stack.append(prec)

    return exclmarkers
283
283
def foreground(repo, nodes):
    """return all nodes in the "foreground" of other node

    The foreground of a revision is anything reachable using parent -> children
    or precursor -> successor relation. It is very similar to "descendant" but
    augmented with obsolescence information.

    Beware that possible obsolescence cycle may result if complex situation.
    """
    repo = repo.unfiltered()
    # start from the plain descendants of <nodes>
    foreground = set(repo.set('%ln::', nodes))
    if repo.obsstore:
        # We only need this complicated logic if there is obsolescence
        # XXX will probably deserve an optimised revset.
        nm = repo.changelog.nodemap
        plen = -1
        # compute the whole set of successors or descendants
        # (fixed-point iteration: stop once a pass adds no new changeset)
        while len(foreground) != plen:
            plen = len(foreground)
            succs = set(c.node() for c in foreground)
            # only mutable changesets can have successors
            mutable = [c.node() for c in foreground if c.mutable()]
            succs.update(allsuccessors(repo.obsstore, mutable))
            # drop successors unknown locally before feeding the revset
            known = (n for n in succs if n in nm)
            foreground = set(repo.set('%ln::', known))
    return set(c.node() for c in foreground)
309
309
# effectflag field
#
# Effect-flag is a 1-byte bit field used to store what changed between a
# changeset and its successor(s).
#
# The effect flag is stored in obs-markers metadata while we iterate on the
# information design. That's why we have the EFFECTFLAGFIELD. If we come up
# with an incompatible design for effect flag, we can store a new design under
# another field name so we don't break readers. We plan to extend the existing
# obsmarkers bit-field when the effect flag design will be stabilized.
#
# The effect-flag is placed behind an experimental flag
# `effect-flags` set to off by default.
#

# obs-marker metadata key under which the effect flag is stored
EFFECTFLAGFIELD = "ef1"

# individual bits of the effect flag
# NOTE(review): DIFFCHANGED uses bit 3 while PARENTCHANGED uses bit 2, i.e.
# the listing is not in numeric bit order; these values appear to be part of
# the stored metadata format, so do not renumber them.
DESCCHANGED = 1 << 0 # action changed the description
METACHANGED = 1 << 1 # action change the meta
DIFFCHANGED = 1 << 3 # action change diff introduced by the changeset
PARENTCHANGED = 1 << 2 # action change the parent
USERCHANGED = 1 << 4 # the user changed
DATECHANGED = 1 << 5 # the date changed
BRANCHCHANGED = 1 << 6 # the branch changed

# "extra" keys excluded from the metadata comparison in geteffectflag
METABLACKLIST = [
    re.compile('^branch$'),
    re.compile('^.*-source$'),
    re.compile('^.*_source$'),
    re.compile('^source$'),
]
327
341
def metanotblacklisted(metaitem):
    """Return True when the key of a meta item (extrakey, extravalue)
    matches none of the blacklist patterns."""
    metakey = metaitem[0]

    for pattern in METABLACKLIST:
        if pattern.match(metakey):
            return False
    return True
335
349
336 def _prepare_hunk(hunk):
350 def _prepare_hunk(hunk):
337 """Drop all information but the username and patch"""
351 """Drop all information but the username and patch"""
338 cleanhunk = []
352 cleanhunk = []
339 for line in hunk.splitlines():
353 for line in hunk.splitlines():
340 if line.startswith(b'# User') or not line.startswith(b'#'):
354 if line.startswith(b'# User') or not line.startswith(b'#'):
341 if line.startswith(b'@@'):
355 if line.startswith(b'@@'):
342 line = b'@@\n'
356 line = b'@@\n'
343 cleanhunk.append(line)
357 cleanhunk.append(line)
344 return cleanhunk
358 return cleanhunk
345
359
346 def _getdifflines(iterdiff):
360 def _getdifflines(iterdiff):
347 """return a cleaned up lines"""
361 """return a cleaned up lines"""
348 lines = next(iterdiff, None)
362 lines = next(iterdiff, None)
349
363
350 if lines is None:
364 if lines is None:
351 return lines
365 return lines
352
366
353 return _prepare_hunk(lines)
367 return _prepare_hunk(lines)
354
368
def _cmpdiff(leftctx, rightctx):
    """return True if both ctx introduce the "same diff"

    This is a first and basic implementation, with many shortcoming.
    """

    # Either context might be filtered, so fetch both again through an
    # unfiltered repository to safely compute the diff
    leftunfi = leftctx._repo.unfiltered()[leftctx.rev()]
    leftdiff = leftunfi.diff(git=1)
    rightunfi = rightctx._repo.unfiltered()[rightctx.rev()]
    rightdiff = rightunfi.diff(git=1)

    # compare hunk by hunk; both iterators returning None together means
    # both diffs were fully consumed without a mismatch
    left = right = 0
    while left is not None and right is not None:
        left = _getdifflines(leftdiff)
        right = _getdifflines(rightdiff)

        if left != right:
            return False
    return True
376
390
def geteffectflag(relation):
    """ From an obs-marker relation, compute what changed between the
    predecessor and the successor.

    `relation` is a `(source, successors)` pair. Return an integer bit field
    combining the `*CHANGED` constants, one bit per detected change across
    all successors.
    """
    effects = 0

    source = relation[0]

    for changectx in relation[1]:
        # Check if description has changed
        if changectx.description() != source.description():
            effects |= DESCCHANGED

        # Check if user has changed
        if changectx.user() != source.user():
            effects |= USERCHANGED

        # Check if date has changed
        if changectx.date() != source.date():
            effects |= DATECHANGED

        # Check if branch has changed
        if changectx.branch() != source.branch():
            effects |= BRANCHCHANGED

        # Check if at least one of the parent has changed
        if changectx.parents() != source.parents():
            effects |= PARENTCHANGED

        # Check if other meta has changed.
        #
        # Materialize the filters with list(): on Python 3 `filter()` returns
        # an iterator, and comparing two iterators with `!=` compares object
        # identity, which would wrongly set METACHANGED on every marker.
        # On Python 2 this is identical to the previous behavior.
        changeextra = changectx.extra().items()
        ctxmeta = list(filter(metanotblacklisted, changeextra))

        sourceextra = source.extra().items()
        srcmeta = list(filter(metanotblacklisted, sourceextra))

        if ctxmeta != srcmeta:
            effects |= METACHANGED

        # Check if the diff has changed
        if not _cmpdiff(source, changectx):
            effects |= DIFFCHANGED

    return effects
421
435
def getobsoleted(repo, tr):
    """return the set of pre-existing revisions obsoleted by a transaction"""
    # helpers bound once, outside the loop
    noderev = repo.unfiltered().changelog.nodemap.get
    getphase = repo._phasecache.phase
    getsuccs = repo.obsstore.successors.get
    public = phases.public
    addedmarkers = tr.changes.get('obsmarkers')
    addedrevs = tr.changes.get('revs')
    seenrevs = set(addedrevs)
    obsoleted = set()
    for mark in addedmarkers:
        node = mark[0]
        rev = noderev(node)
        # skip nodes unknown locally and revisions already handled
        # (including revisions added by this very transaction)
        if rev is None or rev in seenrevs:
            continue
        seenrevs.add(rev)
        # public changesets cannot become obsolete
        if getphase(repo, rev) == public:
            continue
        # only report the revision if every marker obsoleting it was added
        # by this transaction
        if set(getsuccs(node) or []).issubset(addedmarkers):
            obsoleted.add(rev)
    return obsoleted
443
457
class _succs(list):
    """small class to represent a successors with some metadata about it"""

    def __init__(self, *args, **kwargs):
        super(_succs, self).__init__(*args, **kwargs)
        # markers that produced this successors set
        self.markers = set()

    def copy(self):
        # duplicate the list content and give the copy its own marker set
        duplicate = _succs(self)
        duplicate.markers = self.markers.copy()
        return duplicate

    @util.propertycache
    def _set(self):
        # immutable, cached set view of the list content for fast lookups
        return set(self)

    def canmerge(self, other):
        # mergeable when every node of self also appears in other
        return self._set.issubset(other._set)
463
477
464 def successorssets(repo, initialnode, closest=False, cache=None):
478 def successorssets(repo, initialnode, closest=False, cache=None):
465 """Return set of all latest successors of initial nodes
479 """Return set of all latest successors of initial nodes
466
480
467 The successors set of a changeset A are the group of revisions that succeed
481 The successors set of a changeset A are the group of revisions that succeed
468 A. It succeeds A as a consistent whole, each revision being only a partial
482 A. It succeeds A as a consistent whole, each revision being only a partial
469 replacement. By default, the successors set contains non-obsolete
483 replacement. By default, the successors set contains non-obsolete
470 changesets only, walking the obsolescence graph until reaching a leaf. If
484 changesets only, walking the obsolescence graph until reaching a leaf. If
471 'closest' is set to True, closest successors-sets are return (the
485 'closest' is set to True, closest successors-sets are return (the
472 obsolescence walk stops on known changesets).
486 obsolescence walk stops on known changesets).
473
487
474 This function returns the full list of successor sets which is why it
488 This function returns the full list of successor sets which is why it
475 returns a list of tuples and not just a single tuple. Each tuple is a valid
489 returns a list of tuples and not just a single tuple. Each tuple is a valid
476 successors set. Note that (A,) may be a valid successors set for changeset A
490 successors set. Note that (A,) may be a valid successors set for changeset A
477 (see below).
491 (see below).
478
492
479 In most cases, a changeset A will have a single element (e.g. the changeset
493 In most cases, a changeset A will have a single element (e.g. the changeset
480 A is replaced by A') in its successors set. Though, it is also common for a
494 A is replaced by A') in its successors set. Though, it is also common for a
481 changeset A to have no elements in its successor set (e.g. the changeset
495 changeset A to have no elements in its successor set (e.g. the changeset
482 has been pruned). Therefore, the returned list of successors sets will be
496 has been pruned). Therefore, the returned list of successors sets will be
483 [(A',)] or [], respectively.
497 [(A',)] or [], respectively.
484
498
485 When a changeset A is split into A' and B', however, it will result in a
499 When a changeset A is split into A' and B', however, it will result in a
486 successors set containing more than a single element, i.e. [(A',B')].
500 successors set containing more than a single element, i.e. [(A',B')].
487 Divergent changesets will result in multiple successors sets, i.e. [(A',),
501 Divergent changesets will result in multiple successors sets, i.e. [(A',),
488 (A'')].
502 (A'')].
489
503
490 If a changeset A is not obsolete, then it will conceptually have no
504 If a changeset A is not obsolete, then it will conceptually have no
491 successors set. To distinguish this from a pruned changeset, the successor
505 successors set. To distinguish this from a pruned changeset, the successor
492 set will contain itself only, i.e. [(A,)].
506 set will contain itself only, i.e. [(A,)].
493
507
494 Finally, final successors unknown locally are considered to be pruned
508 Finally, final successors unknown locally are considered to be pruned
495 (pruned: obsoleted without any successors). (Final: successors not affected
509 (pruned: obsoleted without any successors). (Final: successors not affected
496 by markers).
510 by markers).
497
511
498 The 'closest' mode respect the repoview filtering. For example, without
512 The 'closest' mode respect the repoview filtering. For example, without
499 filter it will stop at the first locally known changeset, with 'visible'
513 filter it will stop at the first locally known changeset, with 'visible'
500 filter it will stop on visible changesets).
514 filter it will stop on visible changesets).
501
515
502 The optional `cache` parameter is a dictionary that may contains
516 The optional `cache` parameter is a dictionary that may contains
503 precomputed successors sets. It is meant to reuse the computation of a
517 precomputed successors sets. It is meant to reuse the computation of a
504 previous call to `successorssets` when multiple calls are made at the same
518 previous call to `successorssets` when multiple calls are made at the same
505 time. The cache dictionary is updated in place. The caller is responsible
519 time. The cache dictionary is updated in place. The caller is responsible
506 for its life span. Code that makes multiple calls to `successorssets`
520 for its life span. Code that makes multiple calls to `successorssets`
507 *should* use this cache mechanism or risk a performance hit.
521 *should* use this cache mechanism or risk a performance hit.
508
522
509 Since results are different depending of the 'closest' most, the same cache
523 Since results are different depending of the 'closest' most, the same cache
510 cannot be reused for both mode.
524 cannot be reused for both mode.
511 """
525 """
512
526
513 succmarkers = repo.obsstore.successors
527 succmarkers = repo.obsstore.successors
514
528
515 # Stack of nodes we search successors sets for
529 # Stack of nodes we search successors sets for
516 toproceed = [initialnode]
530 toproceed = [initialnode]
517 # set version of above list for fast loop detection
531 # set version of above list for fast loop detection
518 # element added to "toproceed" must be added here
532 # element added to "toproceed" must be added here
519 stackedset = set(toproceed)
533 stackedset = set(toproceed)
520 if cache is None:
534 if cache is None:
521 cache = {}
535 cache = {}
522
536
523 # This while loop is the flattened version of a recursive search for
537 # This while loop is the flattened version of a recursive search for
524 # successors sets
538 # successors sets
525 #
539 #
526 # def successorssets(x):
540 # def successorssets(x):
527 # successors = directsuccessors(x)
541 # successors = directsuccessors(x)
528 # ss = [[]]
542 # ss = [[]]
529 # for succ in directsuccessors(x):
543 # for succ in directsuccessors(x):
530 # # product as in itertools cartesian product
544 # # product as in itertools cartesian product
531 # ss = product(ss, successorssets(succ))
545 # ss = product(ss, successorssets(succ))
532 # return ss
546 # return ss
533 #
547 #
534 # But we can not use plain recursive calls here:
548 # But we can not use plain recursive calls here:
535 # - that would blow the python call stack
549 # - that would blow the python call stack
536 # - obsolescence markers may have cycles, we need to handle them.
550 # - obsolescence markers may have cycles, we need to handle them.
537 #
551 #
538 # The `toproceed` list act as our call stack. Every node we search
552 # The `toproceed` list act as our call stack. Every node we search
539 # successors set for are stacked there.
553 # successors set for are stacked there.
540 #
554 #
541 # The `stackedset` is set version of this stack used to check if a node is
555 # The `stackedset` is set version of this stack used to check if a node is
542 # already stacked. This check is used to detect cycles and prevent infinite
556 # already stacked. This check is used to detect cycles and prevent infinite
543 # loop.
557 # loop.
544 #
558 #
545 # successors set of all nodes are stored in the `cache` dictionary.
559 # successors set of all nodes are stored in the `cache` dictionary.
546 #
560 #
547 # After this while loop ends we use the cache to return the successors sets
561 # After this while loop ends we use the cache to return the successors sets
548 # for the node requested by the caller.
562 # for the node requested by the caller.
549 while toproceed:
563 while toproceed:
550 # Every iteration tries to compute the successors sets of the topmost
564 # Every iteration tries to compute the successors sets of the topmost
551 # node of the stack: CURRENT.
565 # node of the stack: CURRENT.
552 #
566 #
553 # There are four possible outcomes:
567 # There are four possible outcomes:
554 #
568 #
555 # 1) We already know the successors sets of CURRENT:
569 # 1) We already know the successors sets of CURRENT:
556 # -> mission accomplished, pop it from the stack.
570 # -> mission accomplished, pop it from the stack.
557 # 2) Stop the walk:
571 # 2) Stop the walk:
558 # default case: Node is not obsolete
572 # default case: Node is not obsolete
559 # closest case: Node is known at this repo filter level
573 # closest case: Node is known at this repo filter level
560 # -> the node is its own successors sets. Add it to the cache.
574 # -> the node is its own successors sets. Add it to the cache.
561 # 3) We do not know successors set of direct successors of CURRENT:
575 # 3) We do not know successors set of direct successors of CURRENT:
562 # -> We add those successors to the stack.
576 # -> We add those successors to the stack.
563 # 4) We know successors sets of all direct successors of CURRENT:
577 # 4) We know successors sets of all direct successors of CURRENT:
564 # -> We can compute CURRENT successors set and add it to the
578 # -> We can compute CURRENT successors set and add it to the
565 # cache.
579 # cache.
566 #
580 #
567 current = toproceed[-1]
581 current = toproceed[-1]
568
582
569 # case 2 condition is a bit hairy because of closest,
583 # case 2 condition is a bit hairy because of closest,
570 # we compute it on its own
584 # we compute it on its own
571 case2condition = ((current not in succmarkers)
585 case2condition = ((current not in succmarkers)
572 or (closest and current != initialnode
586 or (closest and current != initialnode
573 and current in repo))
587 and current in repo))
574
588
575 if current in cache:
589 if current in cache:
576 # case (1): We already know the successors sets
590 # case (1): We already know the successors sets
577 stackedset.remove(toproceed.pop())
591 stackedset.remove(toproceed.pop())
578 elif case2condition:
592 elif case2condition:
579 # case (2): end of walk.
593 # case (2): end of walk.
580 if current in repo:
594 if current in repo:
581 # We have a valid successors.
595 # We have a valid successors.
582 cache[current] = [_succs((current,))]
596 cache[current] = [_succs((current,))]
583 else:
597 else:
584 # Final obsolete version is unknown locally.
598 # Final obsolete version is unknown locally.
585 # Do not count that as a valid successors
599 # Do not count that as a valid successors
586 cache[current] = []
600 cache[current] = []
587 else:
601 else:
588 # cases (3) and (4)
602 # cases (3) and (4)
589 #
603 #
590 # We proceed in two phases. Phase 1 aims to distinguish case (3)
604 # We proceed in two phases. Phase 1 aims to distinguish case (3)
591 # from case (4):
605 # from case (4):
592 #
606 #
593 # For each direct successors of CURRENT, we check whether its
607 # For each direct successors of CURRENT, we check whether its
594 # successors sets are known. If they are not, we stack the
608 # successors sets are known. If they are not, we stack the
595 # unknown node and proceed to the next iteration of the while
609 # unknown node and proceed to the next iteration of the while
596 # loop. (case 3)
610 # loop. (case 3)
597 #
611 #
598 # During this step, we may detect obsolescence cycles: a node
612 # During this step, we may detect obsolescence cycles: a node
599 # with unknown successors sets but already in the call stack.
613 # with unknown successors sets but already in the call stack.
600 # In such a situation, we arbitrary set the successors sets of
614 # In such a situation, we arbitrary set the successors sets of
601 # the node to nothing (node pruned) to break the cycle.
615 # the node to nothing (node pruned) to break the cycle.
602 #
616 #
603 # If no break was encountered we proceed to phase 2.
617 # If no break was encountered we proceed to phase 2.
604 #
618 #
605 # Phase 2 computes successors sets of CURRENT (case 4); see details
619 # Phase 2 computes successors sets of CURRENT (case 4); see details
606 # in phase 2 itself.
620 # in phase 2 itself.
607 #
621 #
608 # Note the two levels of iteration in each phase.
622 # Note the two levels of iteration in each phase.
609 # - The first one handles obsolescence markers using CURRENT as
623 # - The first one handles obsolescence markers using CURRENT as
610 # precursor (successors markers of CURRENT).
624 # precursor (successors markers of CURRENT).
611 #
625 #
612 # Having multiple entry here means divergence.
626 # Having multiple entry here means divergence.
613 #
627 #
614 # - The second one handles successors defined in each marker.
628 # - The second one handles successors defined in each marker.
615 #
629 #
616 # Having none means pruned node, multiple successors means split,
630 # Having none means pruned node, multiple successors means split,
617 # single successors are standard replacement.
631 # single successors are standard replacement.
618 #
632 #
619 for mark in sorted(succmarkers[current]):
633 for mark in sorted(succmarkers[current]):
620 for suc in mark[1]:
634 for suc in mark[1]:
621 if suc not in cache:
635 if suc not in cache:
622 if suc in stackedset:
636 if suc in stackedset:
623 # cycle breaking
637 # cycle breaking
624 cache[suc] = []
638 cache[suc] = []
625 else:
639 else:
626 # case (3) If we have not computed successors sets
640 # case (3) If we have not computed successors sets
627 # of one of those successors we add it to the
641 # of one of those successors we add it to the
628 # `toproceed` stack and stop all work for this
642 # `toproceed` stack and stop all work for this
629 # iteration.
643 # iteration.
630 toproceed.append(suc)
644 toproceed.append(suc)
631 stackedset.add(suc)
645 stackedset.add(suc)
632 break
646 break
633 else:
647 else:
634 continue
648 continue
635 break
649 break
636 else:
650 else:
637 # case (4): we know all successors sets of all direct
651 # case (4): we know all successors sets of all direct
638 # successors
652 # successors
639 #
653 #
640 # Successors set contributed by each marker depends on the
654 # Successors set contributed by each marker depends on the
641 # successors sets of all its "successors" node.
655 # successors sets of all its "successors" node.
642 #
656 #
643 # Each different marker is a divergence in the obsolescence
657 # Each different marker is a divergence in the obsolescence
644 # history. It contributes successors sets distinct from other
658 # history. It contributes successors sets distinct from other
645 # markers.
659 # markers.
646 #
660 #
647 # Within a marker, a successor may have divergent successors
661 # Within a marker, a successor may have divergent successors
648 # sets. In such a case, the marker will contribute multiple
662 # sets. In such a case, the marker will contribute multiple
649 # divergent successors sets. If multiple successors have
663 # divergent successors sets. If multiple successors have
650 # divergent successors sets, a Cartesian product is used.
664 # divergent successors sets, a Cartesian product is used.
651 #
665 #
652 # At the end we post-process successors sets to remove
666 # At the end we post-process successors sets to remove
653 # duplicated entry and successors set that are strict subset of
667 # duplicated entry and successors set that are strict subset of
654 # another one.
668 # another one.
655 succssets = []
669 succssets = []
656 for mark in sorted(succmarkers[current]):
670 for mark in sorted(succmarkers[current]):
657 # successors sets contributed by this marker
671 # successors sets contributed by this marker
658 base = _succs()
672 base = _succs()
659 base.markers.add(mark)
673 base.markers.add(mark)
660 markss = [base]
674 markss = [base]
661 for suc in mark[1]:
675 for suc in mark[1]:
662 # cardinal product with previous successors
676 # cardinal product with previous successors
663 productresult = []
677 productresult = []
664 for prefix in markss:
678 for prefix in markss:
665 for suffix in cache[suc]:
679 for suffix in cache[suc]:
666 newss = prefix.copy()
680 newss = prefix.copy()
667 newss.markers.update(suffix.markers)
681 newss.markers.update(suffix.markers)
668 for part in suffix:
682 for part in suffix:
669 # do not duplicated entry in successors set
683 # do not duplicated entry in successors set
670 # first entry wins.
684 # first entry wins.
671 if part not in newss:
685 if part not in newss:
672 newss.append(part)
686 newss.append(part)
673 productresult.append(newss)
687 productresult.append(newss)
674 markss = productresult
688 markss = productresult
675 succssets.extend(markss)
689 succssets.extend(markss)
676 # remove duplicated and subset
690 # remove duplicated and subset
677 seen = []
691 seen = []
678 final = []
692 final = []
679 candidates = sorted((s for s in succssets if s),
693 candidates = sorted((s for s in succssets if s),
680 key=len, reverse=True)
694 key=len, reverse=True)
681 for cand in candidates:
695 for cand in candidates:
682 for seensuccs in seen:
696 for seensuccs in seen:
683 if cand.canmerge(seensuccs):
697 if cand.canmerge(seensuccs):
684 seensuccs.markers.update(cand.markers)
698 seensuccs.markers.update(cand.markers)
685 break
699 break
686 else:
700 else:
687 final.append(cand)
701 final.append(cand)
688 seen.append(cand)
702 seen.append(cand)
689 final.reverse() # put small successors set first
703 final.reverse() # put small successors set first
690 cache[current] = final
704 cache[current] = final
691 return cache[initialnode]
705 return cache[initialnode]
692
706
def successorsandmarkers(repo, ctx):
    """compute the raw data needed for computing obsfate

    Returns None when ctx is not obsolete; otherwise a list of dicts, one
    per successors set, of the form {'successors': set, 'markers': markers}.
    """
    if not ctx.obsolete():
        return None

    ssets = successorssets(repo, ctx.node(), closest=True)

    # closestsuccessors returns an empty list for pruned revisions, remap it
    # into a list containing an empty list for future processing
    if not ssets:
        ssets = [[]]

    # Try to recover pruned markers
    succsmap = repo.obsstore.successors
    fullsuccessorsets = []  # successor set + markers
    for sset in ssets:
        if sset:
            fullsuccessorsets.append(sset)
            continue
        # successorsset return an empty set() when ctx or one of its
        # successors is pruned.
        # In this case, walk the obs-markers tree again starting with ctx
        # and find the relevant pruning obs-makers, the ones without
        # successors.
        # Having these markers allow us to compute some information about
        # its fate, like who pruned this changeset and when.

        # XXX we do not catch all prune markers (eg rewritten then pruned)
        # (fix me later)
        foundany = False
        for mark in succsmap.get(ctx.node(), ()):
            # mark[1] is the successors tuple; empty means a prune marker
            if not mark[1]:
                foundany = True
                pruned = _succs()
                pruned.markers.add(mark)
                fullsuccessorsets.append(pruned)
        if not foundany:
            fullsuccessorsets.append(_succs())

    return [{'successors': sset, 'markers': sset.markers}
            for sset in fullsuccessorsets]
739
753
def successorsetverb(successorset):
    """ Return the verb summarizing the successorset
    """
    # Empty set -> the changeset was pruned; one successor -> plain
    # rewrite; several successors -> the changeset was split.
    if not successorset:
        return 'pruned'
    if len(successorset) == 1:
        return 'rewritten'
    return 'split'
750
764
def markersdates(markers):
    """returns the list of dates for a list of markers
    """
    # A marker is a raw tuple; index 4 holds its date field.
    dates = []
    for mark in markers:
        dates.append(mark[4])
    return dates
755
769
def markersusers(markers):
    """ Returns a sorted list of markers users without duplicates
    """
    # Metadata is stored as a tuple of (key, value) pairs at index 3;
    # collect the non-empty 'user' entries into a set to drop duplicates.
    users = set()
    for mark in markers:
        user = dict(mark[3]).get('user')
        if user:
            users.add(user)
    return sorted(users)
763
777
def markersoperations(markers):
    """ Returns a sorted list of markers operations without duplicates
    """
    # Metadata is stored as a tuple of (key, value) pairs at index 3;
    # gather the non-empty 'operation' entries, deduplicated via a set.
    operations = set()
    for mark in markers:
        operation = dict(mark[3]).get('operation')
        if operation:
            operations.add(operation)
    return sorted(operations)
General Comments 0
You need to be logged in to leave comments. Login now