##// END OF EJS Templates
effectflag: document effect flag...
Boris Feld -
r34423:2fd06499 default
parent child Browse files
Show More
@@ -1,771 +1,785 b''
1 1 # obsutil.py - utility functions for obsolescence
2 2 #
3 3 # Copyright 2017 Boris Feld <boris.feld@octobus.net>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import re
11 11
12 12 from . import (
13 13 phases,
14 14 util
15 15 )
16 16
class marker(object):
    """Wrap obsolete marker raw data"""

    def __init__(self, repo, data):
        # the repo argument will be used to create changectx in later version
        self._repo = repo
        self._data = data
        self._decodedmeta = None

    def __hash__(self):
        return hash(self._data)

    def __eq__(self, other):
        # two markers are equal when they wrap the same raw data tuple
        return type(other) == type(self) and self._data == other._data

    def precnode(self):
        """(DEPRECATED) predecessor changeset node identifier"""
        msg = ("'marker.precnode' is deprecated, "
               "use 'marker.prednode'")
        util.nouideprecwarn(msg, '4.4')
        return self.prednode()

    def prednode(self):
        """Predecessor changeset node identifier"""
        return self._data[0]

    def succnodes(self):
        """List of successor changesets node identifiers"""
        return self._data[1]

    def flags(self):
        """The flags field of the marker"""
        return self._data[2]

    def metadata(self):
        """Decoded metadata dictionary"""
        return dict(self._data[3])

    def date(self):
        """Creation date as (unixtime, offset)"""
        return self._data[4]

    def parentnodes(self):
        """Parents of the predecessors (None if not recorded)"""
        return self._data[5]
def getmarkers(repo, nodes=None, exclusive=False):
    """returns markers known in a repository

    If <nodes> is specified, only markers "relevant" to those nodes are
    returned"""
    if nodes is None:
        # no filtering requested: expose the whole obsstore
        source = repo.obsstore
    elif exclusive:
        # only markers exclusive to <nodes>
        source = exclusivemarkers(repo, nodes)
    else:
        # markers relevant to <nodes>
        source = repo.obsstore.relevantmarkers(nodes)

    for markerdata in source:
        yield marker(repo, markerdata)
78 78
def closestpredecessors(repo, nodeid):
    """yield the list of next predecessors pointing on visible changectx nodes

    This function respect the repoview filtering, filtered revision will be
    considered missing.
    """
    predecessors = repo.obsstore.predecessors
    stack = [nodeid]
    seen = set(stack)

    while stack:
        current = stack.pop()

        for prec in predecessors.get(current, ()):
            precnodeid = prec[0]

            # basic cycle protection: never walk the same node twice
            if precnodeid in seen:
                continue
            seen.add(precnodeid)

            if precnodeid in repo:
                # visible locally-known node: this is a closest predecessor
                yield precnodeid
            else:
                # unknown or filtered: keep digging through its predecessors
                stack.append(precnodeid)
106 106
def allprecursors(*args, **kwargs):
    """ (DEPRECATED)
    """
    # compatibility shim kept for external callers; emits a deprecation
    # warning and delegates to the renamed allpredecessors()
    msg = ("'obsutil.allprecursors' is deprecated, "
           "use 'obsutil.allpredecessors'")
    util.nouideprecwarn(msg, '4.4')

    return allpredecessors(*args, **kwargs)
115 115
def allpredecessors(obsstore, nodes, ignoreflags=0):
    """Yield node for every precursors of <nodes>.

    Some precursors may be unknown locally.

    This is a linear yield unsuited to detecting folded changesets. It includes
    initial nodes too."""
    pending = set(nodes)
    visited = set(pending)
    while pending:
        current = pending.pop()
        yield current
        for mark in obsstore.predecessors.get(current, ()):
            # skip markers carrying any of the ignored flags
            if mark[2] & ignoreflags:
                continue
            prednode = mark[0]
            if prednode not in visited:
                visited.add(prednode)
                pending.add(prednode)
137 137
def allsuccessors(obsstore, nodes, ignoreflags=0):
    """Yield node for every successor of <nodes>.

    Some successors may be unknown locally.

    This is a linear yield unsuited to detecting split changesets. It includes
    initial nodes too."""
    pending = set(nodes)
    visited = set(pending)
    while pending:
        current = pending.pop()
        yield current
        for mark in obsstore.successors.get(current, ()):
            # skip markers carrying any of the ignored flags
            if mark[2] & ignoreflags:
                continue
            for succnode in mark[1]:
                if succnode not in visited:
                    visited.add(succnode)
                    pending.add(succnode)
158 158
159 159 def _filterprunes(markers):
160 160 """return a set with no prune markers"""
161 161 return set(m for m in markers if m[1])
162 162
def exclusivemarkers(repo, nodes):
    """set of markers relevant to "nodes" but no other locally-known nodes

    This function compute the set of markers "exclusive" to a locally-known
    node. This means we walk the markers starting from <nodes> until we reach a
    locally-known precursors outside of <nodes>. Element of <nodes> with
    locally-known successors outside of <nodes> are ignored (since their
    precursors markers are also relevant to these successors).

    For example:

        # (A0 rewritten as A1)
        #
        # A0 <-1- A1 # Marker "1" is exclusive to A1

    or

        # (A0 rewritten as AX; AX rewritten as A1; AX is unknown locally)
        #
        # <-1- A0 <-2- AX <-3- A1 # Marker "2,3" are exclusive to A1

    or

        # (A0 has unknown precursors, A0 rewritten as A1 and A2 (divergence))
        #
        #          <-2- A1 # Marker "2" is exclusive to A0,A1
        #        /
        # <-1- A0
        #        \
        #         <-3- A2 # Marker "3" is exclusive to A0,A2
        #
        # in addition:
        #
        #  Markers "2,3" are exclusive to A1,A2
        #  Markers "1,2,3" are exclusive to A0,A1,A2

    See test/test-obsolete-bundle-strip.t for more examples.

    An example usage is strip. When stripping a changeset, we also want to
    strip the markers exclusive to this changeset. Otherwise we would have
    "dangling" obsolescence markers from its precursors: Obsolescence markers
    marking a node as obsolete without any successors available locally.

    As for relevant markers, the prune markers for children will be followed.
    Of course, they will only be followed if the pruned children is
    locally-known. Since the prune markers are relevant to the pruned node.
    However, while prune markers are considered relevant to the parent of the
    pruned changesets, prune markers for locally-known changeset (with no
    successors) are considered exclusive to the pruned nodes. This allows
    to strip the prune markers (with the rest of the exclusive chain) alongside
    the pruned changesets.
    """
    # running on a filtered repository would be dangerous as markers could be
    # reported as exclusive when they are relevant for other filtered nodes.
    unfi = repo.unfiltered()

    # shortcut to various useful item
    nm = unfi.changelog.nodemap
    precursorsmarkers = unfi.obsstore.predecessors
    successormarkers = unfi.obsstore.successors
    childrenmarkers = unfi.obsstore.children

    # exclusive markers (return of the function)
    exclmarkers = set()
    # we need fast membership testing
    nodes = set(nodes)
    # looking for head in the obshistory
    #
    # XXX we are ignoring all issues in regard with cycle for now.
    stack = [n for n in nodes if not _filterprunes(successormarkers.get(n, ()))]
    # sort so the traversal order (and therefore the result) is deterministic
    stack.sort()
    # nodes already stacked
    seennodes = set(stack)
    while stack:
        current = stack.pop()
        # fetch precursors markers
        markers = list(precursorsmarkers.get(current, ()))
        # extend the list with prune markers
        for mark in successormarkers.get(current, ()):
            if not mark[1]:
                markers.append(mark)
        # and markers from children (looking for prune)
        for mark in childrenmarkers.get(current, ()):
            if not mark[1]:
                markers.append(mark)
        # traverse the markers
        for mark in markers:
            if mark in exclmarkers:
                # markers already selected
                continue

            # If the markers is about the current node, select it
            #
            # (this delay the addition of markers from children)
            if mark[1] or mark[0] == current:
                exclmarkers.add(mark)

            # should we keep traversing through the precursors?
            prec = mark[0]

            # nodes in the stack or already processed
            if prec in seennodes:
                continue

            # is this a locally known node ?
            known = prec in nm
            # if locally-known and not in the <nodes> set the traversal
            # stop here.
            if known and prec not in nodes:
                continue

            # do not keep going if there are unselected markers pointing to this
            # nodes. If we end up traversing these unselected markers later the
            # node will be taken care of at that point.
            precmarkers = _filterprunes(successormarkers.get(prec))
            if precmarkers.issubset(exclmarkers):
                seennodes.add(prec)
                stack.append(prec)

    return exclmarkers
283 283
def foreground(repo, nodes):
    """return all nodes in the "foreground" of other node

    The foreground of a revision is anything reachable using parent -> children
    or precursor -> successor relation. It is very similar to "descendant" but
    augmented with obsolescence information.

    Beware that possible obsolescence cycle may result if complex situation.
    """
    repo = repo.unfiltered()
    foreground = set(repo.set('%ln::', nodes))
    if repo.obsstore:
        # We only need this complicated logic if there is obsolescence
        # XXX will probably deserve an optimised revset.
        nm = repo.changelog.nodemap
        plen = -1
        # compute the whole set of successors or descendants
        # (fixed point: iterate until the foreground stops growing)
        while len(foreground) != plen:
            plen = len(foreground)
            succs = set(c.node() for c in foreground)
            mutable = [c.node() for c in foreground if c.mutable()]
            succs.update(allsuccessors(repo.obsstore, mutable))
            # only locally-known successors can be turned back into changectx
            known = (n for n in succs if n in nm)
            foreground = set(repo.set('%ln::', known))
    return set(c.node() for c in foreground)
309 309
# effectflag field
#
# Effect-flag is a 1-byte bit field used to store what changed between a
# changeset and its successor(s).
#
# The effect flag is stored in obs-markers metadata while we iterate on the
# information design. That's why we have the EFFECTFLAGFIELD. If we come up
# with an incompatible design for effect flag, we can store a new design under
# another field name so we don't break readers. We plan to extend the existing
# obsmarkers bit-field when the effect flag design will be stabilized.
#
# The effect-flag is placed behind an experimental flag
# `effect-flags` set to off by default.
#

EFFECTFLAGFIELD = "ef1"

# NOTE(review): bits 2 and 3 are declared out of numeric order below; the
# values are written into marker metadata, so they must not be renumbered.
DESCCHANGED = 1 << 0 # action changed the description
METACHANGED = 1 << 1 # action change the meta
DIFFCHANGED = 1 << 3 # action change diff introduced by the changeset
PARENTCHANGED = 1 << 2 # action change the parent
USERCHANGED = 1 << 4 # the user changed
DATECHANGED = 1 << 5 # the date changed
BRANCHCHANGED = 1 << 6 # the branch changed
320 334
# extra keys that never count as a "meta" change for the effect flag
METABLACKLIST = [
    re.compile('^branch$'),
    re.compile('^.*-source$'),
    re.compile('^.*_source$'),
    re.compile('^source$'),
]

def metanotblacklisted(metaitem):
    """ Check that the key of a meta item (extrakey, extravalue) does not
    match at least one of the blacklist pattern
    """
    metakey = metaitem[0]
    for pattern in METABLACKLIST:
        if pattern.match(metakey):
            return False
    return True
335 349
336 350 def _prepare_hunk(hunk):
337 351 """Drop all information but the username and patch"""
338 352 cleanhunk = []
339 353 for line in hunk.splitlines():
340 354 if line.startswith(b'# User') or not line.startswith(b'#'):
341 355 if line.startswith(b'@@'):
342 356 line = b'@@\n'
343 357 cleanhunk.append(line)
344 358 return cleanhunk
345 359
def _getdifflines(iterdiff):
    """Return the cleaned-up lines of the next diff hunk.

    Returns None when the iterator is exhausted.
    """
    lines = next(iterdiff, None)
    if lines is None:
        return None
    return _prepare_hunk(lines)
354 368
def _cmpdiff(leftctx, rightctx):
    """return True if both ctx introduce the "same diff"

    This is a first and basic implementation, with many shortcoming.
    """

    # Leftctx or right ctx might be filtered, so we need to use the contexts
    # with an unfiltered repository to safely compute the diff
    leftunfi = leftctx._repo.unfiltered()[leftctx.rev()]
    leftdiff = leftunfi.diff(git=1)
    rightunfi = rightctx._repo.unfiltered()[rightctx.rev()]
    rightdiff = rightunfi.diff(git=1)

    left, right = (0, 0)
    # walk both diffs hunk by hunk; _getdifflines returns None when a side is
    # exhausted, which terminates the loop (both None => diffs matched fully)
    while None not in (left, right):
        left = _getdifflines(leftdiff)
        right = _getdifflines(rightdiff)

        if left != right:
            return False
    return True
376 390
def geteffectflag(relation):
    """ From an obs-marker relation, compute what changed between the
    predecessor and the successor.

    `relation` is a `(source, successors)` pair. Returns a bit field
    combining DESCCHANGED, METACHANGED, DIFFCHANGED, PARENTCHANGED,
    USERCHANGED, DATECHANGED and BRANCHCHANGED.
    """
    effects = 0

    source = relation[0]

    for changectx in relation[1]:
        # Check if description has changed
        if changectx.description() != source.description():
            effects |= DESCCHANGED

        # Check if user has changed
        if changectx.user() != source.user():
            effects |= USERCHANGED

        # Check if date has changed
        if changectx.date() != source.date():
            effects |= DATECHANGED

        # Check if branch has changed
        if changectx.branch() != source.branch():
            effects |= BRANCHCHANGED

        # Check if at least one of the parent has changed
        if changectx.parents() != source.parents():
            effects |= PARENTCHANGED

        # Check if other meta has changed. The filter() results must be
        # materialized: on Python 3 filter() returns a lazy iterator, and
        # comparing two distinct iterator objects with != is always True,
        # which would wrongly flag METACHANGED on every marker. list() keeps
        # the Python 2 semantics (where filter() already returns a list).
        changeextra = changectx.extra().items()
        ctxmeta = list(filter(metanotblacklisted, changeextra))

        sourceextra = source.extra().items()
        srcmeta = list(filter(metanotblacklisted, sourceextra))

        if ctxmeta != srcmeta:
            effects |= METACHANGED

        # Check if the diff has changed
        if not _cmpdiff(source, changectx):
            effects |= DIFFCHANGED

    return effects
421 435
def getobsoleted(repo, tr):
    """return the set of pre-existing revisions obsoleted by a transaction

    A revision counts as obsoleted when it was not created by <tr>, is not
    public, and every marker rewriting it was added by <tr>.
    """
    torev = repo.unfiltered().changelog.nodemap.get
    phase = repo._phasecache.phase
    succsmarkers = repo.obsstore.successors.get
    public = phases.public
    addedmarkers = tr.changes.get('obsmarkers')
    addedrevs = tr.changes.get('revs')
    seenrevs = set(addedrevs)
    obsoleted = set()
    for mark in addedmarkers:
        node = mark[0]
        rev = torev(node)
        # skip nodes unknown locally and revisions created by this transaction
        if rev is None or rev in seenrevs:
            continue
        seenrevs.add(rev)
        # public changesets cannot be obsoleted
        if phase(repo, rev) == public:
            continue
        # only count it when all its rewriting markers came from <tr>
        if set(succsmarkers(node) or []).issubset(addedmarkers):
            obsoleted.add(rev)
    return obsoleted
443 457
class _succs(list):
    """small class to represent a successors with some metadata about it"""

    def __init__(self, *args, **kwargs):
        super(_succs, self).__init__(*args, **kwargs)
        # obsolescence markers that contributed to this successors set
        self.markers = set()

    def copy(self):
        # shallow copy of the list plus an independent copy of the marker set
        dup = _succs(self)
        dup.markers = self.markers.copy()
        return dup

    @util.propertycache
    def _set(self):
        # immutable set view of the list, cached for repeated canmerge calls
        return set(self)

    def canmerge(self, other):
        # True when every element of self also appears in other
        return self._set <= other._set
463 477
def successorssets(repo, initialnode, closest=False, cache=None):
    """Return set of all latest successors of initial nodes

    The successors set of a changeset A are the group of revisions that succeed
    A. It succeeds A as a consistent whole, each revision being only a partial
    replacement. By default, the successors set contains non-obsolete
    changesets only, walking the obsolescence graph until reaching a leaf. If
    'closest' is set to True, closest successors-sets are returned (the
    obsolescence walk stops on known changesets).

    This function returns the full list of successor sets which is why it
    returns a list of tuples and not just a single tuple. Each tuple is a valid
    successors set. Note that (A,) may be a valid successors set for changeset A
    (see below).

    In most cases, a changeset A will have a single element (e.g. the changeset
    A is replaced by A') in its successors set. Though, it is also common for a
    changeset A to have no elements in its successor set (e.g. the changeset
    has been pruned). Therefore, the returned list of successors sets will be
    [(A',)] or [], respectively.

    When a changeset A is split into A' and B', however, it will result in a
    successors set containing more than a single element, i.e. [(A',B')].
    Divergent changesets will result in multiple successors sets, i.e. [(A',),
    (A'')].

    If a changeset A is not obsolete, then it will conceptually have no
    successors set. To distinguish this from a pruned changeset, the successor
    set will contain itself only, i.e. [(A,)].

    Finally, final successors unknown locally are considered to be pruned
    (pruned: obsoleted without any successors). (Final: successors not affected
    by markers).

    The 'closest' mode respect the repoview filtering. For example, without
    filter it will stop at the first locally known changeset, with 'visible'
    filter it will stop on visible changesets).

    The optional `cache` parameter is a dictionary that may contain
    precomputed successors sets. It is meant to reuse the computation of a
    previous call to `successorssets` when multiple calls are made at the same
    time. The cache dictionary is updated in place. The caller is responsible
    for its life span. Code that makes multiple calls to `successorssets`
    *should* use this cache mechanism or risk a performance hit.

    Since results are different depending of the 'closest' mode, the same cache
    cannot be reused for both mode.
    """

    succmarkers = repo.obsstore.successors

    # Stack of nodes we search successors sets for
    toproceed = [initialnode]
    # set version of above list for fast loop detection
    # element added to "toproceed" must be added here
    stackedset = set(toproceed)
    if cache is None:
        cache = {}

    # This while loop is the flattened version of a recursive search for
    # successors sets
    #
    # def successorssets(x):
    #    successors = directsuccessors(x)
    #    ss = [[]]
    #    for succ in directsuccessors(x):
    #        # product as in itertools cartesian product
    #        ss = product(ss, successorssets(succ))
    #    return ss
    #
    # But we can not use plain recursive calls here:
    # - that would blow the python call stack
    # - obsolescence markers may have cycles, we need to handle them.
    #
    # The `toproceed` list act as our call stack. Every node we search
    # successors set for are stacked there.
    #
    # The `stackedset` is set version of this stack used to check if a node is
    # already stacked. This check is used to detect cycles and prevent infinite
    # loop.
    #
    # successors set of all nodes are stored in the `cache` dictionary.
    #
    # After this while loop ends we use the cache to return the successors sets
    # for the node requested by the caller.
    while toproceed:
        # Every iteration tries to compute the successors sets of the topmost
        # node of the stack: CURRENT.
        #
        # There are four possible outcomes:
        #
        # 1) We already know the successors sets of CURRENT:
        #    -> mission accomplished, pop it from the stack.
        # 2) Stop the walk:
        #    default case: Node is not obsolete
        #    closest case: Node is known at this repo filter level
        #      -> the node is its own successors sets. Add it to the cache.
        # 3) We do not know successors set of direct successors of CURRENT:
        #    -> We add those successors to the stack.
        # 4) We know successors sets of all direct successors of CURRENT:
        #    -> We can compute CURRENT successors set and add it to the
        #       cache.
        #
        current = toproceed[-1]

        # case 2 condition is a bit hairy because of closest,
        # we compute it on its own
        case2condition = ((current not in succmarkers)
                          or (closest and current != initialnode
                              and current in repo))

        if current in cache:
            # case (1): We already know the successors sets
            stackedset.remove(toproceed.pop())
        elif case2condition:
            # case (2): end of walk.
            if current in repo:
                # We have a valid successors.
                cache[current] = [_succs((current,))]
            else:
                # Final obsolete version is unknown locally.
                # Do not count that as a valid successors
                cache[current] = []
        else:
            # cases (3) and (4)
            #
            # We proceed in two phases. Phase 1 aims to distinguish case (3)
            # from case (4):
            #
            #     For each direct successors of CURRENT, we check whether its
            #     successors sets are known. If they are not, we stack the
            #     unknown node and proceed to the next iteration of the while
            #     loop. (case 3)
            #
            #     During this step, we may detect obsolescence cycles: a node
            #     with unknown successors sets but already in the call stack.
            #     In such a situation, we arbitrary set the successors sets of
            #     the node to nothing (node pruned) to break the cycle.
            #
            #     If no break was encountered we proceed to phase 2.
            #
            # Phase 2 computes successors sets of CURRENT (case 4); see details
            # in phase 2 itself.
            #
            # Note the two levels of iteration in each phase.
            # - The first one handles obsolescence markers using CURRENT as
            #   precursor (successors markers of CURRENT).
            #
            #   Having multiple entry here means divergence.
            #
            # - The second one handles successors defined in each marker.
            #
            #   Having none means pruned node, multiple successors means split,
            #   single successors are standard replacement.
            #
            for mark in sorted(succmarkers[current]):
                for suc in mark[1]:
                    if suc not in cache:
                        if suc in stackedset:
                            # cycle breaking
                            cache[suc] = []
                        else:
                            # case (3) If we have not computed successors sets
                            # of one of those successors we add it to the
                            # `toproceed` stack and stop all work for this
                            # iteration.
                            toproceed.append(suc)
                            stackedset.add(suc)
                            break
                else:
                    continue
                break
            else:
                # case (4): we know all successors sets of all direct
                # successors
                #
                # Successors set contributed by each marker depends on the
                # successors sets of all its "successors" node.
                #
                # Each different marker is a divergence in the obsolescence
                # history. It contributes successors sets distinct from other
                # markers.
                #
                # Within a marker, a successor may have divergent successors
                # sets. In such a case, the marker will contribute multiple
                # divergent successors sets. If multiple successors have
                # divergent successors sets, a Cartesian product is used.
                #
                # At the end we post-process successors sets to remove
                # duplicated entry and successors set that are strict subset of
                # another one.
                succssets = []
                for mark in sorted(succmarkers[current]):
                    # successors sets contributed by this marker
                    base = _succs()
                    base.markers.add(mark)
                    markss = [base]
                    for suc in mark[1]:
                        # cardinal product with previous successors
                        productresult = []
                        for prefix in markss:
                            for suffix in cache[suc]:
                                newss = prefix.copy()
                                newss.markers.update(suffix.markers)
                                for part in suffix:
                                    # do not duplicated entry in successors set
                                    # first entry wins.
                                    if part not in newss:
                                        newss.append(part)
                                productresult.append(newss)
                        markss = productresult
                    succssets.extend(markss)
                # remove duplicated and subset
                seen = []
                final = []
                candidates = sorted((s for s in succssets if s),
                                    key=len, reverse=True)
                for cand in candidates:
                    for seensuccs in seen:
                        if cand.canmerge(seensuccs):
                            seensuccs.markers.update(cand.markers)
                            break
                    else:
                        final.append(cand)
                        seen.append(cand)
                final.reverse()  # put small successors set first
                cache[current] = final
    return cache[initialnode]
692 706
def successorsandmarkers(repo, ctx):
    """compute the raw data needed for computing obsfate
    Returns a list of dict, one dict per successors set

    Returns None when <ctx> is not obsolete.
    """
    if not ctx.obsolete():
        return None

    ssets = successorssets(repo, ctx.node(), closest=True)

    # closestsuccessors returns an empty list for pruned revisions, remap it
    # into a list containing an empty list for future processing
    if ssets == []:
        ssets = [[]]

    # Try to recover pruned markers
    succsmap = repo.obsstore.successors
    fullsuccessorsets = []  # successor set + markers
    for sset in ssets:
        if sset:
            fullsuccessorsets.append(sset)
        else:
            # successorssets returns an empty set() when ctx or one of its
            # successors is pruned.
            # In this case, walk the obs-markers tree again starting with ctx
            # and find the relevant pruning obs-markers, the ones without
            # successors.
            # Having these markers allow us to compute some information about
            # its fate, like who pruned this changeset and when.

            # XXX we do not catch all prune markers (eg rewritten then pruned)
            # (fix me later)
            foundany = False
            for mark in succsmap.get(ctx.node(), ()):
                if not mark[1]:
                    foundany = True
                    sset = _succs()
                    sset.markers.add(mark)
                    fullsuccessorsets.append(sset)
            if not foundany:
                fullsuccessorsets.append(_succs())

    values = []
    for sset in fullsuccessorsets:
        values.append({'successors': sset, 'markers': sset.markers})

    return values
739 753
def successorsetverb(successorset):
    """ Return the verb summarizing the successorset
    """
    # empty set: the changeset was dropped entirely
    if not successorset:
        return 'pruned'
    # single successor: plain rewrite
    if len(successorset) == 1:
        return 'rewritten'
    # several successors: the changeset was split
    return 'split'
750 764
def markersdates(markers):
    """returns the list of dates for a list of markers
    """
    # the date lives at index 4 of the raw marker tuple
    dates = []
    for mark in markers:
        dates.append(mark[4])
    return dates
755 769
def markersusers(markers):
    """ Returns a sorted list of markers users without duplicates
    """
    # metadata lives at index 3 of the raw marker tuple, as key/value pairs
    users = set()
    for mark in markers:
        user = dict(mark[3]).get('user')
        if user:
            users.add(user)
    return sorted(users)
763 777
def markersoperations(markers):
    """ Returns a sorted list of markers operations without duplicates
    """
    # metadata lives at index 3 of the raw marker tuple, as key/value pairs
    operations = set()
    for mark in markers:
        operation = dict(mark[3]).get('operation')
        if operation:
            operations.add(operation)
    return sorted(operations)
General Comments 0
You need to be logged in to leave comments. Login now