##// END OF EJS Templates
obsfate: fix obsfate_printer with empty date list...
Boris Feld -
r34874:aa849cf5 default
parent child Browse files
Show More
@@ -1,837 +1,837
1 1 # obsutil.py - utility functions for obsolescence
2 2 #
3 3 # Copyright 2017 Boris Feld <boris.feld@octobus.net>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import re
11 11
12 12 from . import (
13 13 phases,
14 14 util
15 15 )
16 16
class marker(object):
    """Lightweight wrapper around a raw obsolescence marker tuple.

    The wrapped tuple layout is:
    ``(predecessor, successors, flags, metadata, date, parents)``.
    """

    def __init__(self, repo, data):
        # the repo argument will be used to create changectx in later version
        self._repo = repo
        self._data = data
        self._decodedmeta = None

    def __hash__(self):
        # the raw tuple is immutable, so it is a safe hash source
        return hash(self._data)

    def __eq__(self, other):
        # only markers of the exact same type compare equal
        return type(self) == type(other) and self._data == other._data

    def precnode(self):
        """(DEPRECATED) use :meth:`prednode` instead"""
        msg = ("'marker.precnode' is deprecated, "
               "use 'marker.prednode'")
        util.nouideprecwarn(msg, '4.4')
        return self.prednode()

    def prednode(self):
        """Predecessor changeset node identifier"""
        return self._data[0]

    def succnodes(self):
        """List of successor changesets node identifiers"""
        return self._data[1]

    def parentnodes(self):
        """Parents of the predecessors (None if not recorded)"""
        return self._data[5]

    def metadata(self):
        """Decoded metadata dictionary"""
        return dict(self._data[3])

    def date(self):
        """Creation date as (unixtime, offset)"""
        return self._data[4]

    def flags(self):
        """The flags field of the marker"""
        return self._data[2]
63 63
def getmarkers(repo, nodes=None, exclusive=False):
    """returns markers known in a repository

    If <nodes> is specified, only markers "relevant" to those nodes are
    returned"""
    # select the raw marker source first, then wrap each entry lazily
    if nodes is None:
        source = repo.obsstore
    elif exclusive:
        source = exclusivemarkers(repo, nodes)
    else:
        source = repo.obsstore.relevantmarkers(nodes)

    for raw in source:
        yield marker(repo, raw)
78 78
def closestpredecessors(repo, nodeid):
    """yield the list of next predecessors pointing on visible changectx nodes

    This function respect the repoview filtering, filtered revision will be
    considered missing.
    """
    predmap = repo.obsstore.predecessors
    pending = [nodeid]
    visited = set(pending)

    while pending:
        node = pending.pop()

        for mark in predmap.get(node, ()):
            prednode = mark[0]

            # basic cycle protection: never revisit a node
            if prednode in visited:
                continue
            visited.add(prednode)

            if prednode in repo:
                # visible at this filter level: report it
                yield prednode
            else:
                # unknown/filtered: keep walking backward through it
                pending.append(prednode)
106 106
def allprecursors(*args, **kwargs):
    """Deprecated alias for :func:`allpredecessors` (DEPRECATED)."""
    util.nouideprecwarn(
        "'obsutil.allprecursors' is deprecated, "
        "use 'obsutil.allpredecessors'",
        '4.4')
    return allpredecessors(*args, **kwargs)
115 115
def allpredecessors(obsstore, nodes, ignoreflags=0):
    """Yield node for every precursors of <nodes>.

    Some precursors may be unknown locally.

    This is a linear yield unsuited to detecting folded changesets. It includes
    initial nodes too."""
    pending = set(nodes)
    emitted = set(pending)
    while pending:
        node = pending.pop()
        yield node
        for mark in obsstore.predecessors.get(node, ()):
            # ignore marker flagged with specified flag
            if mark[2] & ignoreflags:
                continue
            pred = mark[0]
            if pred not in emitted:
                emitted.add(pred)
                pending.add(pred)
137 137
def allsuccessors(obsstore, nodes, ignoreflags=0):
    """Yield node for every successor of <nodes>.

    Some successors may be unknown locally.

    This is a linear yield unsuited to detecting split changesets. It includes
    initial nodes too."""
    pending = set(nodes)
    emitted = set(pending)
    while pending:
        node = pending.pop()
        yield node
        for mark in obsstore.successors.get(node, ()):
            # ignore marker flagged with specified flag
            if mark[2] & ignoreflags:
                continue
            for succ in mark[1]:
                if succ not in emitted:
                    emitted.add(succ)
                    pending.add(succ)
158 158
159 159 def _filterprunes(markers):
160 160 """return a set with no prune markers"""
161 161 return set(m for m in markers if m[1])
162 162
def exclusivemarkers(repo, nodes):
    """set of markers relevant to "nodes" but no other locally-known nodes

    This function compute the set of markers "exclusive" to a locally-known
    node. This means we walk the markers starting from <nodes> until we reach a
    locally-known precursors outside of <nodes>. Element of <nodes> with
    locally-known successors outside of <nodes> are ignored (since their
    precursors markers are also relevant to these successors).

    For example:

    # (A0 rewritten as A1)
    #
    # A0 <-1- A1 # Marker "1" is exclusive to A1

    or

    # (A0 rewritten as AX; AX rewritten as A1; AX is unknown locally)
    #
    # <-1- A0 <-2- AX <-3- A1 # Marker "2,3" are exclusive to A1

    or

    # (A0 has unknown precursors, A0 rewritten as A1 and A2 (divergence))
    #
    #  <-2- A1 # Marker "2" is exclusive to A0,A1
    #  /
    # <-1- A0
    #  \
    #   <-3- A2 # Marker "3" is exclusive to A0,A2
    #
    # in addition:
    #
    # Markers "2,3" are exclusive to A1,A2
    # Markers "1,2,3" are exclusive to A0,A1,A2

    See test/test-obsolete-bundle-strip.t for more examples.

    An example usage is strip. When stripping a changeset, we also want to
    strip the markers exclusive to this changeset. Otherwise we would have
    "dangling" obsolescence markers from its precursors: Obsolescence markers
    marking a node as obsolete without any successors available locally.

    As for relevant markers, the prune markers for children will be followed.
    Of course, they will only be followed if the pruned children is
    locally-known. Since the prune markers are relevant to the pruned node.
    However, while prune markers are considered relevant to the parent of the
    pruned changesets, prune markers for locally-known changeset (with no
    successors) are considered exclusive to the pruned nodes. This allows
    to strip the prune markers (with the rest of the exclusive chain) alongside
    the pruned changesets.
    """
    # running on a filtered repository would be dangerous as markers could be
    # reported as exclusive when they are relevant for other filtered nodes.
    unfi = repo.unfiltered()

    # shortcut to various useful item
    nm = unfi.changelog.nodemap
    precursorsmarkers = unfi.obsstore.predecessors
    successormarkers = unfi.obsstore.successors
    childrenmarkers = unfi.obsstore.children

    # exclusive markers (return of the function)
    exclmarkers = set()
    # we need fast membership testing
    nodes = set(nodes)
    # looking for head in the obshistory
    #
    # XXX we are ignoring all issues in regard with cycle for now.
    # start from the nodes with no non-prune successor markers (the "heads")
    stack = [n for n in nodes if not _filterprunes(successormarkers.get(n, ()))]
    stack.sort()
    # nodes already stacked
    seennodes = set(stack)
    while stack:
        current = stack.pop()
        # fetch precursors markers
        markers = list(precursorsmarkers.get(current, ()))
        # extend the list with prune markers
        for mark in successormarkers.get(current, ()):
            if not mark[1]:
                markers.append(mark)
        # and markers from children (looking for prune)
        for mark in childrenmarkers.get(current, ()):
            if not mark[1]:
                markers.append(mark)
        # traverse the markers
        for mark in markers:
            if mark in exclmarkers:
                # markers already selected
                continue

            # If the markers is about the current node, select it
            #
            # (this delay the addition of markers from children)
            if mark[1] or mark[0] == current:
                exclmarkers.add(mark)

            # should we keep traversing through the precursors?
            prec = mark[0]

            # nodes in the stack or already processed
            if prec in seennodes:
                continue

            # is this a locally known node ?
            known = prec in nm
            # if locally-known and not in the <nodes> set the traversal
            # stop here.
            if known and prec not in nodes:
                continue

            # do not keep going if there are unselected markers pointing to this
            # nodes. If we end up traversing these unselected markers later the
            # node will be taken care of at that point.
            precmarkers = _filterprunes(successormarkers.get(prec))
            if precmarkers.issubset(exclmarkers):
                seennodes.add(prec)
                stack.append(prec)

    return exclmarkers
283 283
def foreground(repo, nodes):
    """return all nodes in the "foreground" of other node

    The foreground of a revision is anything reachable using parent -> children
    or precursor -> successor relation. It is very similar to "descendant" but
    augmented with obsolescence information.

    Beware that possible obsolescence cycle may result if complex situation.
    """
    repo = repo.unfiltered()
    foreground = set(repo.set('%ln::', nodes))
    if repo.obsstore:
        # We only need this complicated logic if there is obsolescence
        # XXX will probably deserve an optimised revset.
        nm = repo.changelog.nodemap
        plen = -1
        # compute the whole set of successors or descendants
        # (fixed-point iteration: stop when no new node was added)
        while len(foreground) != plen:
            plen = len(foreground)
            succs = set(c.node() for c in foreground)
            # only mutable changesets can have successors
            mutable = [c.node() for c in foreground if c.mutable()]
            succs.update(allsuccessors(repo.obsstore, mutable))
            # restrict to locally-known nodes before the next revset query
            known = (n for n in succs if n in nm)
            foreground = set(repo.set('%ln::', known))
    return set(c.node() for c in foreground)
309 309
# effectflag field
#
# Effect-flag is a 1-byte bit field used to store what changed between a
# changeset and its successor(s).
#
# The effect flag is stored in obs-markers metadata while we iterate on the
# information design. That's why we have the EFFECTFLAGFIELD. If we come up
# with an incompatible design for effect flag, we can store a new design under
# another field name so we don't break readers. We plan to extend the existing
# obsmarkers bit-field when the effect flag design will be stabilized.
#
# The effect-flag is placed behind an experimental flag
# `effect-flags` set to off by default.
#

# metadata key under which the effect-flag bit field is stored
EFFECTFLAGFIELD = "ef1"

DESCCHANGED = 1 << 0 # action changed the description
METACHANGED = 1 << 1 # action change the meta
DIFFCHANGED = 1 << 3 # action change diff introduced by the changeset
PARENTCHANGED = 1 << 2 # action change the parent
USERCHANGED = 1 << 4 # the user changed
DATECHANGED = 1 << 5 # the date changed
BRANCHCHANGED = 1 << 6 # the branch changed

# "extra" keys ignored when comparing changeset metadata (they record
# provenance, not user-meaningful content)
METABLACKLIST = [
    re.compile('^branch$'),
    re.compile('^.*-source$'),
    re.compile('^.*_source$'),
    re.compile('^source$'),
]
341 341
def metanotblacklisted(metaitem):
    """Tell whether a meta item survives the blacklist.

    ``metaitem`` is an ``(extrakey, extravalue)`` pair; it is kept only when
    its key matches none of the ``METABLACKLIST`` patterns.
    """
    key = metaitem[0]
    for pattern in METABLACKLIST:
        if pattern.match(key):
            return False
    return True
349 349
350 350 def _prepare_hunk(hunk):
351 351 """Drop all information but the username and patch"""
352 352 cleanhunk = []
353 353 for line in hunk.splitlines():
354 354 if line.startswith(b'# User') or not line.startswith(b'#'):
355 355 if line.startswith(b'@@'):
356 356 line = b'@@\n'
357 357 cleanhunk.append(line)
358 358 return cleanhunk
359 359
def _getdifflines(iterdiff):
    """return a cleaned up lines

    Pull the next hunk from ``iterdiff``; None means the iterator is
    exhausted."""
    hunk = next(iterdiff, None)
    return None if hunk is None else _prepare_hunk(hunk)
368 368
def _cmpdiff(leftctx, rightctx):
    """return True if both ctx introduce the "same diff"

    This is a first and basic implementation, with many shortcoming.
    """

    # Leftctx or right ctx might be filtered, so we need to use the contexts
    # with an unfiltered repository to safely compute the diff
    leftunfi = leftctx._repo.unfiltered()[leftctx.rev()]
    leftdiff = leftunfi.diff(git=1)
    rightunfi = rightctx._repo.unfiltered()[rightctx.rev()]
    rightdiff = rightunfi.diff(git=1)

    # compare hunk by hunk; the loop ends when either side is exhausted
    # (_getdifflines returns None) -- equal exhaustion means equal diffs
    left, right = (0, 0)
    while None not in (left, right):
        left = _getdifflines(leftdiff)
        right = _getdifflines(rightdiff)

        if left != right:
            return False
    return True
390 390
def geteffectflag(relation):
    """ From an obs-marker relation, compute what changed between the
    predecessor and the successor.

    ``relation`` is a ``(source, successors...)`` pair: ``relation[0]`` is the
    predecessor changectx and ``relation[1]`` iterates over its successors.
    Returns a bit field combining the ``*CHANGED`` flags defined above.
    """
    effects = 0

    source = relation[0]

    for changectx in relation[1]:
        # Check if description has changed
        if changectx.description() != source.description():
            effects |= DESCCHANGED

        # Check if user has changed
        if changectx.user() != source.user():
            effects |= USERCHANGED

        # Check if date has changed
        if changectx.date() != source.date():
            effects |= DATECHANGED

        # Check if branch has changed
        if changectx.branch() != source.branch():
            effects |= BRANCHCHANGED

        # Check if at least one of the parent has changed
        if changectx.parents() != source.parents():
            effects |= PARENTCHANGED

        # Check if other meta has changed. Materialize and sort the filtered
        # items before comparing: on Python 3 `filter()` returns lazy
        # iterators which never compare equal (so METACHANGED would always be
        # set), and dict item order is not significant for this comparison.
        changeextra = changectx.extra().items()
        ctxmeta = sorted(filter(metanotblacklisted, changeextra))

        sourceextra = source.extra().items()
        srcmeta = sorted(filter(metanotblacklisted, sourceextra))

        if ctxmeta != srcmeta:
            effects |= METACHANGED

        # Check if the diff has changed
        if not _cmpdiff(source, changectx):
            effects |= DIFFCHANGED

    return effects
435 435
def getobsoleted(repo, tr):
    """return the set of pre-existing revisions obsoleted by a transaction

    ``tr.changes`` supplies the obsolescence markers and revisions that were
    added during the transaction.
    """
    torev = repo.unfiltered().changelog.nodemap.get
    phase = repo._phasecache.phase
    succsmarkers = repo.obsstore.successors.get
    public = phases.public
    addedmarkers = tr.changes.get('obsmarkers')
    addedrevs = tr.changes.get('revs')
    seenrevs = set(addedrevs)
    obsoleted = set()
    for mark in addedmarkers:
        node = mark[0]
        rev = torev(node)
        # skip nodes unknown locally, revisions created by this very
        # transaction, and nodes already processed
        if rev is None or rev in seenrevs:
            continue
        seenrevs.add(rev)
        # public changesets cannot be obsoleted
        if phase(repo, rev) == public:
            continue
        # the revision only becomes obsolete now if every marker rewriting it
        # was added by this transaction
        if set(succsmarkers(node) or []).issubset(addedmarkers):
            obsoleted.add(rev)
    return obsoleted
457 457
class _succs(list):
    """small class to represent a successors with some metadata about it"""

    def __init__(self, *args, **kwargs):
        super(_succs, self).__init__(*args, **kwargs)
        # obsolescence markers that contributed to this successors set
        self.markers = set()

    def copy(self):
        # shallow copy of the node list with an independent markers set
        new = _succs(self)
        new.markers = self.markers.copy()
        return new

    @util.propertycache
    def _set(self):
        # immutable
        # cached set view of the node list for fast membership testing
        return set(self)

    def canmerge(self, other):
        # True when every node of this set already appears in "other"
        return self._set.issubset(other._set)
477 477
def successorssets(repo, initialnode, closest=False, cache=None):
    """Return set of all latest successors of initial nodes

    The successors set of a changeset A are the group of revisions that succeed
    A. It succeeds A as a consistent whole, each revision being only a partial
    replacement. By default, the successors set contains non-obsolete
    changesets only, walking the obsolescence graph until reaching a leaf. If
    'closest' is set to True, closest successors-sets are return (the
    obsolescence walk stops on known changesets).

    This function returns the full list of successor sets which is why it
    returns a list of tuples and not just a single tuple. Each tuple is a valid
    successors set. Note that (A,) may be a valid successors set for changeset A
    (see below).

    In most cases, a changeset A will have a single element (e.g. the changeset
    A is replaced by A') in its successors set. Though, it is also common for a
    changeset A to have no elements in its successor set (e.g. the changeset
    has been pruned). Therefore, the returned list of successors sets will be
    [(A',)] or [], respectively.

    When a changeset A is split into A' and B', however, it will result in a
    successors set containing more than a single element, i.e. [(A',B')].
    Divergent changesets will result in multiple successors sets, i.e. [(A',),
    (A'')].

    If a changeset A is not obsolete, then it will conceptually have no
    successors set. To distinguish this from a pruned changeset, the successor
    set will contain itself only, i.e. [(A,)].

    Finally, final successors unknown locally are considered to be pruned
    (pruned: obsoleted without any successors). (Final: successors not affected
    by markers).

    The 'closest' mode respect the repoview filtering. For example, without
    filter it will stop at the first locally known changeset, with 'visible'
    filter it will stop on visible changesets).

    The optional `cache` parameter is a dictionary that may contains
    precomputed successors sets. It is meant to reuse the computation of a
    previous call to `successorssets` when multiple calls are made at the same
    time. The cache dictionary is updated in place. The caller is responsible
    for its life span. Code that makes multiple calls to `successorssets`
    *should* use this cache mechanism or risk a performance hit.

    Since results are different depending of the 'closest' mode, the same cache
    cannot be reused for both mode.
    """

    succmarkers = repo.obsstore.successors

    # Stack of nodes we search successors sets for
    toproceed = [initialnode]
    # set version of above list for fast loop detection
    # element added to "toproceed" must be added here
    stackedset = set(toproceed)
    if cache is None:
        cache = {}

    # This while loop is the flattened version of a recursive search for
    # successors sets
    #
    # def successorssets(x):
    #     successors = directsuccessors(x)
    #     ss = [[]]
    #     for succ in directsuccessors(x):
    #         # product as in itertools cartesian product
    #         ss = product(ss, successorssets(succ))
    #     return ss
    #
    # But we can not use plain recursive calls here:
    # - that would blow the python call stack
    # - obsolescence markers may have cycles, we need to handle them.
    #
    # The `toproceed` list act as our call stack. Every node we search
    # successors set for are stacked there.
    #
    # The `stackedset` is set version of this stack used to check if a node is
    # already stacked. This check is used to detect cycles and prevent infinite
    # loop.
    #
    # successors set of all nodes are stored in the `cache` dictionary.
    #
    # After this while loop ends we use the cache to return the successors sets
    # for the node requested by the caller.
    while toproceed:
        # Every iteration tries to compute the successors sets of the topmost
        # node of the stack: CURRENT.
        #
        # There are four possible outcomes:
        #
        # 1) We already know the successors sets of CURRENT:
        #    -> mission accomplished, pop it from the stack.
        # 2) Stop the walk:
        #    default case: Node is not obsolete
        #    closest case: Node is known at this repo filter level
        #      -> the node is its own successors sets. Add it to the cache.
        # 3) We do not know successors set of direct successors of CURRENT:
        #    -> We add those successors to the stack.
        # 4) We know successors sets of all direct successors of CURRENT:
        #    -> We can compute CURRENT successors set and add it to the
        #       cache.
        #
        current = toproceed[-1]

        # case 2 condition is a bit hairy because of closest,
        # we compute it on its own
        case2condition = ((current not in succmarkers)
                          or (closest and current != initialnode
                              and current in repo))

        if current in cache:
            # case (1): We already know the successors sets
            stackedset.remove(toproceed.pop())
        elif case2condition:
            # case (2): end of walk.
            if current in repo:
                # We have a valid successors.
                cache[current] = [_succs((current,))]
            else:
                # Final obsolete version is unknown locally.
                # Do not count that as a valid successors
                cache[current] = []
        else:
            # cases (3) and (4)
            #
            # We proceed in two phases. Phase 1 aims to distinguish case (3)
            # from case (4):
            #
            #     For each direct successors of CURRENT, we check whether its
            #     successors sets are known. If they are not, we stack the
            #     unknown node and proceed to the next iteration of the while
            #     loop. (case 3)
            #
            #     During this step, we may detect obsolescence cycles: a node
            #     with unknown successors sets but already in the call stack.
            #     In such a situation, we arbitrary set the successors sets of
            #     the node to nothing (node pruned) to break the cycle.
            #
            #     If no break was encountered we proceed to phase 2.
            #
            # Phase 2 computes successors sets of CURRENT (case 4); see details
            # in phase 2 itself.
            #
            # Note the two levels of iteration in each phase.
            # - The first one handles obsolescence markers using CURRENT as
            #   precursor (successors markers of CURRENT).
            #
            #   Having multiple entry here means divergence.
            #
            # - The second one handles successors defined in each marker.
            #
            #   Having none means pruned node, multiple successors means split,
            #   single successors are standard replacement.
            #
            for mark in sorted(succmarkers[current]):
                for suc in mark[1]:
                    if suc not in cache:
                        if suc in stackedset:
                            # cycle breaking
                            cache[suc] = []
                        else:
                            # case (3) If we have not computed successors sets
                            # of one of those successors we add it to the
                            # `toproceed` stack and stop all work for this
                            # iteration.
                            toproceed.append(suc)
                            stackedset.add(suc)
                            break
                else:
                    continue
                break
            else:
                # case (4): we know all successors sets of all direct
                # successors
                #
                # Successors set contributed by each marker depends on the
                # successors sets of all its "successors" node.
                #
                # Each different marker is a divergence in the obsolescence
                # history. It contributes successors sets distinct from other
                # markers.
                #
                # Within a marker, a successor may have divergent successors
                # sets. In such a case, the marker will contribute multiple
                # divergent successors sets. If multiple successors have
                # divergent successors sets, a Cartesian product is used.
                #
                # At the end we post-process successors sets to remove
                # duplicated entry and successors set that are strict subset of
                # another one.
                succssets = []
                for mark in sorted(succmarkers[current]):
                    # successors sets contributed by this marker
                    base = _succs()
                    base.markers.add(mark)
                    markss = [base]
                    for suc in mark[1]:
                        # cardinal product with previous successors
                        productresult = []
                        for prefix in markss:
                            for suffix in cache[suc]:
                                newss = prefix.copy()
                                newss.markers.update(suffix.markers)
                                for part in suffix:
                                    # do not duplicated entry in successors set
                                    # first entry wins.
                                    if part not in newss:
                                        newss.append(part)
                                productresult.append(newss)
                        markss = productresult
                    succssets.extend(markss)
                # remove duplicated and subset
                seen = []
                final = []
                candidates = sorted((s for s in succssets if s),
                                    key=len, reverse=True)
                for cand in candidates:
                    for seensuccs in seen:
                        if cand.canmerge(seensuccs):
                            seensuccs.markers.update(cand.markers)
                            break
                    else:
                        final.append(cand)
                        seen.append(cand)
                final.reverse()  # put small successors set first
                cache[current] = final
    return cache[initialnode]
706 706
def successorsandmarkers(repo, ctx):
    """compute the raw data needed for computing obsfate

    Returns a list of dict, one dict per successors set, or None when ``ctx``
    is not obsolete. Each dict has a 'successors' and a 'markers' key.
    """
    if not ctx.obsolete():
        return None

    ssets = successorssets(repo, ctx.node(), closest=True)

    # closestsuccessors returns an empty list for pruned revisions, remap it
    # into a list containing an empty list for future processing
    if ssets == []:
        ssets = [[]]

    # Try to recover pruned markers
    succsmap = repo.obsstore.successors
    fullsuccessorsets = []  # successor set + markers
    for sset in ssets:
        if sset:
            fullsuccessorsets.append(sset)
        else:
            # successorsset return an empty set() when ctx or one of its
            # successors is pruned.
            # In this case, walk the obs-markers tree again starting with ctx
            # and find the relevant pruning obs-makers, the ones without
            # successors.
            # Having these markers allow us to compute some information about
            # its fate, like who pruned this changeset and when.

            # XXX we do not catch all prune markers (eg rewritten then pruned)
            # (fix me later)
            foundany = False
            for mark in succsmap.get(ctx.node(), ()):
                if not mark[1]:
                    foundany = True
                    sset = _succs()
                    sset.markers.add(mark)
                    fullsuccessorsets.append(sset)
            if not foundany:
                # no prune marker recovered: report an empty successors set
                fullsuccessorsets.append(_succs())

    values = []
    for sset in fullsuccessorsets:
        values.append({'successors': sset, 'markers': sset.markers})

    return values
753 753
def successorsetverb(successorset):
    """Return the verb summarizing the successorset.

    Empty set means the changeset was pruned, a single successor means a
    plain rewrite, several successors mean a split.
    """
    if not successorset:
        return 'pruned'
    return 'rewritten' if len(successorset) == 1 else 'split'
764 764
def markersdates(markers):
    """returns the list of dates for a list of markers

    The date lives at index 4 of the raw marker tuple."""
    return [mark[4] for mark in markers]
769 769
def markersusers(markers):
    """ Returns a sorted list of markers users without duplicates
    """
    users = set()
    for mark in markers:
        # metadata lives at index 3 as a tuple of (key, value) pairs
        user = dict(mark[3]).get('user')
        if user:
            users.add(user)
    return sorted(users)
777 777
def markersoperations(markers):
    """ Returns a sorted list of markers operations without duplicates
    """
    operations = set()
    for mark in markers:
        # metadata lives at index 3 as a tuple of (key, value) pairs
        operation = dict(mark[3]).get('operation')
        if operation:
            operations.add(operation)
    return sorted(operations)
786 786
def obsfateprinter(successors, markers, ui):
    """ Build a obsfate string for a single successorset using all obsfate
    related function defined in obsutil

    The string combines the verb, operations, successors, users and dates
    extracted from ``markers``, with the level of detail driven by ``ui``
    verbosity.
    """
    quiet = ui.quiet
    verbose = ui.verbose
    normal = not verbose and not quiet

    line = []

    # Verb
    line.append(successorsetverb(successors))

    # Operations
    operations = markersoperations(markers)
    if operations:
        line.append(" using %s" % ", ".join(operations))

    # Successors
    if successors:
        fmtsuccessors = [successors.joinfmt(succ) for succ in successors]
        line.append(" as %s" % ", ".join(fmtsuccessors))

    # Users
    users = markersusers(markers)
    # Filter out current user in not verbose mode to reduce amount of
    # information
    if not verbose:
        currentuser = ui.username(acceptempty=True)
        if len(users) == 1 and currentuser in users:
            users = None

    if (verbose or normal) and users:
        line.append(" by %s" % ", ".join(users))

    # Date
    dates = markersdates(markers)

    # guard on "dates": markers may carry no date at all, and min()/max()
    # raise ValueError on an empty sequence
    if dates and verbose:
        min_date = min(dates)
        max_date = max(dates)

        if min_date == max_date:
            fmtmin_date = util.datestr(min_date, '%Y-%m-%d %H:%M %1%2')
            line.append(" (at %s)" % fmtmin_date)
        else:
            fmtmin_date = util.datestr(min_date, '%Y-%m-%d %H:%M %1%2')
            fmtmax_date = util.datestr(max_date, '%Y-%m-%d %H:%M %1%2')
            line.append(" (between %s and %s)" % (fmtmin_date, fmtmax_date))

    return "".join(line)
General Comments 0
You need to be logged in to leave comments. Login now