# Source: Mercurial `mercurial/obsutil.py`, changeset r47549 (stable branch)
# by Matt Harbison — "obsutil: maintain a homogenous list when computing
# successors..." (web diff-viewer residue removed; original hunk header was
# "@@ -1,1049 +1,1049").
1 1 # obsutil.py - utility functions for obsolescence
2 2 #
3 3 # Copyright 2017 Boris Feld <boris.feld@octobus.net>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import re
11 11
12 12 from .i18n import _
13 13 from .node import (
14 14 hex,
15 15 short,
16 16 )
17 17 from . import (
18 18 diffutil,
19 19 encoding,
20 20 error,
21 21 phases,
22 22 pycompat,
23 23 util,
24 24 )
25 25 from .utils import dateutil
26 26
### obsolescence marker flag

## bumpedfix flag
#
# When a changeset A' succeed to a changeset A which became public, we call A'
# "bumped" because it's a successors of a public changesets
#
# o    A' (bumped)
# |`:
# | o  A
# |/
# o    Z
#
# The way to solve this situation is to create a new changeset Ad as children
# of A. This changeset have the same content than A'. So the diff from A to A'
# is the same than the diff from A to Ad. Ad is marked as a successors of A'
#
# o   Ad
# |`:
# | x A'
#  |'|
# o | A
# |/
# o Z
#
# But by transitivity Ad is also a successors of A. To avoid having Ad marked
# as bumped too, we add the `bumpedfix` flag to the marker. <A', (Ad,)>.
# This flag mean that the successors express the changes between the public and
# bumped version and fix the situation, breaking the transitivity of
# "bumped" here.
bumpedfix = 1
# NOTE(review): the name suggests this flag marks markers whose node hashes
# are sha256 (32 bytes) rather than sha1 — confirm against the obsstore
# on-disk format before relying on that reading.
usingsha256 = 2
59 59
60 60
class marker(object):
    """Lightweight wrapper exposing named accessors over one raw obsolescence
    marker tuple ``(prec, succs, flags, metadata, date, parents, ...)``."""

    def __init__(self, repo, data):
        # the repo argument will be used to create changectx in later version
        self._repo = repo
        self._data = data
        self._decodedmeta = None

    def __hash__(self):
        return hash(self._data)

    def __eq__(self, other):
        # only markers of the exact same class may compare equal
        return type(self) == type(other) and self._data == other._data

    def prednode(self):
        """Predecessor changeset node identifier"""
        return self._data[0]

    def succnodes(self):
        """List of successor changesets node identifiers"""
        return self._data[1]

    def parentnodes(self):
        """Parents of the predecessors (None if not recorded)"""
        return self._data[5]

    def metadata(self):
        """Decoded metadata dictionary"""
        return dict(self._data[3])

    def date(self):
        """Creation date as (unixtime, offset)"""
        return self._data[4]

    def flags(self):
        """The flags field of the marker"""
        return self._data[2]
101 101
102 102
def getmarkers(repo, nodes=None, exclusive=False):
    """returns markers known in a repository

    If <nodes> is specified, only markers "relevant" to those nodes are
    returned"""
    if nodes is None:
        # no filtering requested: expose every marker in the store
        rawmarkers = repo.obsstore
    elif exclusive:
        # markers relevant to <nodes> and to no other locally-known node
        rawmarkers = exclusivemarkers(repo, nodes)
    else:
        # markers relevant to <nodes>
        rawmarkers = repo.obsstore.relevantmarkers(nodes)

    for rawdata in rawmarkers:
        yield marker(repo, rawdata)
117 117
118 118
def sortedmarkers(markers):
    """Return the markers sorted in a stable, total order.

    The last item of a raw marker tuple ('parents') may be None, which does
    not compare with tuples; substitute an empty tuple so sorting is well
    defined.
    """

    def sortkey(mark):
        return mark[:-1] + (mark[-1] or (),)

    return sorted(markers, key=sortkey)
122 122
123 123
def closestpredecessors(repo, nodeid):
    """yield the closest predecessors pointing to visible changectx nodes

    This function respects the repoview filtering: filtered revisions are
    considered missing, and the walk continues past them.
    """
    predmap = repo.obsstore.predecessors
    pending = [nodeid]
    visited = {nodeid}

    while pending:
        node = pending.pop()
        for markdata in predmap.get(node, ()):
            candidate = markdata[0]

            # basic cycle protection
            if candidate in visited:
                continue
            visited.add(candidate)

            if candidate in repo:
                # locally known and visible: a closest predecessor
                yield candidate
            else:
                # unknown or filtered: keep walking further back
                pending.append(candidate)
151 151
152 152
def allpredecessors(obsstore, nodes, ignoreflags=0):
    """Yield node for every precursors of <nodes>.

    Some precursors may be unknown locally.

    This is a linear yield unsuited to detecting folded changesets. It
    includes the initial nodes too."""
    pending = set(nodes)
    emitted = set(pending)
    getmarks = obsstore.predecessors.get
    while pending:
        node = pending.pop()
        yield node
        for markdata in getmarks(node, ()):
            # skip markers carrying any of the ignored flags
            if markdata[2] & ignoreflags:
                continue
            prednode = markdata[0]
            if prednode not in emitted:
                emitted.add(prednode)
                pending.add(prednode)
175 175
176 176
def allsuccessors(obsstore, nodes, ignoreflags=0):
    """Yield node for every successor of <nodes>.

    Some successors may be unknown locally.

    This is a linear yield unsuited to detecting split changesets. It
    includes the initial nodes too."""
    pending = set(nodes)
    emitted = set(pending)
    while pending:
        node = pending.pop()
        yield node
        for markdata in obsstore.successors.get(node, ()):
            # skip markers carrying any of the ignored flags
            if markdata[2] & ignoreflags:
                continue
            for succnode in markdata[1]:
                if succnode not in emitted:
                    emitted.add(succnode)
                    pending.add(succnode)
197 197
198 198
199 199 def _filterprunes(markers):
200 200 """return a set with no prune markers"""
201 201 return {m for m in markers if m[1]}
202 202
203 203
def exclusivemarkers(repo, nodes):
    """set of markers relevant to "nodes" but no other locally-known nodes

    This function compute the set of markers "exclusive" to a locally-known
    node. This means we walk the markers starting from <nodes> until we reach a
    locally-known precursors outside of <nodes>. Element of <nodes> with
    locally-known successors outside of <nodes> are ignored (since their
    precursors markers are also relevant to these successors).

    For example:

        # (A0 rewritten as A1)
        #
        # A0 <-1- A1 # Marker "1" is exclusive to A1

    or

        # (A0 rewritten as AX; AX rewritten as A1; AX is unkown locally)
        #
        # <-1- A0 <-2- AX <-3- A1 # Marker "2,3" are exclusive to A1

    or

        # (A0 has unknown precursors, A0 rewritten as A1 and A2 (divergence))
        #
        #          <-2- A1 # Marker "2" is exclusive to A0,A1
        #        /
        # <-1- A0
        #        \
        #         <-3- A2 # Marker "3" is exclusive to A0,A2
        #
        # in addition:
        #
        #  Markers "2,3" are exclusive to A1,A2
        #  Markers "1,2,3" are exclusive to A0,A1,A2

    See test/test-obsolete-bundle-strip.t for more examples.

    An example usage is strip. When stripping a changeset, we also want to
    strip the markers exclusive to this changeset. Otherwise we would have
    "dangling"" obsolescence markers from its precursors: Obsolescence markers
    marking a node as obsolete without any successors available locally.

    As for relevant markers, the prune markers for children will be followed.
    Of course, they will only be followed if the pruned children is
    locally-known. Since the prune markers are relevant to the pruned node.
    However, while prune markers are considered relevant to the parent of the
    pruned changesets, prune markers for locally-known changeset (with no
    successors) are considered exclusive to the pruned nodes. This allows
    to strip the prune markers (with the rest of the exclusive chain) alongside
    the pruned changesets.
    """
    # running on a filtered repository would be dangerous as markers could be
    # reported as exclusive when they are relevant for other filtered nodes.
    unfi = repo.unfiltered()

    # shortcut to various useful item
    has_node = unfi.changelog.index.has_node
    precursorsmarkers = unfi.obsstore.predecessors
    successormarkers = unfi.obsstore.successors
    childrenmarkers = unfi.obsstore.children

    # exclusive markers (return of the function)
    exclmarkers = set()
    # we need fast membership testing
    nodes = set(nodes)
    # looking for head in the obshistory
    #
    # XXX we are ignoring all issues in regard with cycle for now.
    # Start only from nodes with no non-prune successor markers (the "heads"
    # of the obsolescence history within <nodes>).
    stack = [n for n in nodes if not _filterprunes(successormarkers.get(n, ()))]
    stack.sort()
    # nodes already stacked
    seennodes = set(stack)
    while stack:
        current = stack.pop()
        # fetch precursors markers
        markers = list(precursorsmarkers.get(current, ()))
        # extend the list with prune markers
        for mark in successormarkers.get(current, ()):
            if not mark[1]:
                markers.append(mark)
        # and markers from children (looking for prune)
        for mark in childrenmarkers.get(current, ()):
            if not mark[1]:
                markers.append(mark)
        # traverse the markers
        for mark in markers:
            if mark in exclmarkers:
                # markers already selected
                continue

            # If the markers is about the current node, select it
            #
            # (this delay the addition of markers from children)
            if mark[1] or mark[0] == current:
                exclmarkers.add(mark)

            # should we keep traversing through the precursors?
            prec = mark[0]

            # nodes in the stack or already processed
            if prec in seennodes:
                continue

            # is this a locally known node ?
            known = has_node(prec)
            # if locally-known and not in the <nodes> set the traversal
            # stop here.
            if known and prec not in nodes:
                continue

            # do not keep going if there are unselected markers pointing to this
            # nodes. If we end up traversing these unselected markers later the
            # node will be taken care of at that point.
            precmarkers = _filterprunes(successormarkers.get(prec))
            if precmarkers.issubset(exclmarkers):
                seennodes.add(prec)
                stack.append(prec)

    return exclmarkers
324 324
325 325
def foreground(repo, nodes):
    """return all nodes in the "foreground" of other node

    The foreground of a revision is anything reachable using parent -> children
    or precursor -> successor relation. It is very similar to "descendant" but
    augmented with obsolescence information.

    Beware that possible obsolescence cycle may result if complex situation.
    """
    repo = repo.unfiltered()
    foreground = set(repo.set(b'%ln::', nodes))
    if repo.obsstore:
        # We only need this complicated logic if there is obsolescence
        # XXX will probably deserve an optimised revset.
        has_node = repo.changelog.index.has_node
        plen = -1
        # compute the whole set of successors or descendants
        # (fixed-point iteration: stop once a pass adds no new changeset)
        while len(foreground) != plen:
            plen = len(foreground)
            succs = {c.node() for c in foreground}
            mutable = [c.node() for c in foreground if c.mutable()]
            succs.update(allsuccessors(repo.obsstore, mutable))
            # only locally-known successors can seed the next descendant walk
            known = (n for n in succs if has_node(n))
            foreground = set(repo.set(b'%ln::', known))
    return {c.node() for c in foreground}
351 351
352 352
# effectflag field
#
# Effect-flag is a 1-byte bit field used to store what changed between a
# changeset and its successor(s).
#
# The effect flag is stored in obs-markers metadata while we iterate on the
# information design. That's why we have the EFFECTFLAGFIELD. If we come up
# with an incompatible design for effect flag, we can store a new design under
# another field name so we don't break readers. We plan to extend the existing
# obsmarkers bit-field when the effect flag design will be stabilized.
#
# The effect-flag is placed behind an experimental flag
# `effect-flags` set to off by default.
#

EFFECTFLAGFIELD = b"ef1"

# NOTE: these bit values are stored in obs-marker metadata (under
# EFFECTFLAGFIELD); never renumber existing flags or old markers will be
# misread.  DIFFCHANGED/PARENTCHANGED are intentionally listed out of
# numeric order here.
DESCCHANGED = 1 << 0  # action changed the description
METACHANGED = 1 << 1  # action change the meta
DIFFCHANGED = 1 << 3  # action change diff introduced by the changeset
PARENTCHANGED = 1 << 2  # action change the parent
USERCHANGED = 1 << 4  # the user changed
DATECHANGED = 1 << 5  # the date changed
BRANCHCHANGED = 1 << 6  # the branch changed

# extra-data keys whose changes are ignored when computing METACHANGED
# (see metanotblacklisted below)
METABLACKLIST = [
    re.compile(b'^branch$'),
    re.compile(b'^.*-source$'),
    re.compile(b'^.*_source$'),
    re.compile(b'^source$'),
]
384 384
385 385
def metanotblacklisted(metaitem):
    """Check that the key of a meta item (extrakey, extravalue) does not
    match at least one of the blacklist pattern
    """
    key = metaitem[0]
    for pattern in METABLACKLIST:
        if pattern.match(key):
            return False
    return True
393 393
394 394
395 395 def _prepare_hunk(hunk):
396 396 """Drop all information but the username and patch"""
397 397 cleanhunk = []
398 398 for line in hunk.splitlines():
399 399 if line.startswith(b'# User') or not line.startswith(b'#'):
400 400 if line.startswith(b'@@'):
401 401 line = b'@@\n'
402 402 cleanhunk.append(line)
403 403 return cleanhunk
404 404
405 405
def _getdifflines(iterdiff):
    """return a cleaned up lines"""
    hunk = next(iterdiff, None)

    # exhausted iterator: propagate None so callers can stop comparing
    if hunk is None:
        return None

    return _prepare_hunk(hunk)
414 414
415 415
def _cmpdiff(leftctx, rightctx):
    """return True if both ctx introduce the "same diff"

    This is a first and basic implementation, with many shortcoming.
    """
    diffopts = diffutil.diffallopts(leftctx.repo().ui, {b'git': True})

    # Leftctx or right ctx might be filtered, so we need to use the contexts
    # with an unfiltered repository to safely compute the diff

    # leftctx and rightctx can be from different repository views in case of
    # hgsubversion, do don't try to access them from same repository
    # rightctx.repo() and leftctx.repo() are not always the same
    leftunfi = leftctx._repo.unfiltered()[leftctx.rev()]
    leftdiff = leftunfi.diff(opts=diffopts)
    rightunfi = rightctx._repo.unfiltered()[rightctx.rev()]
    rightdiff = rightunfi.diff(opts=diffopts)

    # non-None sentinels so the loop body runs at least once;
    # _getdifflines returns None once a diff iterator is exhausted
    left, right = (0, 0)
    while None not in (left, right):
        # compare the diffs hunk by hunk, stopping at the first difference
        left = _getdifflines(leftdiff)
        right = _getdifflines(rightdiff)

        if left != right:
            return False
    return True
442 442
443 443
def geteffectflag(source, successors):
    """From an obs-marker relation, compute what changed between the
    predecessor and the successor.

    Returns a bit field combining the *CHANGED flags defined above.
    """
    effects = 0

    for changectx in successors:
        # Check if description has changed
        if changectx.description() != source.description():
            effects |= DESCCHANGED

        # Check if user has changed
        if changectx.user() != source.user():
            effects |= USERCHANGED

        # Check if date has changed
        if changectx.date() != source.date():
            effects |= DATECHANGED

        # Check if branch has changed
        if changectx.branch() != source.branch():
            effects |= BRANCHCHANGED

        # Check if at least one of the parent has changed
        if changectx.parents() != source.parents():
            effects |= PARENTCHANGED

        # Check if other meta has changed, ignoring blacklisted extra keys
        changeextra = changectx.extra().items()
        ctxmeta = list(filter(metanotblacklisted, changeextra))

        sourceextra = source.extra().items()
        srcmeta = list(filter(metanotblacklisted, sourceextra))

        if ctxmeta != srcmeta:
            effects |= METACHANGED

        # Check if the diff has changed
        if not _cmpdiff(source, changectx):
            effects |= DIFFCHANGED

    return effects
486 486
487 487
def getobsoleted(repo, tr=None, changes=None):
    """return the set of pre-existing revisions obsoleted by a transaction

    Either the transaction or changes item of the transaction (for hooks)
    must be provided, but not both.
    """
    if (tr is None) == (changes is None):
        e = b"exactly one of tr and changes must be provided"
        raise error.ProgrammingError(e)
    torev = repo.unfiltered().changelog.index.get_rev
    phase = repo._phasecache.phase
    succsmarkers = repo.obsstore.successors.get
    public = phases.public
    if changes is None:
        changes = tr.changes
    addedmarkers = changes[b'obsmarkers']
    origrepolen = changes[b'origrepolen']
    seenrevs = set()
    obsoleted = set()
    for mark in addedmarkers:
        node = mark[0]
        rev = torev(node)
        # skip nodes unknown locally, already processed, or created by this
        # same transaction (rev >= origrepolen means not "pre-existing")
        if rev is None or rev in seenrevs or rev >= origrepolen:
            continue
        seenrevs.add(rev)
        # public changesets cannot become obsolete
        if phase(repo, rev) == public:
            continue
        # only count the rev as obsoleted if every marker making it obsolete
        # was added by this very transaction
        if set(succsmarkers(node) or []).issubset(addedmarkers):
            obsoleted.add(rev)
    return obsoleted
518 518
519 519
class _succs(list):
    """small class to represent a successors with some metadata about it"""

    def __init__(self, *args, **kwargs):
        super(_succs, self).__init__(*args, **kwargs)
        # obsolescence markers that contributed to this successors set
        self.markers = set()

    def copy(self):
        duplicate = _succs(self)
        duplicate.markers = self.markers.copy()
        return duplicate

    @util.propertycache
    def _set(self):
        # cached immutable snapshot used for fast subset checks; the list
        # must not be mutated after this has been computed
        return set(self)

    def canmerge(self, other):
        # this set can be merged into `other` when it is a subset of it
        return self._set.issubset(other._set)
539 539
540 540
def successorssets(repo, initialnode, closest=False, cache=None):
    """Return set of all latest successors of initial nodes

    The successors set of a changeset A are the group of revisions that succeed
    A. It succeeds A as a consistent whole, each revision being only a partial
    replacement. By default, the successors set contains non-obsolete
    changesets only, walking the obsolescence graph until reaching a leaf. If
    'closest' is set to True, closest successors-sets are return (the
    obsolescence walk stops on known changesets).

    This function returns the full list of successor sets which is why it
    returns a list of tuples and not just a single tuple. Each tuple is a valid
    successors set. Note that (A,) may be a valid successors set for changeset A
    (see below).

    In most cases, a changeset A will have a single element (e.g. the changeset
    A is replaced by A') in its successors set. Though, it is also common for a
    changeset A to have no elements in its successor set (e.g. the changeset
    has been pruned). Therefore, the returned list of successors sets will be
    [(A',)] or [], respectively.

    When a changeset A is split into A' and B', however, it will result in a
    successors set containing more than a single element, i.e. [(A',B')].
    Divergent changesets will result in multiple successors sets, i.e. [(A',),
    (A'')].

    If a changeset A is not obsolete, then it will conceptually have no
    successors set. To distinguish this from a pruned changeset, the successor
    set will contain itself only, i.e. [(A,)].

    Finally, final successors unknown locally are considered to be pruned
    (pruned: obsoleted without any successors). (Final: successors not affected
    by markers).

    The 'closest' mode respect the repoview filtering. For example, without
    filter it will stop at the first locally known changeset, with 'visible'
    filter it will stop on visible changesets).

    The optional `cache` parameter is a dictionary that may contains
    precomputed successors sets. It is meant to reuse the computation of a
    previous call to `successorssets` when multiple calls are made at the same
    time. The cache dictionary is updated in place. The caller is responsible
    for its life span. Code that makes multiple calls to `successorssets`
    *should* use this cache mechanism or risk a performance hit.

    Since results are different depending of the 'closest' most, the same cache
    cannot be reused for both mode.
    """

    succmarkers = repo.obsstore.successors

    # Stack of nodes we search successors sets for
    toproceed = [initialnode]
    # set version of above list for fast loop detection
    # element added to "toproceed" must be added here
    stackedset = set(toproceed)
    if cache is None:
        cache = {}

    # This while loop is the flattened version of a recursive search for
    # successors sets
    #
    # def successorssets(x):
    #    successors = directsuccessors(x)
    #    ss = [[]]
    #    for succ in directsuccessors(x):
    #        # product as in itertools cartesian product
    #        ss = product(ss, successorssets(succ))
    #    return ss
    #
    # But we can not use plain recursive calls here:
    # - that would blow the python call stack
    # - obsolescence markers may have cycles, we need to handle them.
    #
    # The `toproceed` list act as our call stack. Every node we search
    # successors set for are stacked there.
    #
    # The `stackedset` is set version of this stack used to check if a node is
    # already stacked. This check is used to detect cycles and prevent infinite
    # loop.
    #
    # successors set of all nodes are stored in the `cache` dictionary.
    #
    # After this while loop ends we use the cache to return the successors sets
    # for the node requested by the caller.
    while toproceed:
        # Every iteration tries to compute the successors sets of the topmost
        # node of the stack: CURRENT.
        #
        # There are four possible outcomes:
        #
        # 1) We already know the successors sets of CURRENT:
        #    -> mission accomplished, pop it from the stack.
        # 2) Stop the walk:
        #    default case: Node is not obsolete
        #    closest case: Node is known at this repo filter level
        #      -> the node is its own successors sets. Add it to the cache.
        # 3) We do not know successors set of direct successors of CURRENT:
        #    -> We add those successors to the stack.
        # 4) We know successors sets of all direct successors of CURRENT:
        #    -> We can compute CURRENT successors set and add it to the
        #       cache.
        #
        current = toproceed[-1]

        # case 2 condition is a bit hairy because of closest,
        # we compute it on its own
        case2condition = (current not in succmarkers) or (
            closest and current != initialnode and current in repo
        )

        if current in cache:
            # case (1): We already know the successors sets
            stackedset.remove(toproceed.pop())
        elif case2condition:
            # case (2): end of walk.
            if current in repo:
                # We have a valid successors.
                cache[current] = [_succs((current,))]
            else:
                # Final obsolete version is unknown locally.
                # Do not count that as a valid successors
                cache[current] = []
        else:
            # cases (3) and (4)
            #
            # We proceed in two phases. Phase 1 aims to distinguish case (3)
            # from case (4):
            #
            #     For each direct successors of CURRENT, we check whether its
            #     successors sets are known. If they are not, we stack the
            #     unknown node and proceed to the next iteration of the while
            #     loop. (case 3)
            #
            #     During this step, we may detect obsolescence cycles: a node
            #     with unknown successors sets but already in the call stack.
            #     In such a situation, we arbitrary set the successors sets of
            #     the node to nothing (node pruned) to break the cycle.
            #
            #     If no break was encountered we proceed to phase 2.
            #
            # Phase 2 computes successors sets of CURRENT (case 4); see details
            # in phase 2 itself.
            #
            # Note the two levels of iteration in each phase.
            # - The first one handles obsolescence markers using CURRENT as
            #   precursor (successors markers of CURRENT).
            #
            #   Having multiple entry here means divergence.
            #
            # - The second one handles successors defined in each marker.
            #
            #   Having none means pruned node, multiple successors means split,
            #   single successors are standard replacement.
            #
            for mark in sortedmarkers(succmarkers[current]):
                for suc in mark[1]:
                    if suc not in cache:
                        if suc in stackedset:
                            # cycle breaking
                            cache[suc] = []
                        else:
                            # case (3) If we have not computed successors sets
                            # of one of those successors we add it to the
                            # `toproceed` stack and stop all work for this
                            # iteration.
                            toproceed.append(suc)
                            stackedset.add(suc)
                            break
                else:
                    continue
                break
            else:
                # case (4): we know all successors sets of all direct
                # successors
                #
                # Successors set contributed by each marker depends on the
                # successors sets of all its "successors" node.
                #
                # Each different marker is a divergence in the obsolescence
                # history. It contributes successors sets distinct from other
                # markers.
                #
                # Within a marker, a successor may have divergent successors
                # sets. In such a case, the marker will contribute multiple
                # divergent successors sets. If multiple successors have
                # divergent successors sets, a Cartesian product is used.
                #
                # At the end we post-process successors sets to remove
                # duplicated entry and successors set that are strict subset of
                # another one.
                succssets = []
                for mark in sortedmarkers(succmarkers[current]):
                    # successors sets contributed by this marker
                    base = _succs()
                    base.markers.add(mark)
                    markss = [base]
                    for suc in mark[1]:
                        # cardinal product with previous successors
                        productresult = []
                        for prefix in markss:
                            for suffix in cache[suc]:
                                newss = prefix.copy()
                                newss.markers.update(suffix.markers)
                                for part in suffix:
                                    # do not duplicated entry in successors set
                                    # first entry wins.
                                    if part not in newss:
                                        newss.append(part)
                                productresult.append(newss)
                        if productresult:
                            markss = productresult
                    succssets.extend(markss)
                # remove duplicated and subset
                seen = []
                final = []
                candidates = sorted(
                    (s for s in succssets if s), key=len, reverse=True
                )
                for cand in candidates:
                    for seensuccs in seen:
                        if cand.canmerge(seensuccs):
                            seensuccs.markers.update(cand.markers)
                            break
                    else:
                        final.append(cand)
                        seen.append(cand)
                final.reverse()  # put small successors set first
                cache[current] = final
    return cache[initialnode]
771 771
772 772
def successorsandmarkers(repo, ctx):
    """compute the raw data needed for computing obsfate

    Returns a list of dict, one dict per successors set: each carries the
    successors (a _succs instance) under b'successors' and the relevant
    obsolescence markers under b'markers'.  Returns None when ctx is not
    obsolete.
    """
    if not ctx.obsolete():
        return None

    ssets = successorssets(repo, ctx.node(), closest=True)

    # closestsuccessors returns an empty list for pruned revisions, remap it
    # into a list containing an empty successors set for future processing.
    # Use _succs() (not a bare list) so the list stays homogeneous and later
    # code can rely on the `.markers` attribute being present.
    if ssets == []:
        ssets = [_succs()]

    # Try to recover pruned markers
    succsmap = repo.obsstore.successors
    fullsuccessorsets = []  # successor set + markers
    for sset in ssets:
        if sset:
            fullsuccessorsets.append(sset)
        else:
            # successorsset return an empty set() when ctx or one of its
            # successors is pruned.
            # In this case, walk the obs-markers tree again starting with ctx
            # and find the relevant pruning obs-makers, the ones without
            # successors.
            # Having these markers allow us to compute some information about
            # its fate, like who pruned this changeset and when.

            # XXX we do not catch all prune markers (eg rewritten then pruned)
            # (fix me later)
            foundany = False
            for mark in succsmap.get(ctx.node(), ()):
                if not mark[1]:
                    foundany = True
                    sset = _succs()
                    sset.markers.add(mark)
                    fullsuccessorsets.append(sset)
            if not foundany:
                fullsuccessorsets.append(_succs())

    values = []
    for sset in fullsuccessorsets:
        values.append({b'successors': sset, b'markers': sset.markers})

    return values
819 819
820 820
821 821 def _getobsfate(successorssets):
822 822 """Compute a changeset obsolescence fate based on its successorssets.
823 823 Successors can be the tipmost ones or the immediate ones. This function
824 824 return values are not meant to be shown directly to users, it is meant to
825 825 be used by internal functions only.
826 826 Returns one fate from the following values:
827 827 - pruned
828 828 - diverged
829 829 - superseded
830 830 - superseded_split
831 831 """
832 832
833 833 if len(successorssets) == 0:
834 834 # The commit has been pruned
835 835 return b'pruned'
836 836 elif len(successorssets) > 1:
837 837 return b'diverged'
838 838 else:
839 839 # No divergence, only one set of successors
840 840 successors = successorssets[0]
841 841
842 842 if len(successors) == 1:
843 843 return b'superseded'
844 844 else:
845 845 return b'superseded_split'
846 846
847 847
def obsfateverb(successorset, markers):
    """Return the verb summarizing the successorset and potentially using
    information from the markers
    """
    if not successorset:
        return b'pruned'
    return b'rewritten' if len(successorset) == 1 else b'split'
859 859
860 860
def markersdates(markers):
    """returns the list of dates for a list of markers"""
    # field 4 of a raw marker tuple is its (unixtime, offset) creation date
    return [markerdata[4] for markerdata in markers]
864 864
865 865
def markersusers(markers):
    """Returns a sorted list of markers users without duplicates"""
    users = set()
    for markerdata in markers:
        meta = dict(markerdata[3])
        # skip markers without a (non-empty) recorded user
        if meta.get(b'user'):
            users.add(encoding.tolocal(meta[b'user']))
    return sorted(users)
876 876
877 877
def markersoperations(markers):
    """Returns a sorted list of markers operations without duplicates"""
    operations = set()
    for markerdata in markers:
        meta = dict(markerdata[3])
        operation = meta.get(b'operation')
        # skip markers without a (non-empty) recorded operation
        if operation:
            operations.add(operation)
    return sorted(operations)
886 886
887 887
def obsfateprinter(ui, repo, successors, markers, formatctx):
    """Build a obsfate string for a single successorset using all obsfate
    related function defined in obsutil
    """
    quiet = ui.quiet
    verbose = ui.verbose
    normal = not verbose and not quiet

    pieces = []

    # Verb
    pieces.append(obsfateverb(successors, markers))

    # Operations
    operations = markersoperations(markers)
    if operations:
        pieces.append(b" using %s" % b", ".join(operations))

    # Successors
    if successors:
        formatted = [formatctx(repo[succ]) for succ in successors]
        pieces.append(b" as %s" % b", ".join(formatted))

    # Users
    users = markersusers(markers)
    if not verbose:
        # Filter out current user in not verbose mode to reduce amount of
        # information
        currentuser = ui.username(acceptempty=True)
        if len(users) == 1 and currentuser in users:
            users = None

    if users and (verbose or normal):
        pieces.append(b" by %s" % b", ".join(users))

    # Date
    dates = markersdates(markers)

    if verbose and dates:
        earliest = min(dates)
        latest = max(dates)
        datefmt = b'%Y-%m-%d %H:%M %1%2'

        if earliest == latest:
            pieces.append(b" (at %s)" % dateutil.datestr(earliest, datefmt))
        else:
            fmtrange = (
                dateutil.datestr(earliest, datefmt),
                dateutil.datestr(latest, datefmt),
            )
            pieces.append(b" (between %s and %s)" % fmtrange)

    return b"".join(pieces)
939 939
940 940
# Map from an obsolescence fate (the values produced by _getobsfate()) to
# the translated message displayed for a hidden revision.  Each entry is a
# format string taking the changeid first, then successors/count where the
# message needs them (see _getfilteredreason()).
filteredmsgtable = {
    b"pruned": _(b"hidden revision '%s' is pruned"),
    b"diverged": _(b"hidden revision '%s' has diverged"),
    b"superseded": _(b"hidden revision '%s' was rewritten as: %s"),
    b"superseded_split": _(b"hidden revision '%s' was split as: %s"),
    b"superseded_split_several": _(
        b"hidden revision '%s' was split as: %s and %d more"
    ),
}
950 950
951 951
def _getfilteredreason(repo, changeid, ctx):
    """return a human-friendly string on why a obsolete changeset is hidden"""
    succsets = successorssets(repo, ctx.node())
    fate = _getobsfate(succsets)

    # Be more precise in case the revision is superseded
    if fate == b'pruned':
        return filteredmsgtable[b'pruned'] % changeid
    if fate == b'diverged':
        return filteredmsgtable[b'diverged'] % changeid
    if fate == b'superseded':
        single_successor = short(succsets[0][0])
        return filteredmsgtable[b'superseded'] % (changeid, single_successor)
    if fate == b'superseded_split':
        shortened = [short(node_id) for node_id in succsets[0]]

        if len(shortened) <= 2:
            fmtsuccs = b', '.join(shortened)
            return filteredmsgtable[b'superseded_split'] % (changeid, fmtsuccs)

        # Only show the first two successors and a count of the rest.
        firstsuccessors = b', '.join(shortened[:2])
        remainingnumber = len(shortened) - 2
        args = (changeid, firstsuccessors, remainingnumber)
        return filteredmsgtable[b'superseded_split_several'] % args
980 980
981 981
def divergentsets(repo, ctx):
    """Compute sets of commits divergent with a given one"""
    cache = {}
    # Map each divergent successor set (as a tuple) to its latest common
    # predecessor with ctx.
    bases = {}
    node = ctx.node()
    for pred in allpredecessors(repo.obsstore, [node]):
        if pred == node:
            # a node can't be a base for divergence with itself
            continue
        for sset in successorssets(repo, pred, cache):
            if node in sset:
                # we are only interested in *other* successor sets
                continue
            key = tuple(sset)
            if key in bases:
                # we already know the latest base for this divergency
                continue
            bases[key] = pred
    return [
        {b'divergentnodes': divset, b'commonpredecessor': b}
        for divset, b in pycompat.iteritems(bases)
    ]
1003 1003
1004 1004
def whyunstable(repo, ctx):
    """Return a list of dicts describing every instability affecting ctx.

    Each entry carries at least b'instability', b'reason' and b'node';
    content-divergence entries also carry b'divergentnodes'.
    """
    entries = []
    if ctx.orphan():
        # An orphan is caused by an obsolete or itself-orphan parent.
        for parent in ctx.parents():
            if parent.orphan():
                kind = b'orphan'
            elif parent.obsolete():
                kind = b'obsolete'
            else:
                continue
            entries.append(
                {
                    b'instability': b'orphan',
                    b'reason': b'%s parent' % kind,
                    b'node': parent.hex(),
                }
            )
    if ctx.phasedivergent():
        # bumpedfix markers are excluded: they resolve, not cause, the
        # phase divergence.
        predecessors = allpredecessors(
            repo.obsstore, [ctx.node()], ignoreflags=bumpedfix
        )
        for pred in predecessors:
            if pred not in repo or repo[pred].mutable():
                continue
            entries.append(
                {
                    b'instability': b'phase-divergent',
                    b'reason': b'immutable predecessor',
                    b'node': repo[pred].hex(),
                }
            )
    if ctx.contentdivergent():
        for dset in divergentsets(repo, ctx):
            entries.append(
                {
                    b'instability': b'content-divergent',
                    b'divergentnodes': [
                        repo[n] for n in dset[b'divergentnodes']
                    ],
                    b'reason': b'predecessor',
                    b'node': hex(dset[b'commonpredecessor']),
                }
            )
    return entries
General Comments 0
You need to be logged in to leave comments. Login now