##// END OF EJS Templates
index: use `index.has_node` in `obsutil.exclusivemarkers`...
marmoute -
r43940:c8f1e841 default
parent child Browse files
Show More
@@ -1,1035 +1,1035 b''
1 1 # obsutil.py - utility functions for obsolescence
2 2 #
3 3 # Copyright 2017 Boris Feld <boris.feld@octobus.net>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import re
11 11
12 12 from .i18n import _
13 13 from . import (
14 14 diffutil,
15 15 encoding,
16 16 node as nodemod,
17 17 phases,
18 18 pycompat,
19 19 util,
20 20 )
21 21 from .utils import dateutil
22 22
23 23 ### obsolescence marker flag
24 24
25 25 ## bumpedfix flag
26 26 #
27 27 # When a changeset A' succeed to a changeset A which became public, we call A'
28 28 # "bumped" because it's a successors of a public changesets
29 29 #
30 30 # o A' (bumped)
31 31 # |`:
32 32 # | o A
33 33 # |/
34 34 # o Z
35 35 #
36 36 # The way to solve this situation is to create a new changeset Ad as children
37 37 # of A. This changeset have the same content than A'. So the diff from A to A'
38 38 # is the same than the diff from A to Ad. Ad is marked as a successors of A'
39 39 #
40 40 # o Ad
41 41 # |`:
42 42 # | x A'
43 43 # |'|
44 44 # o | A
45 45 # |/
46 46 # o Z
47 47 #
48 48 # But by transitivity Ad is also a successors of A. To avoid having Ad marked
49 49 # as bumped too, we add the `bumpedfix` flag to the marker. <A', (Ad,)>.
50 50 # This flag mean that the successors express the changes between the public and
51 51 # bumped version and fix the situation, breaking the transitivity of
52 52 # "bumped" here.
# marker flag: the successor fixes a "bumped" changeset and breaks the
# predecessor -> successor transitivity for that relation (see diagram above)
bumpedfix = 1
# marker flag: node identifiers stored in this marker use sha256 hashes
usingsha256 = 2
55 55
56 56
class marker(object):
    """Lightweight wrapper around the raw tuple of an obsolescence marker.

    Field layout of the wrapped tuple: (predecessor, successors, flags,
    metadata, date, parents).
    """

    def __init__(self, repo, data):
        # the repo argument will be used to create changectx in later version
        self._repo = repo
        self._data = data
        self._decodedmeta = None

    def __hash__(self):
        return hash(self._data)

    def __eq__(self, other):
        # markers of different types never compare equal
        return type(self) == type(other) and self._data == other._data

    def prednode(self):
        """Predecessor changeset node identifier"""
        return self._data[0]

    def succnodes(self):
        """List of successor changesets node identifiers"""
        return self._data[1]

    def parentnodes(self):
        """Parents of the predecessors (None if not recorded)"""
        return self._data[5]

    def metadata(self):
        """Decoded metadata dictionary"""
        return dict(self._data[3])

    def date(self):
        """Creation date as (unixtime, offset)"""
        return self._data[4]

    def flags(self):
        """The flags field of the marker"""
        return self._data[2]
97 97
98 98
def getmarkers(repo, nodes=None, exclusive=False):
    """Yield ``marker`` objects known in a repository.

    When <nodes> is given, restrict to markers "relevant" to those nodes;
    with ``exclusive`` also set, restrict further to markers exclusive to
    them."""
    if nodes is None:
        rawmarkers = repo.obsstore
    else:
        if exclusive:
            rawmarkers = exclusivemarkers(repo, nodes)
        else:
            rawmarkers = repo.obsstore.relevantmarkers(nodes)

    for rawdata in rawmarkers:
        yield marker(repo, rawdata)
113 113
114 114
def closestpredecessors(repo, nodeid):
    """yield the list of next predecessors pointing on visible changectx nodes

    This function respects the repoview filtering: filtered revisions count
    as missing, so the walk keeps going through them.
    """
    predmap = repo.obsstore.predecessors
    pending = [nodeid]
    visited = set(pending)

    while pending:
        node = pending.pop()

        for markdata in predmap.get(node, ()):
            candidate = markdata[0]

            # basic cycle protection
            if candidate in visited:
                continue
            visited.add(candidate)

            if candidate in repo:
                # locally known and visible: this is a closest predecessor
                yield candidate
            else:
                # unknown/filtered: keep walking its own predecessors
                pending.append(candidate)
142 142
143 143
def allpredecessors(obsstore, nodes, ignoreflags=0):
    """Yield the node of every predecessor of <nodes>.

    Some predecessors may be unknown locally.

    This is a linear yield unsuited to detecting folded changesets. It
    includes the initial nodes themselves."""
    pending = set(nodes)
    seen = set(pending)
    lookup = obsstore.predecessors.get
    while pending:
        node = pending.pop()
        yield node
        for markdata in lookup(node, ()):
            # skip markers carrying one of the ignored flags
            if markdata[2] & ignoreflags:
                continue
            precnode = markdata[0]
            if precnode not in seen:
                seen.add(precnode)
                pending.add(precnode)
166 166
167 167
def allsuccessors(obsstore, nodes, ignoreflags=0):
    """Yield the node of every successor of <nodes>.

    Some successors may be unknown locally.

    This is a linear yield unsuited to detecting split changesets. It
    includes the initial nodes themselves."""
    pending = set(nodes)
    seen = set(pending)
    while pending:
        node = pending.pop()
        yield node
        for markdata in obsstore.successors.get(node, ()):
            # skip markers carrying one of the ignored flags
            if markdata[2] & ignoreflags:
                continue
            for succ in markdata[1]:
                if succ not in seen:
                    seen.add(succ)
                    pending.add(succ)
188 188
189 189
190 190 def _filterprunes(markers):
191 191 """return a set with no prune markers"""
192 192 return set(m for m in markers if m[1])
193 193
194 194
195 195 def exclusivemarkers(repo, nodes):
196 196 """set of markers relevant to "nodes" but no other locally-known nodes
197 197
198 198 This function compute the set of markers "exclusive" to a locally-known
199 199 node. This means we walk the markers starting from <nodes> until we reach a
200 200 locally-known precursors outside of <nodes>. Element of <nodes> with
201 201 locally-known successors outside of <nodes> are ignored (since their
202 202 precursors markers are also relevant to these successors).
203 203
204 204 For example:
205 205
206 206 # (A0 rewritten as A1)
207 207 #
208 208 # A0 <-1- A1 # Marker "1" is exclusive to A1
209 209
210 210 or
211 211
212 212 # (A0 rewritten as AX; AX rewritten as A1; AX is unkown locally)
213 213 #
214 214 # <-1- A0 <-2- AX <-3- A1 # Marker "2,3" are exclusive to A1
215 215
216 216 or
217 217
218 218 # (A0 has unknown precursors, A0 rewritten as A1 and A2 (divergence))
219 219 #
220 220 # <-2- A1 # Marker "2" is exclusive to A0,A1
221 221 # /
222 222 # <-1- A0
223 223 # \
224 224 # <-3- A2 # Marker "3" is exclusive to A0,A2
225 225 #
226 226 # in addition:
227 227 #
228 228 # Markers "2,3" are exclusive to A1,A2
229 229 # Markers "1,2,3" are exclusive to A0,A1,A2
230 230
231 231 See test/test-obsolete-bundle-strip.t for more examples.
232 232
233 233 An example usage is strip. When stripping a changeset, we also want to
234 234 strip the markers exclusive to this changeset. Otherwise we would have
235 235 "dangling"" obsolescence markers from its precursors: Obsolescence markers
236 236 marking a node as obsolete without any successors available locally.
237 237
238 238 As for relevant markers, the prune markers for children will be followed.
239 239 Of course, they will only be followed if the pruned children is
240 240 locally-known. Since the prune markers are relevant to the pruned node.
241 241 However, while prune markers are considered relevant to the parent of the
242 242 pruned changesets, prune markers for locally-known changeset (with no
243 243 successors) are considered exclusive to the pruned nodes. This allows
244 244 to strip the prune markers (with the rest of the exclusive chain) alongside
245 245 the pruned changesets.
246 246 """
247 247 # running on a filtered repository would be dangerous as markers could be
248 248 # reported as exclusive when they are relevant for other filtered nodes.
249 249 unfi = repo.unfiltered()
250 250
251 251 # shortcut to various useful item
252 nm = unfi.changelog.nodemap
252 has_node = unfi.changelog.index.has_node
253 253 precursorsmarkers = unfi.obsstore.predecessors
254 254 successormarkers = unfi.obsstore.successors
255 255 childrenmarkers = unfi.obsstore.children
256 256
257 257 # exclusive markers (return of the function)
258 258 exclmarkers = set()
259 259 # we need fast membership testing
260 260 nodes = set(nodes)
261 261 # looking for head in the obshistory
262 262 #
263 263 # XXX we are ignoring all issues in regard with cycle for now.
264 264 stack = [n for n in nodes if not _filterprunes(successormarkers.get(n, ()))]
265 265 stack.sort()
266 266 # nodes already stacked
267 267 seennodes = set(stack)
268 268 while stack:
269 269 current = stack.pop()
270 270 # fetch precursors markers
271 271 markers = list(precursorsmarkers.get(current, ()))
272 272 # extend the list with prune markers
273 273 for mark in successormarkers.get(current, ()):
274 274 if not mark[1]:
275 275 markers.append(mark)
276 276 # and markers from children (looking for prune)
277 277 for mark in childrenmarkers.get(current, ()):
278 278 if not mark[1]:
279 279 markers.append(mark)
280 280 # traverse the markers
281 281 for mark in markers:
282 282 if mark in exclmarkers:
283 283 # markers already selected
284 284 continue
285 285
286 286 # If the markers is about the current node, select it
287 287 #
288 288 # (this delay the addition of markers from children)
289 289 if mark[1] or mark[0] == current:
290 290 exclmarkers.add(mark)
291 291
292 292 # should we keep traversing through the precursors?
293 293 prec = mark[0]
294 294
295 295 # nodes in the stack or already processed
296 296 if prec in seennodes:
297 297 continue
298 298
299 299 # is this a locally known node ?
300 known = prec in nm
300 known = has_node(prec)
301 301 # if locally-known and not in the <nodes> set the traversal
302 302 # stop here.
303 303 if known and prec not in nodes:
304 304 continue
305 305
306 306 # do not keep going if there are unselected markers pointing to this
307 307 # nodes. If we end up traversing these unselected markers later the
308 308 # node will be taken care of at that point.
309 309 precmarkers = _filterprunes(successormarkers.get(prec))
310 310 if precmarkers.issubset(exclmarkers):
311 311 seennodes.add(prec)
312 312 stack.append(prec)
313 313
314 314 return exclmarkers
315 315
316 316
def foreground(repo, nodes):
    """return all nodes in the "foreground" of other node

    The foreground of a revision is anything reachable using parent ->
    children or precursor -> successor relation. It is very similar to
    "descendant" but augmented with obsolescence information.

    Beware that possible obsolescence cycle may result if complex situation.
    """
    repo = repo.unfiltered()
    foreground = set(repo.set(b'%ln::', nodes))
    if not repo.obsstore:
        # no obsolescence data: plain descendants are enough
        return set(c.node() for c in foreground)
    # We only need this complicated logic if there is obsolescence
    # XXX will probably deserve an optimised revset.
    nm = repo.changelog.nodemap
    plen = -1
    # grow the set with successors and descendants until it stabilizes
    while plen != len(foreground):
        plen = len(foreground)
        succs = set(c.node() for c in foreground)
        mutable = [c.node() for c in foreground if c.mutable()]
        succs.update(allsuccessors(repo.obsstore, mutable))
        known = (n for n in succs if n in nm)
        foreground = set(repo.set(b'%ln::', known))
    return set(c.node() for c in foreground)
342 342
343 343
# effectflag field
#
# Effect-flag is a 1-byte bit field used to store what changed between a
# changeset and its successor(s).
#
# The effect flag is stored in obs-markers metadata while we iterate on the
# information design. That's why we have the EFFECTFLAGFIELD. If we come up
# with an incompatible design for effect flag, we can store a new design under
# another field name so we don't break readers. We plan to extend the existing
# obsmarkers bit-field when the effect flag design will be stabilized.
#
# The effect-flag is placed behind an experimental flag
# `effect-flags` set to off by default.
#

# metadata key under which the effect-flag bit field is stored
EFFECTFLAGFIELD = b"ef1"

# NOTE: DIFFCHANGED and PARENTCHANGED bit positions are swapped relative to
# their declaration order; the values are part of the stored format, do not
# "fix" them.
DESCCHANGED = 1 << 0  # action changed the description
METACHANGED = 1 << 1  # action change the meta
DIFFCHANGED = 1 << 3  # action change diff introduced by the changeset
PARENTCHANGED = 1 << 2  # action change the parent
USERCHANGED = 1 << 4  # the user changed
DATECHANGED = 1 << 5  # the date changed
BRANCHCHANGED = 1 << 6  # the branch changed

# extra keys whose changes are ignored when computing METACHANGED
METABLACKLIST = [
    re.compile(b'^branch$'),
    re.compile(b'^.*-source$'),
    re.compile(b'^.*_source$'),
    re.compile(b'^source$'),
]
375 375
376 376
def metanotblacklisted(metaitem):
    """Return True when the key of ``metaitem`` (an (extrakey, extravalue)
    pair) matches none of the METABLACKLIST patterns.
    """
    key = metaitem[0]
    return all(pattern.match(key) is None for pattern in METABLACKLIST)
384 384
385 385
386 386 def _prepare_hunk(hunk):
387 387 """Drop all information but the username and patch"""
388 388 cleanhunk = []
389 389 for line in hunk.splitlines():
390 390 if line.startswith(b'# User') or not line.startswith(b'#'):
391 391 if line.startswith(b'@@'):
392 392 line = b'@@\n'
393 393 cleanhunk.append(line)
394 394 return cleanhunk
395 395
396 396
def _getdifflines(iterdiff):
    """return the next hunk of ``iterdiff``, cleaned up for comparison

    Returns None when the iterator is exhausted (used as a sentinel by
    callers)."""
    hunk = next(iterdiff, None)
    return None if hunk is None else _prepare_hunk(hunk)
405 405
406 406
def _cmpdiff(leftctx, rightctx):
    """return True if both ctx introduce the "same diff"

    This is a first and basic implementation, with many shortcoming.
    """
    diffopts = diffutil.diffallopts(leftctx.repo().ui, {b'git': True})

    # Leftctx or right ctx might be filtered, so we need to use the contexts
    # with an unfiltered repository to safely compute the diff

    # leftctx and rightctx can be from different repository views in case of
    # hgsubversion, do don't try to access them from same repository
    # rightctx.repo() and leftctx.repo() are not always the same
    leftunfi = leftctx._repo.unfiltered()[leftctx.rev()]
    leftdiff = leftunfi.diff(opts=diffopts)
    rightunfi = rightctx._repo.unfiltered()[rightctx.rev()]
    rightdiff = rightunfi.diff(opts=diffopts)

    # compare the two hunk streams pairwise; the loop terminates as soon as
    # either iterator is exhausted (None sentinel from _getdifflines)
    left, right = (0, 0)
    while None not in (left, right):
        left = _getdifflines(leftdiff)
        right = _getdifflines(rightdiff)

        if left != right:
            return False
    return True
433 433
434 434
def geteffectflag(source, successors):
    """ From an obs-marker relation, compute what changed between the
    predecessor and the successor.

    Returns a bit field combining the *CHANGED flags defined above.
    """
    effects = 0

    for succctx in successors:
        # description change
        if succctx.description() != source.description():
            effects |= DESCCHANGED

        # user change
        if succctx.user() != source.user():
            effects |= USERCHANGED

        # date change
        if succctx.date() != source.date():
            effects |= DATECHANGED

        # branch change
        if succctx.branch() != source.branch():
            effects |= BRANCHCHANGED

        # change of at least one parent
        if succctx.parents() != source.parents():
            effects |= PARENTCHANGED

        # change of other (non-blacklisted) extra metadata
        succmeta = [
            item for item in succctx.extra().items() if metanotblacklisted(item)
        ]
        srcmeta = [
            item for item in source.extra().items() if metanotblacklisted(item)
        ]
        if succmeta != srcmeta:
            effects |= METACHANGED

        # change of the introduced diff
        if not _cmpdiff(source, succctx):
            effects |= DIFFCHANGED

    return effects
477 477
478 478
def getobsoleted(repo, tr):
    """return the set of pre-existing revisions obsoleted by a transaction"""
    torev = repo.unfiltered().changelog.nodemap.get
    phase = repo._phasecache.phase
    succsmarkers = repo.obsstore.successors.get
    public = phases.public
    addedmarkers = tr.changes[b'obsmarkers']
    origrepolen = tr.changes[b'origrepolen']
    seenrevs = set()
    obsoleted = set()
    for mark in addedmarkers:
        node = mark[0]
        rev = torev(node)
        if rev is None:
            # predecessor unknown locally
            continue
        if rev in seenrevs or rev >= origrepolen:
            # already handled, or the revision was added by this transaction
            continue
        seenrevs.add(rev)
        if phase(repo, rev) == public:
            # public changesets cannot become obsolete
            continue
        # only count the node when every marker obsoleting it is new
        if set(succsmarkers(node) or []).issubset(addedmarkers):
            obsoleted.add(rev)
    return obsoleted
500 500
501 501
class _succs(list):
    """small class to represent a successors with some metadata about it"""

    def __init__(self, *args, **kwargs):
        super(_succs, self).__init__(*args, **kwargs)
        # obsolescence markers that contributed to this successors set
        self.markers = set()

    def copy(self):
        # shallow copy of the list content, independent copy of the marker set
        new = _succs(self)
        new.markers = self.markers.copy()
        return new

    @util.propertycache
    def _set(self):
        # immutable
        return set(self)

    def canmerge(self, other):
        # True when every element of self already appears in ``other``
        return self._set.issubset(other._set)
521 521
522 522
def successorssets(repo, initialnode, closest=False, cache=None):
    """Return set of all latest successors of initial nodes

    The successors set of a changeset A are the group of revisions that succeed
    A. It succeeds A as a consistent whole, each revision being only a partial
    replacement. By default, the successors set contains non-obsolete
    changesets only, walking the obsolescence graph until reaching a leaf. If
    'closest' is set to True, closest successors-sets are return (the
    obsolescence walk stops on known changesets).

    This function returns the full list of successor sets which is why it
    returns a list of tuples and not just a single tuple. Each tuple is a valid
    successors set. Note that (A,) may be a valid successors set for changeset A
    (see below).

    In most cases, a changeset A will have a single element (e.g. the changeset
    A is replaced by A') in its successors set. Though, it is also common for a
    changeset A to have no elements in its successor set (e.g. the changeset
    has been pruned). Therefore, the returned list of successors sets will be
    [(A',)] or [], respectively.

    When a changeset A is split into A' and B', however, it will result in a
    successors set containing more than a single element, i.e. [(A',B')].
    Divergent changesets will result in multiple successors sets, i.e. [(A',),
    (A'')].

    If a changeset A is not obsolete, then it will conceptually have no
    successors set. To distinguish this from a pruned changeset, the successor
    set will contain itself only, i.e. [(A,)].

    Finally, final successors unknown locally are considered to be pruned
    (pruned: obsoleted without any successors). (Final: successors not affected
    by markers).

    The 'closest' mode respect the repoview filtering. For example, without
    filter it will stop at the first locally known changeset, with 'visible'
    filter it will stop on visible changesets).

    The optional `cache` parameter is a dictionary that may contains
    precomputed successors sets. It is meant to reuse the computation of a
    previous call to `successorssets` when multiple calls are made at the same
    time. The cache dictionary is updated in place. The caller is responsible
    for its life span. Code that makes multiple calls to `successorssets`
    *should* use this cache mechanism or risk a performance hit.

    Since results are different depending on the 'closest' mode, the same cache
    cannot be reused for both modes.
    """

    succmarkers = repo.obsstore.successors

    # Stack of nodes we search successors sets for
    toproceed = [initialnode]
    # set version of above list for fast loop detection
    # element added to "toproceed" must be added here
    stackedset = set(toproceed)
    if cache is None:
        cache = {}

    # This while loop is the flattened version of a recursive search for
    # successors sets
    #
    # def successorssets(x):
    #    successors = directsuccessors(x)
    #    ss = [[]]
    #    for succ in directsuccessors(x):
    #        # product as in itertools cartesian product
    #        ss = product(ss, successorssets(succ))
    #    return ss
    #
    # But we can not use plain recursive calls here:
    # - that would blow the python call stack
    # - obsolescence markers may have cycles, we need to handle them.
    #
    # The `toproceed` list act as our call stack. Every node we search
    # successors set for are stacked there.
    #
    # The `stackedset` is set version of this stack used to check if a node is
    # already stacked. This check is used to detect cycles and prevent infinite
    # loop.
    #
    # successors set of all nodes are stored in the `cache` dictionary.
    #
    # After this while loop ends we use the cache to return the successors sets
    # for the node requested by the caller.
    while toproceed:
        # Every iteration tries to compute the successors sets of the topmost
        # node of the stack: CURRENT.
        #
        # There are four possible outcomes:
        #
        # 1) We already know the successors sets of CURRENT:
        #    -> mission accomplished, pop it from the stack.
        # 2) Stop the walk:
        #    default case: Node is not obsolete
        #    closest case: Node is known at this repo filter level
        #      -> the node is its own successors sets. Add it to the cache.
        # 3) We do not know successors set of direct successors of CURRENT:
        #      -> We add those successors to the stack.
        # 4) We know successors sets of all direct successors of CURRENT:
        #      -> We can compute CURRENT successors set and add it to the
        #         cache.
        #
        current = toproceed[-1]

        # case 2 condition is a bit hairy because of closest,
        # we compute it on its own
        case2condition = (current not in succmarkers) or (
            closest and current != initialnode and current in repo
        )

        if current in cache:
            # case (1): We already know the successors sets
            stackedset.remove(toproceed.pop())
        elif case2condition:
            # case (2): end of walk.
            if current in repo:
                # We have a valid successors.
                cache[current] = [_succs((current,))]
            else:
                # Final obsolete version is unknown locally.
                # Do not count that as a valid successors
                cache[current] = []
        else:
            # cases (3) and (4)
            #
            # We proceed in two phases. Phase 1 aims to distinguish case (3)
            # from case (4):
            #
            #     For each direct successors of CURRENT, we check whether its
            #     successors sets are known. If they are not, we stack the
            #     unknown node and proceed to the next iteration of the while
            #     loop. (case 3)
            #
            #     During this step, we may detect obsolescence cycles: a node
            #     with unknown successors sets but already in the call stack.
            #     In such a situation, we arbitrary set the successors sets of
            #     the node to nothing (node pruned) to break the cycle.
            #
            #     If no break was encountered we proceed to phase 2.
            #
            # Phase 2 computes successors sets of CURRENT (case 4); see details
            # in phase 2 itself.
            #
            # Note the two levels of iteration in each phase.
            # - The first one handles obsolescence markers using CURRENT as
            #   precursor (successors markers of CURRENT).
            #
            #   Having multiple entry here means divergence.
            #
            # - The second one handles successors defined in each marker.
            #
            #   Having none means pruned node, multiple successors means split,
            #   single successors are standard replacement.
            #
            for mark in sorted(succmarkers[current]):
                for suc in mark[1]:
                    if suc not in cache:
                        if suc in stackedset:
                            # cycle breaking
                            cache[suc] = []
                        else:
                            # case (3) If we have not computed successors sets
                            # of one of those successors we add it to the
                            # `toproceed` stack and stop all work for this
                            # iteration.
                            toproceed.append(suc)
                            stackedset.add(suc)
                            break
                else:
                    # inner loop ran to completion: check the next marker
                    continue
                # inner loop broke (case 3): abort phase 1 entirely
                break
            else:
                # case (4): we know all successors sets of all direct
                # successors
                #
                # Successors set contributed by each marker depends on the
                # successors sets of all its "successors" node.
                #
                # Each different marker is a divergence in the obsolescence
                # history. It contributes successors sets distinct from other
                # markers.
                #
                # Within a marker, a successor may have divergent successors
                # sets. In such a case, the marker will contribute multiple
                # divergent successors sets. If multiple successors have
                # divergent successors sets, a Cartesian product is used.
                #
                # At the end we post-process successors sets to remove
                # duplicated entry and successors set that are strict subset of
                # another one.
                succssets = []
                for mark in sorted(succmarkers[current]):
                    # successors sets contributed by this marker
                    base = _succs()
                    base.markers.add(mark)
                    markss = [base]
                    for suc in mark[1]:
                        # cardinal product with previous successors
                        productresult = []
                        for prefix in markss:
                            for suffix in cache[suc]:
                                newss = prefix.copy()
                                newss.markers.update(suffix.markers)
                                for part in suffix:
                                    # do not duplicated entry in successors set
                                    # first entry wins.
                                    if part not in newss:
                                        newss.append(part)
                                productresult.append(newss)
                        if productresult:
                            markss = productresult
                    succssets.extend(markss)
                # remove duplicated and subset
                seen = []
                final = []
                candidates = sorted(
                    (s for s in succssets if s), key=len, reverse=True
                )
                for cand in candidates:
                    for seensuccs in seen:
                        if cand.canmerge(seensuccs):
                            seensuccs.markers.update(cand.markers)
                            break
                    else:
                        final.append(cand)
                        seen.append(cand)
                final.reverse()  # put small successors set first
                cache[current] = final
    return cache[initialnode]
753 753
754 754
def successorsandmarkers(repo, ctx):
    """compute the raw data needed for computing obsfate
    Returns a list of dict, one dict per successors set
    """
    if not ctx.obsolete():
        return None

    ssets = successorssets(repo, ctx.node(), closest=True)

    # closestsuccessors returns an empty list for pruned revisions, remap it
    # into a list containing an empty list for future processing
    if ssets == []:
        ssets = [[]]

    # Try to recover pruned markers
    succsmap = repo.obsstore.successors
    fullsuccessorsets = []  # successor set + markers
    for sset in ssets:
        if sset:
            fullsuccessorsets.append(sset)
            continue

        # successorsset return an empty set() when ctx or one of its
        # successors is pruned.
        # In this case, walk the obs-markers tree again starting with ctx
        # and find the relevant pruning obs-makers, the ones without
        # successors.
        # Having these markers allow us to compute some information about
        # its fate, like who pruned this changeset and when.

        # XXX we do not catch all prune markers (eg rewritten then pruned)
        # (fix me later)
        prunemarkers = [m for m in succsmap.get(ctx.node(), ()) if not m[1]]
        for mark in prunemarkers:
            pruned = _succs()
            pruned.markers.add(mark)
            fullsuccessorsets.append(pruned)
        if not prunemarkers:
            fullsuccessorsets.append(_succs())

    return [
        {b'successors': sset, b'markers': sset.markers}
        for sset in fullsuccessorsets
    ]
801 801
802 802
803 803 def _getobsfate(successorssets):
804 804 """ Compute a changeset obsolescence fate based on its successorssets.
805 805 Successors can be the tipmost ones or the immediate ones. This function
806 806 return values are not meant to be shown directly to users, it is meant to
807 807 be used by internal functions only.
808 808 Returns one fate from the following values:
809 809 - pruned
810 810 - diverged
811 811 - superseded
812 812 - superseded_split
813 813 """
814 814
815 815 if len(successorssets) == 0:
816 816 # The commit has been pruned
817 817 return b'pruned'
818 818 elif len(successorssets) > 1:
819 819 return b'diverged'
820 820 else:
821 821 # No divergence, only one set of successors
822 822 successors = successorssets[0]
823 823
824 824 if len(successors) == 1:
825 825 return b'superseded'
826 826 else:
827 827 return b'superseded_split'
828 828
829 829
def obsfateverb(successorset, markers):
    """ Return the verb summarizing the successorset and potentially using
    information from the markers
    """
    # note: ``markers`` is currently unused but kept for interface stability
    if not successorset:
        return b'pruned'
    return b'rewritten' if len(successorset) == 1 else b'split'
841 841
842 842
def markersdates(markers):
    """returns the list of dates for a list of markers
    """
    # the (unixtime, offset) date lives at index 4 of the raw marker tuple
    return [mark[4] for mark in markers]
847 847
848 848
def markersusers(markers):
    """ Returns a sorted list of markers users without duplicates
    """
    users = set()
    for mark in markers:
        # metadata lives at index 3 as a tuple of (key, value) pairs
        user = dict(mark[3]).get(b'user')
        if user:
            users.add(encoding.tolocal(user))

    return sorted(users)
860 860
861 861
def markersoperations(markers):
    """ Returns a sorted list of markers operations without duplicates
    """
    operations = set()
    for mark in markers:
        # metadata lives at index 3 as a tuple of (key, value) pairs
        operation = dict(mark[3]).get(b'operation')
        if operation:
            operations.add(operation)

    return sorted(operations)
871 871
872 872
def obsfateprinter(ui, repo, successors, markers, formatctx):
    """ Build a obsfate string for a single successorset using all obsfate
    related function defined in obsutil

    ``successors`` is one successors set, ``markers`` the markers that
    produced it, ``formatctx`` a callable rendering a changectx as bytes.
    Returns a single bytes string describing the fate.
    """
    quiet = ui.quiet
    verbose = ui.verbose
    normal = not verbose and not quiet

    line = []

    # Verb
    line.append(obsfateverb(successors, markers))

    # Operations
    operations = markersoperations(markers)
    if operations:
        line.append(b" using %s" % b", ".join(operations))

    # Successors
    if successors:
        fmtsuccessors = [formatctx(repo[succ]) for succ in successors]
        line.append(b" as %s" % b", ".join(fmtsuccessors))

    # Users
    users = markersusers(markers)
    # Filter out current user in not verbose mode to reduce amount of
    # information
    if not verbose:
        currentuser = ui.username(acceptempty=True)
        if len(users) == 1 and currentuser in users:
            users = None

    if (verbose or normal) and users:
        line.append(b" by %s" % b", ".join(users))

    # Date (only shown in verbose mode)
    dates = markersdates(markers)

    if dates and verbose:
        min_date = min(dates)
        max_date = max(dates)

        if min_date == max_date:
            # single timestamp: show it directly
            fmtmin_date = dateutil.datestr(min_date, b'%Y-%m-%d %H:%M %1%2')
            line.append(b" (at %s)" % fmtmin_date)
        else:
            # several timestamps: show the covered range
            fmtmin_date = dateutil.datestr(min_date, b'%Y-%m-%d %H:%M %1%2')
            fmtmax_date = dateutil.datestr(max_date, b'%Y-%m-%d %H:%M %1%2')
            line.append(b" (between %s and %s)" % (fmtmin_date, fmtmax_date))

    return b"".join(line)
924 924
925 925
# Map of obsfate string -> translated message template used to explain
# why a hidden (obsolete) revision is filtered out; keys match the
# values returned by _getobsfate().
filteredmsgtable = {
    b"pruned": _(b"hidden revision '%s' is pruned"),
    b"diverged": _(b"hidden revision '%s' has diverged"),
    b"superseded": _(b"hidden revision '%s' was rewritten as: %s"),
    b"superseded_split": _(b"hidden revision '%s' was split as: %s"),
    b"superseded_split_several": _(
        b"hidden revision '%s' was split as: %s and %d more"
    ),
}
935 935
936 936
def _getfilteredreason(repo, changeid, ctx):
    """return a human-friendly string on why a obsolete changeset is hidden
    """
    successors = successorssets(repo, ctx.node())
    fate = _getobsfate(successors)

    # Guard-clause style: one early return per fate, most precise last
    if fate == b'pruned':
        return filteredmsgtable[b'pruned'] % changeid
    if fate == b'diverged':
        return filteredmsgtable[b'diverged'] % changeid
    if fate == b'superseded':
        onlysucc = nodemod.short(successors[0][0])
        return filteredmsgtable[b'superseded'] % (changeid, onlysucc)
    if fate == b'superseded_split':
        shortids = [nodemod.short(node_id) for node_id in successors[0]]
        if len(shortids) <= 2:
            return filteredmsgtable[b'superseded_split'] % (
                changeid,
                b', '.join(shortids),
            )
        # Long splits: show the first two successors plus a count
        args = (changeid, b', '.join(shortids[:2]), len(shortids) - 2)
        return filteredmsgtable[b'superseded_split_several'] % args
966 966
967 967
def divergentsets(repo, ctx):
    """Compute sets of commits divergent with a given one"""
    cache = {}
    # successor set (as a tuple) -> latest known common predecessor
    latestbase = {}
    node = ctx.node()
    for candidate in allpredecessors(repo.obsstore, [node]):
        if candidate == node:
            # a node can't be a base for divergence with itself
            continue
        for succset in successorssets(repo, candidate, cache):
            if node in succset:
                # we are only interested in *other* successor sets
                continue
            key = tuple(succset)
            if key in latestbase:
                # we already know the latest base for this divergency
                continue
            latestbase[key] = candidate
    return [
        {b'divergentnodes': divset, b'commonpredecessor': base}
        for divset, base in pycompat.iteritems(latestbase)
    ]
989 989
990 990
def whyunstable(repo, ctx):
    """Return a list of dicts, one per reason why ``ctx`` is unstable.

    Each dict carries an ``instability`` kind (orphan, phase-divergent or
    content-divergent), a ``reason`` and the relevant ``node`` (plus
    ``divergentnodes`` for content divergence).
    """
    entries = []

    if ctx.orphan():
        # Orphan: at least one parent is itself orphan or obsolete
        for parent in ctx.parents():
            if parent.orphan():
                kind = b'orphan'
            elif parent.obsolete():
                kind = b'obsolete'
            else:
                continue
            entries.append(
                {
                    b'instability': b'orphan',
                    b'reason': b'%s parent' % kind,
                    b'node': parent.hex(),
                }
            )

    if ctx.phasedivergent():
        # Phase-divergent: an immutable (public) predecessor exists
        predecessors = allpredecessors(
            repo.obsstore, [ctx.node()], ignoreflags=bumpedfix
        )
        for pred in predecessors:
            if pred not in repo or repo[pred].mutable():
                continue
            entries.append(
                {
                    b'instability': b'phase-divergent',
                    b'reason': b'immutable predecessor',
                    b'node': repo[pred].hex(),
                }
            )

    if ctx.contentdivergent():
        # Content-divergent: another successor set shares a predecessor
        for dset in divergentsets(repo, ctx):
            entries.append(
                {
                    b'instability': b'content-divergent',
                    b'divergentnodes': [
                        repo[n] for n in dset[b'divergentnodes']
                    ],
                    b'reason': b'predecessor',
                    b'node': nodemod.hex(dset[b'commonpredecessor']),
                }
            )

    return entries
General Comments 0
You need to be logged in to leave comments. Login now