##// END OF EJS Templates
obsutil: sort metadata before comparing in geteffectflag()...
av6 -
r52395:a5d8f261 stable
parent child Browse files
Show More
@@ -1,1047 +1,1047 b''
1 1 # obsutil.py - utility functions for obsolescence
2 2 #
3 3 # Copyright 2017 Boris Feld <boris.feld@octobus.net>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8
9 9 import re
10 10
11 11 from .i18n import _
12 12 from .node import (
13 13 hex,
14 14 short,
15 15 )
16 16 from . import (
17 17 diffutil,
18 18 encoding,
19 19 error,
20 20 phases,
21 21 util,
22 22 )
23 23 from .utils import dateutil
24 24
25 25 ### obsolescence marker flag
26 26
27 27 ## bumpedfix flag
28 28 #
29 29 # When a changeset A' succeed to a changeset A which became public, we call A'
30 30 # "bumped" because it's a successors of a public changesets
31 31 #
32 32 # o A' (bumped)
33 33 # |`:
34 34 # | o A
35 35 # |/
36 36 # o Z
37 37 #
38 38 # The way to solve this situation is to create a new changeset Ad as children
39 39 # of A. This changeset have the same content than A'. So the diff from A to A'
40 40 # is the same than the diff from A to Ad. Ad is marked as a successors of A'
41 41 #
42 42 # o Ad
43 43 # |`:
44 44 # | x A'
45 45 # |'|
46 46 # o | A
47 47 # |/
48 48 # o Z
49 49 #
50 50 # But by transitivity Ad is also a successors of A. To avoid having Ad marked
51 51 # as bumped too, we add the `bumpedfix` flag to the marker. <A', (Ad,)>.
52 52 # This flag mean that the successors express the changes between the public and
53 53 # bumped version and fix the situation, breaking the transitivity of
54 54 # "bumped" here.
# Marker flag bits (see the diagram above for the "bumped" situation).
bumpedfix = 1  # successor fixes a bumped (phase-divergent) predecessor
usingsha256 = 2  # marker node identifiers are SHA-256 (32 bytes), not SHA-1
57 57
58 58
class marker:
    """Thin read-only wrapper around the raw tuple of an obsolescence marker.

    The underlying tuple layout is:
    (prednode, succnodes, flags, metadata, date, parents)
    """

    def __init__(self, repo, data):
        # the repo argument will be used to create changectx in later version
        self._repo = repo
        self._data = data
        self._decodedmeta = None

    def __hash__(self):
        return hash(self._data)

    def __eq__(self, other):
        # only markers of the exact same type compare equal
        return type(other) == type(self) and self._data == other._data

    def prednode(self):
        """Node identifier of the predecessor changeset."""
        return self._data[0]

    def succnodes(self):
        """Tuple of successor changeset node identifiers."""
        return self._data[1]

    def parentnodes(self):
        """Parents of the predecessor (None if not recorded)."""
        return self._data[5]

    def metadata(self):
        """Metadata as a freshly-built dictionary."""
        return dict(self._data[3])

    def date(self):
        """Creation date as an (unixtime, offset) pair."""
        return self._data[4]

    def flags(self):
        """Raw flags field of the marker."""
        return self._data[2]
99 99
100 100
def getmarkers(repo, nodes=None, exclusive=False):
    """Yield ``marker`` objects known in a repository.

    Without ``nodes``, every marker in the obsstore is yielded. With
    ``nodes``, only markers "relevant" to those nodes are yielded; if
    ``exclusive`` is also true, only markers exclusive to those nodes.
    """
    if nodes is None:
        rawmarkers = repo.obsstore
    else:
        rawmarkers = (
            exclusivemarkers(repo, nodes)
            if exclusive
            else repo.obsstore.relevantmarkers(nodes)
        )

    for markerdata in rawmarkers:
        yield marker(repo, markerdata)
115 115
116 116
def sortedmarkers(markers):
    """Return markers as a list sorted in a deterministic order.

    The last tuple item ('parents') may be either None or a tuple; it is
    normalized to the empty tuple so mixed values remain comparable.
    """

    def _sortkey(mark):
        return mark[:-1] + (mark[-1] or (),)

    return sorted(markers, key=_sortkey)
120 120
121 121
def closestpredecessors(repo, nodeid):
    """Yield the closest predecessors of ``nodeid`` that are visible.

    This respects the repoview filtering: filtered revisions count as
    missing and the walk continues through them to their own predecessors.
    """
    predecessors = repo.obsstore.predecessors
    pending = [nodeid]
    visited = set(pending)

    while pending:
        node = pending.pop()

        for mark in predecessors.get(node, ()):
            precnodeid = mark[0]

            # basic cycle protection: each node is examined at most once
            if precnodeid in visited:
                continue
            visited.add(precnodeid)

            if precnodeid in repo:
                # locally known and visible: this is a closest predecessor
                yield precnodeid
            else:
                # unknown/filtered: keep walking through it
                pending.append(precnodeid)
149 149
150 150
def allpredecessors(obsstore, nodes, ignoreflags=0):
    """Yield every predecessor of <nodes>, including the initial nodes.

    Some predecessors may be unknown locally.  This is a linear walk,
    unsuited to detecting folded changesets.  Markers carrying any flag
    present in ``ignoreflags`` are skipped.
    """
    pending = set(nodes)
    emitted = set(pending)
    getpredmarkers = obsstore.predecessors.get
    while pending:
        node = pending.pop()
        yield node
        for mark in getpredmarkers(node, ()):
            # skip markers flagged with one of the ignored flags
            if mark[2] & ignoreflags:
                continue
            prec = mark[0]
            if prec not in emitted:
                emitted.add(prec)
                pending.add(prec)
173 173
174 174
def allsuccessors(obsstore, nodes, ignoreflags=0):
    """Yield every successor of <nodes>, including the initial nodes.

    Some successors may be unknown locally.  This is a linear walk,
    unsuited to detecting split changesets.  Markers carrying any flag
    present in ``ignoreflags`` are skipped.
    """
    pending = set(nodes)
    emitted = set(pending)
    while pending:
        node = pending.pop()
        yield node
        for mark in obsstore.successors.get(node, ()):
            # skip markers flagged with one of the ignored flags
            if mark[2] & ignoreflags:
                continue
            for succ in mark[1]:
                if succ not in emitted:
                    emitted.add(succ)
                    pending.add(succ)
195 195
196 196
197 197 def _filterprunes(markers):
198 198 """return a set with no prune markers"""
199 199 return {m for m in markers if m[1]}
200 200
201 201
def exclusivemarkers(repo, nodes):
    """set of markers relevant to "nodes" but no other locally-known nodes

    This function computes the set of markers "exclusive" to a locally-known
    node. This means we walk the markers starting from <nodes> until we reach a
    locally-known precursor outside of <nodes>. Elements of <nodes> with
    locally-known successors outside of <nodes> are ignored (since their
    precursors markers are also relevant to these successors).

    For example:

        # (A0 rewritten as A1)
        #
        # A0 <-1- A1 # Marker "1" is exclusive to A1

    or

        # (A0 rewritten as AX; AX rewritten as A1; AX is unknown locally)
        #
        # <-1- A0 <-2- AX <-3- A1 # Markers "2,3" are exclusive to A1

    or

        # (A0 has unknown precursors, A0 rewritten as A1 and A2 (divergence))
        #
        #          <-2- A1 # Marker "2" is exclusive to A0,A1
        #        /
        # <-1- A0
        #        \
        #         <-3- A2 # Marker "3" is exclusive to A0,A2
        #
        # in addition:
        #
        #  Markers "2,3" are exclusive to A1,A2
        #  Markers "1,2,3" are exclusive to A0,A1,A2

    See test/test-obsolete-bundle-strip.t for more examples.

    An example usage is strip. When stripping a changeset, we also want to
    strip the markers exclusive to this changeset. Otherwise we would have
    "dangling" obsolescence markers from its precursors: obsolescence markers
    marking a node as obsolete without any successors available locally.

    As for relevant markers, the prune markers for children will be followed.
    Of course, they will only be followed if the pruned children are
    locally-known. Since the prune markers are relevant to the pruned node.
    However, while prune markers are considered relevant to the parent of the
    pruned changesets, prune markers for locally-known changesets (with no
    successors) are considered exclusive to the pruned nodes. This allows
    to strip the prune markers (with the rest of the exclusive chain) alongside
    the pruned changesets.
    """
    # running on a filtered repository would be dangerous as markers could be
    # reported as exclusive when they are relevant for other filtered nodes.
    unfi = repo.unfiltered()

    # shortcut to various useful items
    has_node = unfi.changelog.index.has_node
    precursorsmarkers = unfi.obsstore.predecessors
    successormarkers = unfi.obsstore.successors
    childrenmarkers = unfi.obsstore.children

    # exclusive markers (return value of the function)
    exclmarkers = set()
    # we need fast membership testing
    nodes = set(nodes)
    # looking for heads in the obshistory
    #
    # XXX we are ignoring all issues in regard with cycle for now.
    stack = [n for n in nodes if not _filterprunes(successormarkers.get(n, ()))]
    # sort for determinism: the walk order affects nothing but reproducibility
    stack.sort()
    # nodes already stacked
    seennodes = set(stack)
    while stack:
        current = stack.pop()
        # fetch precursors markers
        markers = list(precursorsmarkers.get(current, ()))
        # extend the list with prune markers (markers obsoleting `current`
        # without successors)
        for mark in successormarkers.get(current, ()):
            if not mark[1]:
                markers.append(mark)
        # and markers from children (looking for prune)
        for mark in childrenmarkers.get(current, ()):
            if not mark[1]:
                markers.append(mark)
        # traverse the markers
        for mark in markers:
            if mark in exclmarkers:
                # marker already selected
                continue

            # If the marker is about the current node, select it
            #
            # (this delays the addition of markers from children)
            if mark[1] or mark[0] == current:
                exclmarkers.add(mark)

            # should we keep traversing through the precursors?
            prec = mark[0]

            # node already in the stack or already processed
            if prec in seennodes:
                continue

            # is this a locally known node?
            known = has_node(prec)
            # if locally-known and not in the <nodes> set, the traversal
            # stops here.
            if known and prec not in nodes:
                continue

            # do not keep going if there are unselected markers pointing to
            # this node. If we end up traversing these unselected markers
            # later the node will be taken care of at that point.
            precmarkers = _filterprunes(successormarkers.get(prec))
            if precmarkers.issubset(exclmarkers):
                seennodes.add(prec)
                stack.append(prec)

    return exclmarkers
322 322
323 323
def foreground(repo, nodes):
    """return all nodes in the "foreground" of other nodes

    The foreground of a revision is anything reachable using parent -> children
    or precursor -> successor relations. It is very similar to "descendant" but
    augmented with obsolescence information.

    Beware that possible obsolescence cycles may result in infinite-looking
    growth in complex situations.
    """
    repo = repo.unfiltered()
    foreground = set(repo.set(b'%ln::', nodes))
    if repo.obsstore:
        # We only need this complicated logic if there is obsolescence
        # XXX will probably deserve an optimised revset.
        has_node = repo.changelog.index.has_node
        plen = -1
        # fixed-point iteration: keep adding successors and their descendants
        # until the set stops growing
        while len(foreground) != plen:
            plen = len(foreground)
            succs = {c.node() for c in foreground}
            mutable = [c.node() for c in foreground if c.mutable()]
            succs.update(allsuccessors(repo.obsstore, mutable))
            # only locally-known successors can seed the next revset query
            known = (n for n in succs if has_node(n))
            foreground = set(repo.set(b'%ln::', known))
    return {c.node() for c in foreground}
349 349
350 350
351 351 # effectflag field
352 352 #
353 353 # Effect-flag is a 1-byte bit field used to store what changed between a
354 354 # changeset and its successor(s).
355 355 #
356 356 # The effect flag is stored in obs-markers metadata while we iterate on the
357 357 # information design. That's why we have the EFFECTFLAGFIELD. If we come up
358 358 # with an incompatible design for effect flag, we can store a new design under
359 359 # another field name so we don't break readers. We plan to extend the existing
360 360 # obsmarkers bit-field when the effect flag design will be stabilized.
361 361 #
362 362 # The effect-flag is placed behind an experimental flag
363 363 # `effect-flags` set to off by default.
364 364 #
365 365
# obs-marker metadata field under which the effect flag is stored
EFFECTFLAGFIELD = b"ef1"

# individual bits of the effect flag
# NOTE: bits 2 and 3 are declared out of numeric order; only the values matter.
DESCCHANGED = 1 << 0  # action changed the description
METACHANGED = 1 << 1  # action changed the meta
DIFFCHANGED = 1 << 3  # action changed the diff introduced by the changeset
PARENTCHANGED = 1 << 2  # action changed the parent
USERCHANGED = 1 << 4  # the user changed
DATECHANGED = 1 << 5  # the date changed
BRANCHCHANGED = 1 << 6  # the branch changed

# extra-data keys ignored when comparing changeset metadata: they record
# rewrite provenance, not user-meaningful content
METABLACKLIST = [
    re.compile(b'^branch$'),
    re.compile(b'^.*-source$'),
    re.compile(b'^.*_source$'),
    re.compile(b'^source$'),
]
382 382
383 383
def metanotblacklisted(metaitem):
    """Return True if the key of ``metaitem`` (an (extrakey, extravalue)
    pair) matches none of the METABLACKLIST patterns.
    """
    metakey = metaitem[0]

    for pattern in METABLACKLIST:
        if pattern.match(metakey):
            return False
    return True
391 391
392 392
393 393 def _prepare_hunk(hunk):
394 394 """Drop all information but the username and patch"""
395 395 cleanhunk = []
396 396 for line in hunk.splitlines():
397 397 if line.startswith(b'# User') or not line.startswith(b'#'):
398 398 if line.startswith(b'@@'):
399 399 line = b'@@\n'
400 400 cleanhunk.append(line)
401 401 return cleanhunk
402 402
403 403
def _getdifflines(iterdiff):
    """Pull the next hunk from ``iterdiff`` and return its cleaned-up lines.

    Returns None once the iterator is exhausted.
    """
    lines = next(iterdiff, None)
    if lines is None:
        return None
    return _prepare_hunk(lines)
412 412
413 413
def _cmpdiff(leftctx, rightctx):
    """return True if both ctx introduce the "same diff"

    This is a first and basic implementation, with many shortcomings.
    """
    diffopts = diffutil.diffallopts(leftctx.repo().ui, {b'git': True})

    # leftctx or rightctx might be filtered, so we need to use the contexts
    # with an unfiltered repository to safely compute the diff

    # leftctx and rightctx can be from different repository views in case of
    # hgsubversion, so don't try to access them from the same repository:
    # rightctx.repo() and leftctx.repo() are not always the same
    leftunfi = leftctx._repo.unfiltered()[leftctx.rev()]
    leftdiff = leftunfi.diff(opts=diffopts)
    rightunfi = rightctx._repo.unfiltered()[rightctx.rev()]
    rightdiff = rightunfi.diff(opts=diffopts)

    # compare the two diffs hunk by hunk; stop at the first difference or
    # when both iterators are exhausted (both sides return None)
    left, right = (0, 0)
    while None not in (left, right):
        left = _getdifflines(leftdiff)
        right = _getdifflines(rightdiff)

        if left != right:
            return False
    return True
440 440
441 441
def geteffectflag(source, successors):
    """From an obs-marker relation, compute what changed between the
    predecessor and the successor(s).

    ``source`` is the predecessor changectx; ``successors`` is an iterable
    of successor changectx.  Returns a bit field combining the *CHANGED
    flags defined above (0 when nothing relevant changed).
    """
    effects = 0

    for changectx in successors:
        # Check if description has changed
        if changectx.description() != source.description():
            effects |= DESCCHANGED

        # Check if user has changed
        if changectx.user() != source.user():
            effects |= USERCHANGED

        # Check if date has changed
        if changectx.date() != source.date():
            effects |= DATECHANGED

        # Check if branch has changed
        if changectx.branch() != source.branch():
            effects |= BRANCHCHANGED

        # Check if at least one of the parents has changed
        if changectx.parents() != source.parents():
            effects |= PARENTCHANGED

        # Check if other meta has changed.  The iteration order of extra()
        # items is not guaranteed to be stable, so sort both sides before
        # comparing; otherwise identical metadata in a different order would
        # spuriously set METACHANGED.
        changeextra = changectx.extra().items()
        ctxmeta = sorted(filter(metanotblacklisted, changeextra))

        sourceextra = source.extra().items()
        srcmeta = sorted(filter(metanotblacklisted, sourceextra))

        if ctxmeta != srcmeta:
            effects |= METACHANGED

        # Check if the diff has changed
        if not _cmpdiff(source, changectx):
            effects |= DIFFCHANGED

    return effects
484 484
485 485
def getobsoleted(repo, tr=None, changes=None):
    """return the set of pre-existing revisions obsoleted by a transaction

    Either the transaction or the changes item of the transaction (for hooks)
    must be provided, but not both.
    """
    if (tr is None) == (changes is None):
        e = b"exactly one of tr and changes must be provided"
        raise error.ProgrammingError(e)
    torev = repo.unfiltered().changelog.index.get_rev
    phase = repo._phasecache.phase
    succsmarkers = repo.obsstore.successors.get
    public = phases.public
    if changes is None:
        changes = tr.changes
    addedmarkers = changes[b'obsmarkers']
    origrepolen = changes[b'origrepolen']
    seenrevs = set()
    obsoleted = set()
    for mark in addedmarkers:
        node = mark[0]
        rev = torev(node)
        # skip nodes unknown locally, revisions already examined, and
        # revisions created by this very transaction (rev >= origrepolen)
        if rev is None or rev in seenrevs or rev >= origrepolen:
            continue
        seenrevs.add(rev)
        # public changesets cannot become obsolete
        if phase(repo, rev) == public:
            continue
        # count the revision as newly obsoleted only if every marker making
        # it obsolete was added by this transaction
        if set(succsmarkers(node) or []).issubset(addedmarkers):
            obsoleted.add(rev)
    return obsoleted
516 516
517 517
class _succs(list):
    """A successors set (a list of nodes) plus the markers that produced it."""

    def __init__(self, *args, **kwargs):
        super(_succs, self).__init__(*args, **kwargs)
        # obsolescence markers contributing to this successors set
        self.markers = set()

    def copy(self):
        """Return a copy whose marker set is independent of self's."""
        duplicate = _succs(self)
        duplicate.markers = self.markers.copy()
        return duplicate

    @util.propertycache
    def _set(self):
        # cached set view of the list contents; treat as immutable
        return set(self)

    def canmerge(self, other):
        """True when self's nodes all appear in other (self can be merged)."""
        return self._set.issubset(other._set)
537 537
538 538
def successorssets(repo, initialnode, closest=False, cache=None):
    """Return set of all latest successors of initial nodes

    The successors set of a changeset A are the group of revisions that succeed
    A. It succeeds A as a consistent whole, each revision being only a partial
    replacement. By default, the successors set contains non-obsolete
    changesets only, walking the obsolescence graph until reaching a leaf. If
    'closest' is set to True, closest successors-sets are returned (the
    obsolescence walk stops on known changesets).

    This function returns the full list of successor sets which is why it
    returns a list of tuples and not just a single tuple. Each tuple is a valid
    successors set. Note that (A,) may be a valid successors set for changeset A
    (see below).

    In most cases, a changeset A will have a single element (e.g. the changeset
    A is replaced by A') in its successors set. Though, it is also common for a
    changeset A to have no elements in its successor set (e.g. the changeset
    has been pruned). Therefore, the returned list of successors sets will be
    [(A',)] or [], respectively.

    When a changeset A is split into A' and B', however, it will result in a
    successors set containing more than a single element, i.e. [(A',B')].
    Divergent changesets will result in multiple successors sets, i.e. [(A',),
    (A'')].

    If a changeset A is not obsolete, then it will conceptually have no
    successors set. To distinguish this from a pruned changeset, the successor
    set will contain itself only, i.e. [(A,)].

    Finally, final successors unknown locally are considered to be pruned
    (pruned: obsoleted without any successors). (Final: successors not affected
    by markers).

    The 'closest' mode respects the repoview filtering. For example, without
    filter it will stop at the first locally known changeset, with 'visible'
    filter it will stop on visible changesets).

    The optional `cache` parameter is a dictionary that may contain
    precomputed successors sets. It is meant to reuse the computation of a
    previous call to `successorssets` when multiple calls are made at the same
    time. The cache dictionary is updated in place. The caller is responsible
    for its life span. Code that makes multiple calls to `successorssets`
    *should* use this cache mechanism or risk a performance hit.

    Since results are different depending on the 'closest' mode, the same
    cache cannot be reused for both modes.
    """

    succmarkers = repo.obsstore.successors

    # Stack of nodes we search successors sets for
    toproceed = [initialnode]
    # set version of above list for fast loop detection
    # element added to "toproceed" must be added here
    stackedset = set(toproceed)
    if cache is None:
        cache = {}

    # This while loop is the flattened version of a recursive search for
    # successors sets
    #
    # def successorssets(x):
    #     successors = directsuccessors(x)
    #     ss = [[]]
    #     for succ in directsuccessors(x):
    #         # product as in itertools cartesian product
    #         ss = product(ss, successorssets(succ))
    #     return ss
    #
    # But we can not use plain recursive calls here:
    # - that would blow the python call stack
    # - obsolescence markers may have cycles, we need to handle them.
    #
    # The `toproceed` list act as our call stack. Every node we search
    # successors set for are stacked there.
    #
    # The `stackedset` is set version of this stack used to check if a node is
    # already stacked. This check is used to detect cycles and prevent infinite
    # loop.
    #
    # successors set of all nodes are stored in the `cache` dictionary.
    #
    # After this while loop ends we use the cache to return the successors sets
    # for the node requested by the caller.
    while toproceed:
        # Every iteration tries to compute the successors sets of the topmost
        # node of the stack: CURRENT.
        #
        # There are four possible outcomes:
        #
        #    1) We already know the successors sets of CURRENT:
        #       -> mission accomplished, pop it from the stack.
        #    2) Stop the walk:
        #       default case: Node is not obsolete
        #       closest case: Node is known at this repo filter level
        #       -> the node is its own successors sets. Add it to the cache.
        #    3) We do not know successors set of direct successors of CURRENT:
        #       -> We add those successors to the stack.
        #    4) We know successors sets of all direct successors of CURRENT:
        #       -> We can compute CURRENT successors set and add it to the
        #          cache.
        #
        current = toproceed[-1]

        # case 2 condition is a bit hairy because of closest,
        # we compute it on its own
        case2condition = (current not in succmarkers) or (
            closest and current != initialnode and current in repo
        )

        if current in cache:
            # case (1): We already know the successors sets
            stackedset.remove(toproceed.pop())
        elif case2condition:
            # case (2): end of walk.
            if current in repo:
                # We have a valid successors.
                cache[current] = [_succs((current,))]
            else:
                # Final obsolete version is unknown locally.
                # Do not count that as a valid successors
                cache[current] = []
        else:
            # cases (3) and (4)
            #
            # We proceed in two phases. Phase 1 aims to distinguish case (3)
            # from case (4):
            #
            #     For each direct successors of CURRENT, we check whether its
            #     successors sets are known. If they are not, we stack the
            #     unknown node and proceed to the next iteration of the while
            #     loop. (case 3)
            #
            #     During this step, we may detect obsolescence cycles: a node
            #     with unknown successors sets but already in the call stack.
            #     In such a situation, we arbitrary set the successors sets of
            #     the node to nothing (node pruned) to break the cycle.
            #
            #     If no break was encountered we proceed to phase 2.
            #
            # Phase 2 computes successors sets of CURRENT (case 4); see details
            # in phase 2 itself.
            #
            # Note the two levels of iteration in each phase.
            # - The first one handles obsolescence markers using CURRENT as
            #   precursor (successors markers of CURRENT).
            #
            #   Having multiple entry here means divergence.
            #
            # - The second one handles successors defined in each marker.
            #
            #   Having none means pruned node, multiple successors means split,
            #   single successors are standard replacement.
            #
            for mark in sortedmarkers(succmarkers[current]):
                for suc in mark[1]:
                    if suc not in cache:
                        if suc in stackedset:
                            # cycle breaking
                            cache[suc] = []
                        else:
                            # case (3) If we have not computed successors sets
                            # of one of those successors we add it to the
                            # `toproceed` stack and stop all work for this
                            # iteration.
                            toproceed.append(suc)
                            stackedset.add(suc)
                            break
                else:
                    continue
                break
            else:
                # case (4): we know all successors sets of all direct
                # successors
                #
                # Successors set contributed by each marker depends on the
                # successors sets of all its "successors" node.
                #
                # Each different marker is a divergence in the obsolescence
                # history. It contributes successors sets distinct from other
                # markers.
                #
                # Within a marker, a successor may have divergent successors
                # sets. In such a case, the marker will contribute multiple
                # divergent successors sets. If multiple successors have
                # divergent successors sets, a Cartesian product is used.
                #
                # At the end we post-process successors sets to remove
                # duplicated entry and successors set that are strict subset of
                # another one.
                succssets = []
                for mark in sortedmarkers(succmarkers[current]):
                    # successors sets contributed by this marker
                    base = _succs()
                    base.markers.add(mark)
                    markss = [base]
                    for suc in mark[1]:
                        # cardinal product with previous successors
                        productresult = []
                        for prefix in markss:
                            for suffix in cache[suc]:
                                newss = prefix.copy()
                                newss.markers.update(suffix.markers)
                                for part in suffix:
                                    # do not duplicated entry in successors set
                                    # first entry wins.
                                    if part not in newss:
                                        newss.append(part)
                                productresult.append(newss)
                        if productresult:
                            markss = productresult
                    succssets.extend(markss)
                # remove duplicated and subset
                seen = []
                final = []
                candidates = sorted(
                    (s for s in succssets if s), key=len, reverse=True
                )
                for cand in candidates:
                    for seensuccs in seen:
                        if cand.canmerge(seensuccs):
                            seensuccs.markers.update(cand.markers)
                            break
                    else:
                        final.append(cand)
                        seen.append(cand)
                final.reverse()  # put small successors set first
                cache[current] = final
    return cache[initialnode]
769 769
770 770
def successorsandmarkers(repo, ctx):
    """compute the raw data needed for computing obsfate

    Returns a list of dicts, one dict per successors set, or None when the
    changeset is not obsolete.
    """
    if not ctx.obsolete():
        return None

    ssets = successorssets(repo, ctx.node(), closest=True)

    # closestsuccessors returns an empty list for pruned revisions, remap it
    # into a list containing an empty list for future processing
    if ssets == []:
        ssets = [_succs()]

    # Try to recover pruned markers
    succsmap = repo.obsstore.successors
    fullsuccessorsets = []  # successor set + markers
    for sset in ssets:
        if sset:
            fullsuccessorsets.append(sset)
        else:
            # successorsset returns an empty set() when ctx or one of its
            # successors is pruned.
            # In this case, walk the obs-markers tree again starting with ctx
            # and find the relevant pruning obs-markers, the ones without
            # successors.
            # Having these markers allow us to compute some information about
            # its fate, like who pruned this changeset and when.

            # XXX we do not catch all prune markers (eg rewritten then pruned)
            # (fix me later)
            foundany = False
            for mark in succsmap.get(ctx.node(), ()):
                if not mark[1]:
                    foundany = True
                    sset = _succs()
                    sset.markers.add(mark)
                    fullsuccessorsets.append(sset)
            if not foundany:
                # no prune marker found either: report an empty fate
                fullsuccessorsets.append(_succs())

    values = []
    for sset in fullsuccessorsets:
        values.append({b'successors': sset, b'markers': sset.markers})

    return values
817 817
818 818
819 819 def _getobsfate(successorssets):
820 820 """Compute a changeset obsolescence fate based on its successorssets.
821 821 Successors can be the tipmost ones or the immediate ones. This function
822 822 return values are not meant to be shown directly to users, it is meant to
823 823 be used by internal functions only.
824 824 Returns one fate from the following values:
825 825 - pruned
826 826 - diverged
827 827 - superseded
828 828 - superseded_split
829 829 """
830 830
831 831 if len(successorssets) == 0:
832 832 # The commit has been pruned
833 833 return b'pruned'
834 834 elif len(successorssets) > 1:
835 835 return b'diverged'
836 836 else:
837 837 # No divergence, only one set of successors
838 838 successors = successorssets[0]
839 839
840 840 if len(successors) == 1:
841 841 return b'superseded'
842 842 else:
843 843 return b'superseded_split'
844 844
845 845
def obsfateverb(successorset, markers):
    """Return the verb summarizing the successorset, potentially using
    information from the markers (currently unused, kept for API symmetry).
    """
    if not successorset:
        return b'pruned'
    if len(successorset) == 1:
        return b'rewritten'
    return b'split'
857 857
858 858
def markersdates(markers):
    """Return the creation date (tuple index 4) of each marker, in order."""
    return [mark[4] for mark in markers]
862 862
863 863
def markersusers(markers):
    """Return a sorted list, without duplicates, of the users recorded in
    the markers' metadata.

    User names are converted to the local encoding; markers lacking a
    ``user`` metadata entry are skipped.
    """
    users = set()
    for mark in markers:
        user = dict(mark[3]).get(b'user')
        if user:
            users.add(encoding.tolocal(user))

    return sorted(users)
874 874
875 875
def markersoperations(markers):
    """Return a sorted list, without duplicates, of the operations recorded
    in the markers' metadata (markers lacking an ``operation`` entry are
    skipped).
    """
    operations = set()
    for mark in markers:
        operation = dict(mark[3]).get(b'operation')
        if operation:
            operations.add(operation)

    return sorted(operations)
884 884
885 885
def obsfateprinter(ui, repo, successors, markers, formatctx):
    """Build a obsfate string for a single successorset using all obsfate
    related function defined in obsutil
    """
    quiet = ui.quiet
    verbose = ui.verbose
    normal = not verbose and not quiet

    pieces = []

    # Verb
    pieces.append(obsfateverb(successors, markers))

    # Operations
    ops = markersoperations(markers)
    if ops:
        pieces.append(b" using %s" % b", ".join(ops))

    # Successors
    if successors:
        formatted = [formatctx(repo[s]) for s in successors]
        pieces.append(b" as %s" % b", ".join(formatted))

    # Users
    users = markersusers(markers)
    if not verbose:
        # Filter out current user in not verbose mode to reduce amount of
        # information
        currentuser = ui.username(acceptempty=True)
        if len(users) == 1 and currentuser in users:
            users = None

    if (verbose or normal) and users:
        pieces.append(b" by %s" % b", ".join(users))

    # Date (verbose only)
    dates = markersdates(markers)

    if dates and verbose:
        earliest = min(dates)
        latest = max(dates)

        if earliest == latest:
            stamp = dateutil.datestr(earliest, b'%Y-%m-%d %H:%M %1%2')
            pieces.append(b" (at %s)" % stamp)
        else:
            fmtearliest = dateutil.datestr(earliest, b'%Y-%m-%d %H:%M %1%2')
            fmtlatest = dateutil.datestr(latest, b'%Y-%m-%d %H:%M %1%2')
            pieces.append(b" (between %s and %s)" % (fmtearliest, fmtlatest))

    return b"".join(pieces)
937 937
938 938
# Map of obsolescence fate -> message template used to explain why a hidden
# revision is filtered. Keys match the values returned by _getobsfate(),
# plus the "_several" variant used when a split has more than two
# successors.
filteredmsgtable = {
    b"pruned": _(b"hidden revision '%s' is pruned"),
    b"diverged": _(b"hidden revision '%s' has diverged"),
    b"superseded": _(b"hidden revision '%s' was rewritten as: %s"),
    b"superseded_split": _(b"hidden revision '%s' was split as: %s"),
    b"superseded_split_several": _(
        b"hidden revision '%s' was split as: %s and %d more"
    ),
}
948 948
949 949
def _getfilteredreason(repo, changeid, ctx):
    """return a human-friendly string on why a obsolete changeset is hidden"""
    succsets = successorssets(repo, ctx.node())
    fate = _getobsfate(succsets)

    # Be more precise in case the revision is superseded
    if fate == b'pruned':
        return filteredmsgtable[b'pruned'] % changeid
    if fate == b'diverged':
        return filteredmsgtable[b'diverged'] % changeid
    if fate == b'superseded':
        onlysuccessor = short(succsets[0][0])
        return filteredmsgtable[b'superseded'] % (changeid, onlysuccessor)
    if fate == b'superseded_split':
        shortened = [short(node_id) for node_id in succsets[0]]

        if len(shortened) <= 2:
            fmtsuccs = b', '.join(shortened)
            return filteredmsgtable[b'superseded_split'] % (changeid, fmtsuccs)

        # More than two successors: show the first two and a count
        shown = b', '.join(shortened[:2])
        remaining = len(shortened) - 2

        args = (changeid, shown, remaining)
        return filteredmsgtable[b'superseded_split_several'] % args
978 978
979 979
def divergentsets(repo, ctx):
    """Compute sets of commits divergent with a given one"""
    cache = {}
    bases = {}
    node = ctx.node()
    for pred in allpredecessors(repo.obsstore, [node]):
        if pred == node:
            # a node can't be a base for divergence with itself
            continue
        for succset in successorssets(repo, pred, cache):
            if node in succset:
                # we are only interested in *other* successor sets
                continue
            key = tuple(succset)
            if key in bases:
                # we already know the latest base for this divergency
                continue
            bases[key] = pred
    return [
        {b'divergentnodes': divset, b'commonpredecessor': base}
        for divset, base in bases.items()
    ]
1001 1001
1002 1002
def whyunstable(repo, ctx):
    """Return a list of dicts, one per reason why ``ctx`` is unstable.

    Each dict carries an ``instability`` kind, a ``reason`` string and the
    hex ``node`` of the offending changeset (content-divergent entries also
    list the ``divergentnodes``).
    """
    entries = []
    if ctx.orphan():
        # Report every parent that makes this changeset an orphan
        for parent in ctx.parents():
            if parent.orphan():
                kind = b'orphan'
            elif parent.obsolete():
                kind = b'obsolete'
            else:
                continue
            entries.append(
                {
                    b'instability': b'orphan',
                    b'reason': b'%s parent' % kind,
                    b'node': parent.hex(),
                }
            )
    if ctx.phasedivergent():
        # Report each predecessor that became immutable (bumpedfix markers
        # are ignored, as in the original computation of the instability)
        preds = allpredecessors(
            repo.obsstore, [ctx.node()], ignoreflags=bumpedfix
        )
        for pred in preds:
            if pred not in repo or repo[pred].mutable():
                continue
            entries.append(
                {
                    b'instability': b'phase-divergent',
                    b'reason': b'immutable predecessor',
                    b'node': repo[pred].hex(),
                }
            )
    if ctx.contentdivergent():
        for dset in divergentsets(repo, ctx):
            entries.append(
                {
                    b'instability': b'content-divergent',
                    b'divergentnodes': [
                        repo[n] for n in dset[b'divergentnodes']
                    ],
                    b'reason': b'predecessor',
                    b'node': hex(dset[b'commonpredecessor']),
                }
            )
    return entries
General Comments 0
You need to be logged in to leave comments. Login now