##// END OF EJS Templates
obsutil: use public interface to access to repo.ui
Yuya Nishihara -
r38605:79d7d715 default
parent child Browse files
Show More
@@ -1,980 +1,980 b''
1 1 # obsutil.py - utility functions for obsolescence
2 2 #
3 3 # Copyright 2017 Boris Feld <boris.feld@octobus.net>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import re
11 11
12 12 from .i18n import _
13 13 from . import (
14 14 node as nodemod,
15 15 phases,
16 16 util,
17 17 )
18 18 from .utils import (
19 19 dateutil,
20 20 diffutil,
21 21 )
22 22
23 23 ### obsolescence marker flag
24 24
25 25 ## bumpedfix flag
26 26 #
27 27 # When a changeset A' succeed to a changeset A which became public, we call A'
28 28 # "bumped" because it's a successors of a public changesets
29 29 #
30 30 # o A' (bumped)
31 31 # |`:
32 32 # | o A
33 33 # |/
34 34 # o Z
35 35 #
36 36 # The way to solve this situation is to create a new changeset Ad as children
37 37 # of A. This changeset have the same content than A'. So the diff from A to A'
38 38 # is the same than the diff from A to Ad. Ad is marked as a successors of A'
39 39 #
40 40 # o Ad
41 41 # |`:
42 42 # | x A'
43 43 # |'|
44 44 # o | A
45 45 # |/
46 46 # o Z
47 47 #
48 48 # But by transitivity Ad is also a successors of A. To avoid having Ad marked
49 49 # as bumped too, we add the `bumpedfix` flag to the marker. <A', (Ad,)>.
50 50 # This flag mean that the successors express the changes between the public and
51 51 # bumped version and fix the situation, breaking the transitivity of
52 52 # "bumped" here.
# flag value marking a marker that "fixes" a bumped changeset (see the
# `bumpedfix flag` discussion above); it breaks "bumped" transitivity
bumpedfix = 1
# flag value: node hashes in this marker use sha256 rather than sha1
# (presumed from the name -- confirm against the obsstore format docs)
usingsha256 = 2
55 55
class marker(object):
    """Lightweight wrapper around raw obsolescence marker data.

    Exposes named accessors for the positional fields of a raw marker
    tuple: (predecessor, successors, flags, metadata, date, parents).
    """

    def __init__(self, repo, data):
        # the repo argument will be used to create changectx in later version
        self._repo = repo
        self._data = data
        self._decodedmeta = None

    def __hash__(self):
        return hash(self._data)

    def __eq__(self, other):
        # only compare equal to another marker carrying identical raw data
        return type(self) == type(other) and self._data == other._data

    def prednode(self):
        """Predecessor changeset node identifier"""
        return self._data[0]

    def succnodes(self):
        """List of successor changesets node identifiers"""
        return self._data[1]

    def parentnodes(self):
        """Parents of the predecessors (None if not recorded)"""
        return self._data[5]

    def metadata(self):
        """Decoded metadata dictionary"""
        return dict(self._data[3])

    def date(self):
        """Creation date as (unixtime, offset)"""
        return self._data[4]

    def flags(self):
        """The flags field of the marker"""
        return self._data[2]
96 96
def getmarkers(repo, nodes=None, exclusive=False):
    """Yield the markers known in a repository, wrapped as ``marker``.

    If <nodes> is specified, only markers "relevant" to those nodes
    are returned."""
    if nodes is None:
        # no node filter: walk the whole obsstore
        source = repo.obsstore
    elif exclusive:
        # only markers exclusive to <nodes>
        source = exclusivemarkers(repo, nodes)
    else:
        # all markers relevant to <nodes>
        source = repo.obsstore.relevantmarkers(nodes)

    for rawdata in source:
        yield marker(repo, rawdata)
111 111
def closestpredecessors(repo, nodeid):
    """yield the list of next predecessors pointing on visible changectx nodes

    This function respect the repoview filtering, filtered revision will be
    considered missing.
    """
    predmap = repo.obsstore.predecessors
    pending = [nodeid]
    visited = set(pending)

    while pending:
        node = pending.pop()

        for markdata in predmap.get(node, ()):
            prednode = markdata[0]

            # Basic cycle protection
            if prednode in visited:
                continue
            visited.add(prednode)

            if prednode in repo:
                # visible at this filter level: report it
                yield prednode
            else:
                # unknown or filtered: keep walking further back
                pending.append(prednode)
139 139
def allpredecessors(obsstore, nodes, ignoreflags=0):
    """Yield node for every precursors of <nodes>.

    Some precursors may be unknown locally.

    This is a linear yield unsuited to detecting folded changesets. It includes
    initial nodes too."""
    pending = set(nodes)
    emitted = set(pending)
    while pending:
        node = pending.pop()
        yield node
        for mark in obsstore.predecessors.get(node, ()):
            # skip markers carrying any of the ignored flags
            if mark[2] & ignoreflags:
                continue
            pred = mark[0]
            if pred not in emitted:
                emitted.add(pred)
                pending.add(pred)
161 161
def allsuccessors(obsstore, nodes, ignoreflags=0):
    """Yield node for every successor of <nodes>.

    Some successors may be unknown locally.

    This is a linear yield unsuited to detecting split changesets. It includes
    initial nodes too."""
    pending = set(nodes)
    emitted = set(pending)
    while pending:
        node = pending.pop()
        yield node
        for mark in obsstore.successors.get(node, ()):
            # skip markers carrying any of the ignored flags
            if mark[2] & ignoreflags:
                continue
            for succ in mark[1]:
                if succ not in emitted:
                    emitted.add(succ)
                    pending.add(succ)
182 182
183 183 def _filterprunes(markers):
184 184 """return a set with no prune markers"""
185 185 return set(m for m in markers if m[1])
186 186
def exclusivemarkers(repo, nodes):
    """set of markers relevant to "nodes" but no other locally-known nodes

    This function compute the set of markers "exclusive" to a locally-known
    node. This means we walk the markers starting from <nodes> until we reach a
    locally-known precursors outside of <nodes>. Element of <nodes> with
    locally-known successors outside of <nodes> are ignored (since their
    precursors markers are also relevant to these successors).

    For example:

        # (A0 rewritten as A1)
        #
        # A0 <-1- A1 # Marker "1" is exclusive to A1

    or

        # (A0 rewritten as AX; AX rewritten as A1; AX is unknown locally)
        #
        # <-1- A0 <-2- AX <-3- A1 # Marker "2,3" are exclusive to A1

    or

        # (A0 has unknown precursors, A0 rewritten as A1 and A2 (divergence))
        #
        #          <-2- A1 # Marker "2" is exclusive to A0,A1
        #        /
        # <-1- A0
        #        \
        #         <-3- A2 # Marker "3" is exclusive to A0,A2
        #
        # in addition:
        #
        #  Markers "2,3" are exclusive to A1,A2
        #  Markers "1,2,3" are exclusive to A0,A1,A2

    See test/test-obsolete-bundle-strip.t for more examples.

    An example usage is strip. When stripping a changeset, we also want to
    strip the markers exclusive to this changeset. Otherwise we would have
    "dangling" obsolescence markers from its precursors: Obsolescence markers
    marking a node as obsolete without any successors available locally.

    As for relevant markers, the prune markers for children will be followed.
    Of course, they will only be followed if the pruned children is
    locally-known. Since the prune markers are relevant to the pruned node.
    However, while prune markers are considered relevant to the parent of the
    pruned changesets, prune markers for locally-known changeset (with no
    successors) are considered exclusive to the pruned nodes. This allows
    to strip the prune markers (with the rest of the exclusive chain) alongside
    the pruned changesets.
    """
    # running on a filtered repository would be dangerous as markers could be
    # reported as exclusive when they are relevant for other filtered nodes.
    unfi = repo.unfiltered()

    # shortcut to various useful item
    nm = unfi.changelog.nodemap
    precursorsmarkers = unfi.obsstore.predecessors
    successormarkers = unfi.obsstore.successors
    childrenmarkers = unfi.obsstore.children

    # exclusive markers (return of the function)
    exclmarkers = set()
    # we need fast membership testing
    nodes = set(nodes)
    # looking for head in the obshistory
    #
    # XXX we are ignoring all issues in regard with cycle for now.
    stack = [n for n in nodes if not _filterprunes(successormarkers.get(n, ()))]
    stack.sort()
    # nodes already stacked
    seennodes = set(stack)
    while stack:
        current = stack.pop()
        # fetch precursors markers
        markers = list(precursorsmarkers.get(current, ()))
        # extend the list with prune markers
        for mark in successormarkers.get(current, ()):
            if not mark[1]:
                markers.append(mark)
        # and markers from children (looking for prune)
        for mark in childrenmarkers.get(current, ()):
            if not mark[1]:
                markers.append(mark)
        # traverse the markers
        for mark in markers:
            if mark in exclmarkers:
                # markers already selected
                continue

            # If the markers is about the current node, select it
            #
            # (this delay the addition of markers from children)
            if mark[1] or mark[0] == current:
                exclmarkers.add(mark)

            # should we keep traversing through the precursors?
            prec = mark[0]

            # nodes in the stack or already processed
            if prec in seennodes:
                continue

            # is this a locally known node ?
            known = prec in nm
            # if locally-known and not in the <nodes> set the traversal
            # stop here.
            if known and prec not in nodes:
                continue

            # do not keep going if there are unselected markers pointing to this
            # nodes. If we end up traversing these unselected markers later the
            # node will be taken care of at that point.
            precmarkers = _filterprunes(successormarkers.get(prec))
            if precmarkers.issubset(exclmarkers):
                seennodes.add(prec)
                stack.append(prec)

    return exclmarkers
307 307
def foreground(repo, nodes):
    """return all nodes in the "foreground" of other node

    The foreground of a revision is anything reachable using parent -> children
    or precursor -> successor relation. It is very similar to "descendant" but
    augmented with obsolescence information.

    Beware that possible obsolescence cycle may result if complex situation.
    """
    repo = repo.unfiltered()
    foreground = set(repo.set('%ln::', nodes))
    if repo.obsstore:
        # We only need this complicated logic if there is obsolescence
        # XXX will probably deserve an optimised revset.
        nm = repo.changelog.nodemap
        plen = -1
        # compute the whole set of successors or descendants
        # (fixed-point iteration: stop once a pass adds no new changeset)
        while len(foreground) != plen:
            plen = len(foreground)
            # start from every node currently in the foreground...
            succs = set(c.node() for c in foreground)
            # ...and follow obsolescence markers from the mutable ones only
            mutable = [c.node() for c in foreground if c.mutable()]
            succs.update(allsuccessors(repo.obsstore, mutable))
            # keep only locally-known nodes for the next revset query
            known = (n for n in succs if n in nm)
            foreground = set(repo.set('%ln::', known))
    return set(c.node() for c in foreground)
333 333
# effectflag field
#
# Effect-flag is a 1-byte bit field used to store what changed between a
# changeset and its successor(s).
#
# The effect flag is stored in obs-markers metadata while we iterate on the
# information design. That's why we have the EFFECTFLAGFIELD. If we come up
# with an incompatible design for effect flag, we can store a new design under
# another field name so we don't break readers. We plan to extend the existing
# obsmarkers bit-field when the effect flag design will be stabilized.
#
# The effect-flag is placed behind an experimental flag
# `effect-flags` set to off by default.
#

# metadata key under which the effect flag is stored
EFFECTFLAGFIELD = "ef1"

# individual bits of the effect flag; note DIFFCHANGED (bit 3) is declared
# before PARENTCHANGED (bit 2) -- the values, not declaration order, matter
DESCCHANGED = 1 << 0 # action changed the description
METACHANGED = 1 << 1 # action change the meta
DIFFCHANGED = 1 << 3 # action change diff introduced by the changeset
PARENTCHANGED = 1 << 2 # action change the parent
USERCHANGED = 1 << 4 # the user changed
DATECHANGED = 1 << 5 # the date changed
BRANCHCHANGED = 1 << 6 # the branch changed
358 358
# extra keys whose changes must NOT count toward METACHANGED
METABLACKLIST = [
    re.compile('^branch$'),
    re.compile('^.*-source$'),
    re.compile('^.*_source$'),
    re.compile('^source$'),
]

def metanotblacklisted(metaitem):
    """Return True when the key of a meta item (extrakey, extravalue)
    matches none of the blacklist patterns."""
    key = metaitem[0]
    for pattern in METABLACKLIST:
        if pattern.match(key):
            return False
    return True
373 373
374 374 def _prepare_hunk(hunk):
375 375 """Drop all information but the username and patch"""
376 376 cleanhunk = []
377 377 for line in hunk.splitlines():
378 378 if line.startswith(b'# User') or not line.startswith(b'#'):
379 379 if line.startswith(b'@@'):
380 380 line = b'@@\n'
381 381 cleanhunk.append(line)
382 382 return cleanhunk
383 383
def _getdifflines(iterdiff):
    """Return the next hunk from *iterdiff*, cleaned up for comparison,
    or None when the iterator is exhausted."""
    hunk = next(iterdiff, None)
    return None if hunk is None else _prepare_hunk(hunk)
392 392
def _cmpdiff(leftctx, rightctx):
    """return True if both ctx introduce the "same diff"

    This is a first and basic implementation, with many shortcoming.
    """
    # reach repo.ui through the public accessor (not the private _repo),
    # and do so consistently for every repo access below
    diffopts = diffutil.diffopts(leftctx.repo().ui, {'git': True})
    # Leftctx or right ctx might be filtered, so we need to use the contexts
    # with an unfiltered repository to safely compute the diff
    leftunfi = leftctx.repo().unfiltered()[leftctx.rev()]
    leftdiff = leftunfi.diff(opts=diffopts)
    rightunfi = rightctx.repo().unfiltered()[rightctx.rev()]
    rightdiff = rightunfi.diff(opts=diffopts)

    left, right = (0, 0)
    # walk both hunk streams in lockstep; _getdifflines returns None at
    # exhaustion, which terminates the loop
    while None not in (left, right):
        left = _getdifflines(leftdiff)
        right = _getdifflines(rightdiff)

        if left != right:
            return False
    return True
414 414
def geteffectflag(relation):
    """ From an obs-marker relation, compute what changed between the
    predecessor and the successor.
    """
    source = relation[0]
    effects = 0

    # simple attribute comparisons and the flag bit each one controls,
    # checked in this order for every successor
    simplechecks = [
        ('description', DESCCHANGED),
        ('user', USERCHANGED),
        ('date', DATECHANGED),
        ('branch', BRANCHCHANGED),
        ('parents', PARENTCHANGED),
    ]

    for changectx in relation[1]:
        for attrname, flag in simplechecks:
            if getattr(changectx, attrname)() != getattr(source, attrname)():
                effects |= flag

        # compare the non-blacklisted extra metadata
        ctxmeta = list(filter(metanotblacklisted, changectx.extra().items()))
        srcmeta = list(filter(metanotblacklisted, source.extra().items()))
        if ctxmeta != srcmeta:
            effects |= METACHANGED

        # compare the diff each changeset introduces
        if not _cmpdiff(source, changectx):
            effects |= DIFFCHANGED

    return effects
459 459
def getobsoleted(repo, tr):
    """return the set of pre-existing revisions obsoleted by a transaction"""
    # bound-method shortcuts, hoisted out of the loop
    torev = repo.unfiltered().changelog.nodemap.get
    getphase = repo._phasecache.phase
    getsuccs = repo.obsstore.successors.get
    public = phases.public

    newmarkers = tr.changes.get('obsmarkers')
    newrevs = tr.changes.get('revs')

    processed = set()
    obsoleted = set()
    for mark in newmarkers:
        node = mark[0]
        rev = torev(node)
        # skip unknown nodes, duplicates, and revisions added by this very
        # transaction (those are not "pre-existing")
        if rev is None or rev in processed or rev in newrevs:
            continue
        processed.add(rev)
        # public changesets can not be obsoleted
        if getphase(repo, rev) == public:
            continue
        # count the revision only if every marker obsoleting it was added
        # by this transaction
        if set(getsuccs(node) or []).issubset(newmarkers):
            obsoleted.add(rev)
    return obsoleted
481 481
class _succs(list):
    """small class to represent a successors with some metadata about it"""

    def __init__(self, *args, **kwargs):
        super(_succs, self).__init__(*args, **kwargs)
        # obsolescence markers that contributed to this successors set
        self.markers = set()

    def copy(self):
        """return a shallow copy, duplicating the markers set as well"""
        new = _succs(self)
        new.markers = self.markers.copy()
        return new

    @util.propertycache
    def _set(self):
        # immutable -- cached on first access, so the list content must not
        # be mutated once this property has been read
        return set(self)

    def canmerge(self, other):
        """return True when this successors set is contained in `other`"""
        return self._set.issubset(other._set)
501 501
def successorssets(repo, initialnode, closest=False, cache=None):
    """Return set of all latest successors of initial nodes

    The successors set of a changeset A are the group of revisions that succeed
    A. It succeeds A as a consistent whole, each revision being only a partial
    replacement. By default, the successors set contains non-obsolete
    changesets only, walking the obsolescence graph until reaching a leaf. If
    'closest' is set to True, closest successors-sets are returned (the
    obsolescence walk stops on known changesets).

    This function returns the full list of successor sets which is why it
    returns a list of tuples and not just a single tuple. Each tuple is a valid
    successors set. Note that (A,) may be a valid successors set for changeset A
    (see below).

    In most cases, a changeset A will have a single element (e.g. the changeset
    A is replaced by A') in its successors set. Though, it is also common for a
    changeset A to have no elements in its successor set (e.g. the changeset
    has been pruned). Therefore, the returned list of successors sets will be
    [(A',)] or [], respectively.

    When a changeset A is split into A' and B', however, it will result in a
    successors set containing more than a single element, i.e. [(A',B')].
    Divergent changesets will result in multiple successors sets, i.e. [(A',),
    (A'')].

    If a changeset A is not obsolete, then it will conceptually have no
    successors set. To distinguish this from a pruned changeset, the successor
    set will contain itself only, i.e. [(A,)].

    Finally, final successors unknown locally are considered to be pruned
    (pruned: obsoleted without any successors). (Final: successors not affected
    by markers).

    The 'closest' mode respects the repoview filtering. For example, without
    filter it will stop at the first locally known changeset, with 'visible'
    filter it will stop on visible changesets).

    The optional `cache` parameter is a dictionary that may contain
    precomputed successors sets. It is meant to reuse the computation of a
    previous call to `successorssets` when multiple calls are made at the same
    time. The cache dictionary is updated in place. The caller is responsible
    for its life span. Code that makes multiple calls to `successorssets`
    *should* use this cache mechanism or risk a performance hit.

    Since results are different depending on the 'closest' mode, the same cache
    cannot be reused for both modes.
    """

    succmarkers = repo.obsstore.successors

    # Stack of nodes we search successors sets for
    toproceed = [initialnode]
    # set version of above list for fast loop detection
    # element added to "toproceed" must be added here
    stackedset = set(toproceed)
    if cache is None:
        cache = {}

    # This while loop is the flattened version of a recursive search for
    # successors sets
    #
    # def successorssets(x):
    #    successors = directsuccessors(x)
    #    ss = [[]]
    #    for succ in directsuccessors(x):
    #        # product as in itertools cartesian product
    #        ss = product(ss, successorssets(succ))
    #    return ss
    #
    # But we can not use plain recursive calls here:
    # - that would blow the python call stack
    # - obsolescence markers may have cycles, we need to handle them.
    #
    # The `toproceed` list act as our call stack. Every node we search
    # successors set for are stacked there.
    #
    # The `stackedset` is set version of this stack used to check if a node is
    # already stacked. This check is used to detect cycles and prevent infinite
    # loop.
    #
    # successors set of all nodes are stored in the `cache` dictionary.
    #
    # After this while loop ends we use the cache to return the successors sets
    # for the node requested by the caller.
    while toproceed:
        # Every iteration tries to compute the successors sets of the topmost
        # node of the stack: CURRENT.
        #
        # There are four possible outcomes:
        #
        # 1) We already know the successors sets of CURRENT:
        #    -> mission accomplished, pop it from the stack.
        # 2) Stop the walk:
        #    default case: Node is not obsolete
        #    closest case: Node is known at this repo filter level
        #      -> the node is its own successors sets. Add it to the cache.
        # 3) We do not know successors set of direct successors of CURRENT:
        #    -> We add those successors to the stack.
        # 4) We know successors sets of all direct successors of CURRENT:
        #    -> We can compute CURRENT successors set and add it to the
        #       cache.
        #
        current = toproceed[-1]

        # case 2 condition is a bit hairy because of closest,
        # we compute it on its own
        case2condition = ((current not in succmarkers)
                          or (closest and current != initialnode
                              and current in repo))

        if current in cache:
            # case (1): We already know the successors sets
            stackedset.remove(toproceed.pop())
        elif case2condition:
            # case (2): end of walk.
            if current in repo:
                # We have a valid successors.
                cache[current] = [_succs((current,))]
            else:
                # Final obsolete version is unknown locally.
                # Do not count that as a valid successors
                cache[current] = []
        else:
            # cases (3) and (4)
            #
            # We proceed in two phases. Phase 1 aims to distinguish case (3)
            # from case (4):
            #
            #     For each direct successors of CURRENT, we check whether its
            #     successors sets are known. If they are not, we stack the
            #     unknown node and proceed to the next iteration of the while
            #     loop. (case 3)
            #
            #     During this step, we may detect obsolescence cycles: a node
            #     with unknown successors sets but already in the call stack.
            #     In such a situation, we arbitrary set the successors sets of
            #     the node to nothing (node pruned) to break the cycle.
            #
            #     If no break was encountered we proceed to phase 2.
            #
            # Phase 2 computes successors sets of CURRENT (case 4); see details
            # in phase 2 itself.
            #
            # Note the two levels of iteration in each phase.
            # - The first one handles obsolescence markers using CURRENT as
            #   precursor (successors markers of CURRENT).
            #
            #   Having multiple entry here means divergence.
            #
            # - The second one handles successors defined in each marker.
            #
            #   Having none means pruned node, multiple successors means split,
            #   single successors are standard replacement.
            #
            for mark in sorted(succmarkers[current]):
                for suc in mark[1]:
                    if suc not in cache:
                        if suc in stackedset:
                            # cycle breaking
                            cache[suc] = []
                        else:
                            # case (3) If we have not computed successors sets
                            # of one of those successors we add it to the
                            # `toproceed` stack and stop all work for this
                            # iteration.
                            toproceed.append(suc)
                            stackedset.add(suc)
                            break
                else:
                    continue
                break
            else:
                # case (4): we know all successors sets of all direct
                # successors
                #
                # Successors set contributed by each marker depends on the
                # successors sets of all its "successors" node.
                #
                # Each different marker is a divergence in the obsolescence
                # history. It contributes successors sets distinct from other
                # markers.
                #
                # Within a marker, a successor may have divergent successors
                # sets. In such a case, the marker will contribute multiple
                # divergent successors sets. If multiple successors have
                # divergent successors sets, a Cartesian product is used.
                #
                # At the end we post-process successors sets to remove
                # duplicated entry and successors set that are strict subset of
                # another one.
                succssets = []
                for mark in sorted(succmarkers[current]):
                    # successors sets contributed by this marker
                    base = _succs()
                    base.markers.add(mark)
                    markss = [base]
                    for suc in mark[1]:
                        # cardinal product with previous successors
                        productresult = []
                        for prefix in markss:
                            for suffix in cache[suc]:
                                newss = prefix.copy()
                                newss.markers.update(suffix.markers)
                                for part in suffix:
                                    # do not duplicated entry in successors set
                                    # first entry wins.
                                    if part not in newss:
                                        newss.append(part)
                                productresult.append(newss)
                        markss = productresult
                    succssets.extend(markss)
                # remove duplicated and subset
                seen = []
                final = []
                candidates = sorted((s for s in succssets if s),
                                    key=len, reverse=True)
                for cand in candidates:
                    for seensuccs in seen:
                        if cand.canmerge(seensuccs):
                            seensuccs.markers.update(cand.markers)
                            break
                    else:
                        final.append(cand)
                        seen.append(cand)
                final.reverse()  # put small successors set first
                cache[current] = final
    return cache[initialnode]
730 730
def successorsandmarkers(repo, ctx):
    """compute the raw data needed for computing obsfate

    Returns a list of dict, one dict per successors set, each carrying the
    'successors' set and the 'markers' that produced it. Returns None when
    ctx is not obsolete.
    """
    if not ctx.obsolete():
        return None

    ssets = successorssets(repo, ctx.node(), closest=True)

    # closestsuccessors returns an empty list for pruned revisions, remap it
    # into a list containing an empty list for future processing
    if ssets == []:
        ssets = [[]]

    # Try to recover pruned markers
    succsmap = repo.obsstore.successors
    fullsuccessorsets = []  # successor set + markers
    for sset in ssets:
        if sset:
            fullsuccessorsets.append(sset)
        else:
            # successorsset return an empty set() when ctx or one of its
            # successors is pruned.
            # In this case, walk the obs-markers tree again starting with ctx
            # and find the relevant pruning obs-makers, the ones without
            # successors.
            # Having these markers allow us to compute some information about
            # its fate, like who pruned this changeset and when.

            # XXX we do not catch all prune markers (eg rewritten then pruned)
            # (fix me later)
            foundany = False
            for mark in succsmap.get(ctx.node(), ()):
                if not mark[1]:
                    foundany = True
                    sset = _succs()
                    sset.markers.add(mark)
                    fullsuccessorsets.append(sset)
            if not foundany:
                # no prune marker found either: report an empty fate
                fullsuccessorsets.append(_succs())

    values = []
    for sset in fullsuccessorsets:
        values.append({'successors': sset, 'markers': sset.markers})

    return values
777 777
778 778 def _getobsfate(successorssets):
779 779 """ Compute a changeset obsolescence fate based on its successorssets.
780 780 Successors can be the tipmost ones or the immediate ones. This function
781 781 return values are not meant to be shown directly to users, it is meant to
782 782 be used by internal functions only.
783 783 Returns one fate from the following values:
784 784 - pruned
785 785 - diverged
786 786 - superseded
787 787 - superseded_split
788 788 """
789 789
790 790 if len(successorssets) == 0:
791 791 # The commit has been pruned
792 792 return 'pruned'
793 793 elif len(successorssets) > 1:
794 794 return 'diverged'
795 795 else:
796 796 # No divergence, only one set of successors
797 797 successors = successorssets[0]
798 798
799 799 if len(successors) == 1:
800 800 return 'superseded'
801 801 else:
802 802 return 'superseded_split'
803 803
def obsfateverb(successorset, markers):
    """ Return the verb summarizing the successorset and potentially using
    information from the markers
    """
    if not successorset:
        return 'pruned'
    if len(successorset) == 1:
        return 'rewritten'
    return 'split'
815 815
def markersdates(markers):
    """Return the creation date of each marker, preserving order."""
    return [mark[4] for mark in markers]
820 820
def markersusers(markers):
    """ Returns a sorted list of markers users without duplicates
    """
    users = set()
    for mark in markers:
        # metadata is stored as a tuple of (key, value) pairs
        user = dict(mark[3]).get('user')
        if user:
            users.add(user)
    return sorted(users)
828 828
def markersoperations(markers):
    """ Returns a sorted list of markers operations without duplicates
    """
    operations = set()
    for mark in markers:
        # metadata is stored as a tuple of (key, value) pairs
        operation = dict(mark[3]).get('operation')
        if operation:
            operations.add(operation)
    return sorted(operations)
837 837
def obsfateprinter(ui, repo, successors, markers, formatctx):
    """ Build a obsfate string for a single successorset using all obsfate
    related function defined in obsutil
    """
    quiet = ui.quiet
    verbose = ui.verbose
    normal = not verbose and not quiet

    pieces = []

    # Verb
    pieces.append(obsfateverb(successors, markers))

    # Operations
    operations = markersoperations(markers)
    if operations:
        pieces.append(" using %s" % ", ".join(operations))

    # Successors
    if successors:
        fmtsuccessors = [formatctx(repo[succ]) for succ in successors]
        pieces.append(" as %s" % ", ".join(fmtsuccessors))

    # Users
    users = markersusers(markers)
    # Filter out current user in not verbose mode to reduce amount of
    # information
    if not verbose:
        currentuser = ui.username(acceptempty=True)
        if len(users) == 1 and currentuser in users:
            users = None

    if (verbose or normal) and users:
        pieces.append(" by %s" % ", ".join(users))

    # Date (only shown in verbose mode)
    dates = markersdates(markers)
    if dates and verbose:
        mindate = min(dates)
        maxdate = max(dates)

        if mindate == maxdate:
            fmtdate = dateutil.datestr(mindate, '%Y-%m-%d %H:%M %1%2')
            pieces.append(" (at %s)" % fmtdate)
        else:
            fmtmindate = dateutil.datestr(mindate, '%Y-%m-%d %H:%M %1%2')
            fmtmaxdate = dateutil.datestr(maxdate, '%Y-%m-%d %H:%M %1%2')
            pieces.append(" (between %s and %s)" % (fmtmindate, fmtmaxdate))

    return "".join(pieces)
889 889
890 890
# Maps an obsolescence "fate" keyword (as computed by _getobsfate) to the
# user-facing template explaining why a hidden revision is unavailable.
filteredmsgtable = {
    "pruned": _("hidden revision '%s' is pruned"),
    "diverged": _("hidden revision '%s' has diverged"),
    "superseded": _("hidden revision '%s' was rewritten as: %s"),
    "superseded_split": _("hidden revision '%s' was split as: %s"),
    "superseded_split_several": _("hidden revision '%s' was split as: %s and "
                                  "%d more"),
}
899 899
def _getfilteredreason(repo, changeid, ctx):
    """return a human-friendly string on why a obsolete changeset is hidden
    """
    successors = successorssets(repo, ctx.node())
    fate = _getobsfate(successors)

    # Be more precise in case the revision is superseded
    if fate == 'pruned':
        return filteredmsgtable['pruned'] % changeid
    if fate == 'diverged':
        return filteredmsgtable['diverged'] % changeid
    if fate == 'superseded':
        successor = nodemod.short(successors[0][0])
        return filteredmsgtable['superseded'] % (changeid, successor)
    if fate == 'superseded_split':
        succs = [nodemod.short(node_id) for node_id in successors[0]]

        # Show at most two successors explicitly; summarize the rest.
        if len(succs) <= 2:
            return filteredmsgtable['superseded_split'] % (changeid,
                                                           ', '.join(succs))
        args = (changeid, ', '.join(succs[:2]), len(succs) - 2)
        return filteredmsgtable['superseded_split_several'] % args
929 929
def divergentsets(repo, ctx):
    """Compute sets of commits divergent with a given one"""
    cache = {}
    base = {}
    for pred in allpredecessors(repo.obsstore, [ctx.node()]):
        if pred == ctx.node():
            # a node can't be a base for divergence with itself
            continue
        for succset in successorssets(repo, pred, cache):
            if ctx.node() in succset:
                # we are only interested in *other* successor sets
                continue
            key = tuple(succset)
            if key in base:
                # we already know the latest base for this divergency
                continue
            base[key] = pred
    return [{'divergentnodes': divset, 'commonpredecessor': b}
            for divset, b in base.iteritems()]
949 949
def whyunstable(repo, ctx):
    """Return a list of dicts explaining each instability of ``ctx``."""
    res = []
    # Orphan: a parent is itself orphan or obsolete.
    if ctx.orphan():
        for parent in ctx.parents():
            if parent.orphan():
                kind = 'orphan'
            elif parent.obsolete():
                kind = 'obsolete'
            else:
                kind = None
            if kind is not None:
                res.append({'instability': 'orphan',
                            'reason': '%s parent' % kind,
                            'node': parent.hex()})
    # Phase-divergent: an immutable predecessor exists.
    if ctx.phasedivergent():
        predecessors = allpredecessors(repo.obsstore, [ctx.node()],
                                       ignoreflags=bumpedfix)
        for pred in predecessors:
            if pred in repo and not repo[pred].mutable():
                res.append({'instability': 'phase-divergent',
                            'reason': 'immutable predecessor',
                            'node': repo[pred].hex()})
    # Content-divergent: another successor set exists for a common
    # predecessor.
    if ctx.contentdivergent():
        for dset in divergentsets(repo, ctx):
            divnodes = [repo[node] for node in dset['divergentnodes']]
            res.append({'instability': 'content-divergent',
                        'divergentnodes': divnodes,
                        'reason': 'predecessor',
                        'node': nodemod.hex(dset['commonpredecessor'])})
    return res
General Comments 0
You need to be logged in to leave comments. Login now