##// END OF EJS Templates
obsutil: prefetch method in allpredecessors loop...
Boris Feld -
r40497:c7618901 default
parent child Browse files
Show More
@@ -1,980 +1,981 b''
1 1 # obsutil.py - utility functions for obsolescence
2 2 #
3 3 # Copyright 2017 Boris Feld <boris.feld@octobus.net>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import re
11 11
12 12 from .i18n import _
13 13 from . import (
14 14 diffutil,
15 15 encoding,
16 16 node as nodemod,
17 17 phases,
18 18 util,
19 19 )
20 20 from .utils import (
21 21 dateutil,
22 22 )
23 23
### obsolescence marker flag

## bumpedfix flag
#
# When a changeset A' succeeds a changeset A which became public, we call A'
# "bumped" because it is a successor of a public changeset.
#
#  o    A' (bumped)
#  |`:
#  | o  A
#  |/
#  o    Z
#
# The way to solve this situation is to create a new changeset Ad as a child
# of A. This changeset has the same content as A'. So the diff from A to A'
# is the same as the diff from A to Ad. Ad is marked as a successor of A'.
#
#  o   Ad
#  |`:
#  | x A'
#  |'|
#  o | A
#  |/
#  o   Z
#
# But by transitivity Ad is also a successor of A. To avoid having Ad marked
# as bumped too, we add the `bumpedfix` flag to the marker, <A', (Ad,)>.
# This flag means that the successor expresses the changes between the public
# and bumped version and fixes the situation, breaking the transitivity of
# "bumped" here.
bumpedfix = 1
# marker flag: node identifiers in this marker are SHA-256 (32 bytes) instead
# of the default SHA-1 (20 bytes)
usingsha256 = 2
56 56
class marker(object):
    """Wrap obsolete marker raw data.

    The raw data tuple layout is:
    (predecessor, successors, flags, metadata, date, parents)
    """

    def __init__(self, repo, data):
        # the repo argument will be used to create changectx in later version
        self._repo = repo
        self._data = data
        self._decodedmeta = None

    def __hash__(self):
        # hash of the raw tuple so equal markers hash alike
        return hash(self._data)

    def __eq__(self, other):
        # only markers of the exact same class may compare equal
        return type(self) == type(other) and self._data == other._data

    def prednode(self):
        """Predecessor changeset node identifier"""
        return self._data[0]

    def succnodes(self):
        """List of successor changesets node identifiers"""
        return self._data[1]

    def parentnodes(self):
        """Parents of the predecessors (None if not recorded)"""
        return self._data[5]

    def metadata(self):
        """Decoded metadata dictionary"""
        return dict(self._data[3])

    def date(self):
        """Creation date as (unixtime, offset)"""
        return self._data[4]

    def flags(self):
        """The flags field of the marker"""
        return self._data[2]
def getmarkers(repo, nodes=None, exclusive=False):
    """returns markers known in a repository

    If <nodes> is specified, only markers "relevant" to those nodes are
    returned"""
    if nodes is None:
        rawmarkers = repo.obsstore
    elif exclusive:
        rawmarkers = exclusivemarkers(repo, nodes)
    else:
        rawmarkers = repo.obsstore.relevantmarkers(nodes)

    # wrap each raw tuple in the convenience class
    for rawdata in rawmarkers:
        yield marker(repo, rawdata)
112 112
def closestpredecessors(repo, nodeid):
    """yield the list of next predecessors pointing on visible changectx nodes

    This function respects the repoview filtering: filtered revisions are
    considered missing.
    """
    lookup = repo.obsstore.predecessors.get
    pending = [nodeid]
    visited = {nodeid}

    while pending:
        node = pending.pop()
        for markdata in lookup(node, ()):
            candidate = markdata[0]

            # basic cycle protection
            if candidate in visited:
                continue
            visited.add(candidate)

            if candidate in repo:
                # visible predecessor: report it and stop this branch
                yield candidate
            else:
                # filtered/unknown: keep walking further back
                pending.append(candidate)
def allpredecessors(obsstore, nodes, ignoreflags=0):
    """Yield node for every precursors of <nodes>.

    Some precursors may be unknown locally.

    This is a linear yield unsuited to detecting folded changesets. It
    includes initial nodes too."""

    pending = set(nodes)
    visited = set(pending)
    # prefetch the bound method: it is looked up once instead of on every
    # iteration of the hot loop
    getpreds = obsstore.predecessors.get
    while pending:
        node = pending.pop()
        yield node
        for markdata in getpreds(node, ()):
            # ignore marker flagged with specified flag
            if markdata[2] & ignoreflags:
                continue
            predecessor = markdata[0]
            if predecessor not in visited:
                visited.add(predecessor)
                pending.add(predecessor)
162 163
def allsuccessors(obsstore, nodes, ignoreflags=0):
    """Yield node for every successor of <nodes>.

    Some successors may be unknown locally.

    This is a linear yield unsuited to detecting split changesets. It includes
    initial nodes too."""
    remaining = set(nodes)
    seen = set(remaining)
    # prefetch the bound method out of the loop, mirroring the same
    # optimization applied to allpredecessors
    succ = obsstore.successors.get
    while remaining:
        current = remaining.pop()
        yield current
        for mark in succ(current, ()):
            # ignore marker flagged with specified flag
            if mark[2] & ignoreflags:
                continue
            for suc in mark[1]:
                if suc not in seen:
                    seen.add(suc)
                    remaining.add(suc)
183 184
184 185 def _filterprunes(markers):
185 186 """return a set with no prune markers"""
186 187 return set(m for m in markers if m[1])
187 188
def exclusivemarkers(repo, nodes):
    """set of markers relevant to "nodes" but no other locally-known nodes

    This function computes the set of markers "exclusive" to a locally-known
    node. This means we walk the markers starting from <nodes> until we reach a
    locally-known precursors outside of <nodes>. Element of <nodes> with
    locally-known successors outside of <nodes> are ignored (since their
    precursors markers are also relevant to these successors).

    For example:

        # (A0 rewritten as A1)
        #
        # A0 <-1- A1 # Marker "1" is exclusive to A1

        or

        # (A0 rewritten as AX; AX rewritten as A1; AX is unknown locally)
        #
        # <-1- A0 <-2- AX <-3- A1 # Marker "2,3" are exclusive to A1

        or

        # (A0 has unknown precursors, A0 rewritten as A1 and A2 (divergence))
        #
        #          <-2- A1 # Marker "2" is exclusive to A0,A1
        #        /
        # <-1- A0
        #        \
        #         <-3- A2 # Marker "3" is exclusive to A0,A2
        #
        # in addition:
        #
        #  Markers "2,3" are exclusive to A1,A2
        #  Markers "1,2,3" are exclusive to A0,A1,A2

    See test/test-obsolete-bundle-strip.t for more examples.

    An example usage is strip. When stripping a changeset, we also want to
    strip the markers exclusive to this changeset. Otherwise we would have
    "dangling" obsolescence markers from its precursors: obsolescence markers
    marking a node as obsolete without any successors available locally.

    As for relevant markers, the prune markers for children will be followed.
    Of course, they will only be followed if the pruned children is
    locally-known. Since the prune markers are relevant to the pruned node.
    However, while prune markers are considered relevant to the parent of the
    pruned changesets, prune markers for locally-known changeset (with no
    successors) are considered exclusive to the pruned nodes. This allows
    to strip the prune markers (with the rest of the exclusive chain) alongside
    the pruned changesets.
    """
    # running on a filtered repository would be dangerous as markers could be
    # reported as exclusive when they are relevant for other filtered nodes.
    unfi = repo.unfiltered()

    # shortcut to various useful item
    nm = unfi.changelog.nodemap
    precursorsmarkers = unfi.obsstore.predecessors
    successormarkers = unfi.obsstore.successors
    childrenmarkers = unfi.obsstore.children

    # exclusive markers (return of the function)
    exclmarkers = set()
    # we need fast membership testing
    nodes = set(nodes)
    # looking for head in the obshistory
    #
    # XXX we are ignoring all issues in regard with cycle for now.
    stack = [n for n in nodes if not _filterprunes(successormarkers.get(n, ()))]
    stack.sort()
    # nodes already stacked
    seennodes = set(stack)
    while stack:
        current = stack.pop()
        # fetch precursors markers
        markers = list(precursorsmarkers.get(current, ()))
        # extend the list with prune markers
        for mark in successormarkers.get(current, ()):
            if not mark[1]:
                markers.append(mark)
        # and markers from children (looking for prune)
        for mark in childrenmarkers.get(current, ()):
            if not mark[1]:
                markers.append(mark)
        # traverse the markers
        for mark in markers:
            if mark in exclmarkers:
                # markers already selected
                continue

            # If the markers is about the current node, select it
            #
            # (this delay the addition of markers from children)
            if mark[1] or mark[0] == current:
                exclmarkers.add(mark)

            # should we keep traversing through the precursors?
            prec = mark[0]

            # nodes in the stack or already processed
            if prec in seennodes:
                continue

            # is this a locally known node ?
            known = prec in nm
            # if locally-known and not in the <nodes> set the traversal
            # stop here.
            if known and prec not in nodes:
                continue

            # do not keep going if there are unselected markers pointing to this
            # nodes. If we end up traversing these unselected markers later the
            # node will be taken care of at that point.
            # NOTE(review): successormarkers.get(prec) has no default here;
            # presumably every precursor reached via a marker has successor
            # markers recorded — confirm before relying on it.
            precmarkers = _filterprunes(successormarkers.get(prec))
            if precmarkers.issubset(exclmarkers):
                seennodes.add(prec)
                stack.append(prec)

    return exclmarkers
308 309
def foreground(repo, nodes):
    """return all nodes in the "foreground" of other node

    The foreground of a revision is anything reachable using parent -> children
    or precursor -> successor relation. It is very similar to "descendant" but
    augmented with obsolescence information.

    Beware that possible obsolescence cycle may result if complex situation.
    """
    repo = repo.unfiltered()
    # start with plain descendants of the requested nodes
    foreground = set(repo.set('%ln::', nodes))
    if repo.obsstore:
        # We only need this complicated logic if there is obsolescence
        # XXX will probably deserve an optimised revset.
        nm = repo.changelog.nodemap
        plen = -1
        # compute the whole set of successors or descendants; iterate until
        # the set stops growing (fixed point)
        while len(foreground) != plen:
            plen = len(foreground)
            succs = set(c.node() for c in foreground)
            mutable = [c.node() for c in foreground if c.mutable()]
            succs.update(allsuccessors(repo.obsstore, mutable))
            # only locally-known successors can seed the next revset query
            known = (n for n in succs if n in nm)
            foreground = set(repo.set('%ln::', known))
    return set(c.node() for c in foreground)
334 335
# effectflag field
#
# Effect-flag is a 1-byte bit field used to store what changed between a
# changeset and its successor(s).
#
# The effect flag is stored in obs-markers metadata while we iterate on the
# information design. That's why we have the EFFECTFLAGFIELD. If we come up
# with an incompatible design for effect flag, we can store a new design under
# another field name so we don't break readers. We plan to extend the existing
# obsmarkers bit-field when the effect flag design will be stabilized.
#
# The effect-flag is placed behind an experimental flag
# `effect-flags` set to off by default.
#

# metadata key under which the effect flag is stored
EFFECTFLAGFIELD = "ef1"

# NOTE: DIFFCHANGED (1 << 3) is intentionally listed before PARENTCHANGED
# (1 << 2); the declaration order does not follow bit order.  These values are
# persisted in marker metadata, so they must not be renumbered.
DESCCHANGED = 1 << 0 # action changed the description
METACHANGED = 1 << 1 # action change the meta
DIFFCHANGED = 1 << 3 # action change diff introduced by the changeset
PARENTCHANGED = 1 << 2 # action change the parent
USERCHANGED = 1 << 4 # the user changed
DATECHANGED = 1 << 5 # the date changed
BRANCHCHANGED = 1 << 6 # the branch changed
359 360
# extra keys whose changes should never count as a metadata change
METABLACKLIST = [re.compile(pattern) for pattern in (
    '^branch$',
    '^.*-source$',
    '^.*_source$',
    '^source$',
)]

def metanotblacklisted(metaitem):
    """ Check that the key of a meta item (extrakey, extravalue) does not
    match at least one of the blacklist pattern
    """
    key = metaitem[0]
    for pattern in METABLACKLIST:
        if pattern.match(key):
            return False
    return True
374 375
375 376 def _prepare_hunk(hunk):
376 377 """Drop all information but the username and patch"""
377 378 cleanhunk = []
378 379 for line in hunk.splitlines():
379 380 if line.startswith(b'# User') or not line.startswith(b'#'):
380 381 if line.startswith(b'@@'):
381 382 line = b'@@\n'
382 383 cleanhunk.append(line)
383 384 return cleanhunk
384 385
385 386 def _getdifflines(iterdiff):
386 387 """return a cleaned up lines"""
387 388 lines = next(iterdiff, None)
388 389
389 390 if lines is None:
390 391 return lines
391 392
392 393 return _prepare_hunk(lines)
393 394
def _cmpdiff(leftctx, rightctx):
    """return True if both ctx introduce the "same diff"

    This is a first and basic implementation, with many shortcoming.
    """
    diffopts = diffutil.diffallopts(leftctx.repo().ui, {'git': True})
    # Leftctx or right ctx might be filtered, so we need to use the contexts
    # with an unfiltered repository to safely compute the diff
    leftunfi = leftctx._repo.unfiltered()[leftctx.rev()]
    leftdiff = leftunfi.diff(opts=diffopts)
    rightunfi = rightctx._repo.unfiltered()[rightctx.rev()]
    rightdiff = rightunfi.diff(opts=diffopts)

    # (0, 0) is a sentinel so the loop body runs at least once; the loop ends
    # when either iterator is exhausted (_getdifflines returns None)
    left, right = (0, 0)
    while None not in (left, right):
        left = _getdifflines(leftdiff)
        right = _getdifflines(rightdiff)

        # compare hunk by hunk; a final (None, None) pair compares equal and
        # exits the loop with both diffs fully matched
        if left != right:
            return False
    return True
415 416
def geteffectflag(source, successors):
    """ From an obs-marker relation, compute what changed between the
    predecessor and the successor.

    Returns a bit field combining the *CHANGED flags defined above.
    """
    effects = 0

    for succ in successors:
        # description change
        if succ.description() != source.description():
            effects |= DESCCHANGED

        # user change
        if succ.user() != source.user():
            effects |= USERCHANGED

        # date change
        if succ.date() != source.date():
            effects |= DATECHANGED

        # branch change
        if succ.branch() != source.branch():
            effects |= BRANCHCHANGED

        # at least one parent changed
        if succ.parents() != source.parents():
            effects |= PARENTCHANGED

        # other (non-blacklisted) extra metadata change
        succmeta = list(filter(metanotblacklisted, succ.extra().items()))
        srcmeta = list(filter(metanotblacklisted, source.extra().items()))
        if succmeta != srcmeta:
            effects |= METACHANGED

        # content (diff) change
        if not _cmpdiff(source, succ):
            effects |= DIFFCHANGED

    return effects
458 459
def getobsoleted(repo, tr):
    """return the set of pre-existing revisions obsoleted by a transaction"""
    # prefetch lookups used in the loop
    torev = repo.unfiltered().changelog.nodemap.get
    phase = repo._phasecache.phase
    succsmarkers = repo.obsstore.successors.get
    public = phases.public
    addedmarkers = tr.changes['obsmarkers']
    origrepolen = tr.changes['origrepolen']
    seenrevs = set()
    obsoleted = set()
    for mark in addedmarkers:
        node = mark[0]
        rev = torev(node)
        # skip unknown nodes, already-processed revs, and revs created by
        # this very transaction (only pre-existing revisions count)
        if rev is None or rev in seenrevs or rev >= origrepolen:
            continue
        seenrevs.add(rev)
        # public changesets cannot be obsoleted
        if phase(repo, rev) == public:
            continue
        # only report the rev if every marker obsoleting it was added by this
        # transaction
        if set(succsmarkers(node) or []).issubset(addedmarkers):
            obsoleted.add(rev)
    return obsoleted
480 481
class _succs(list):
    """small class to represent a successors with some metadata about it"""

    def __init__(self, *args, **kwargs):
        super(_succs, self).__init__(*args, **kwargs)
        # obsolescence markers this successors set was built from
        self.markers = set()

    def copy(self):
        new = _succs(self)
        new.markers = self.markers.copy()
        return new

    @util.propertycache
    def _set(self):
        # immutable
        # cached set view of the list contents; valid only because the list
        # is not mutated after _set is first accessed
        return set(self)

    def canmerge(self, other):
        # True when every node of self also appears in other
        return self._set.issubset(other._set)
500 501
def successorssets(repo, initialnode, closest=False, cache=None):
    """Return set of all latest successors of initial nodes

    The successors set of a changeset A are the group of revisions that succeed
    A. It succeeds A as a consistent whole, each revision being only a partial
    replacement. By default, the successors set contains non-obsolete
    changesets only, walking the obsolescence graph until reaching a leaf. If
    'closest' is set to True, closest successors-sets are returned (the
    obsolescence walk stops on known changesets).

    This function returns the full list of successor sets which is why it
    returns a list of tuples and not just a single tuple. Each tuple is a valid
    successors set. Note that (A,) may be a valid successors set for changeset A
    (see below).

    In most cases, a changeset A will have a single element (e.g. the changeset
    A is replaced by A') in its successors set. Though, it is also common for a
    changeset A to have no elements in its successor set (e.g. the changeset
    has been pruned). Therefore, the returned list of successors sets will be
    [(A',)] or [], respectively.

    When a changeset A is split into A' and B', however, it will result in a
    successors set containing more than a single element, i.e. [(A',B')].
    Divergent changesets will result in multiple successors sets, i.e. [(A',),
    (A'')].

    If a changeset A is not obsolete, then it will conceptually have no
    successors set. To distinguish this from a pruned changeset, the successor
    set will contain itself only, i.e. [(A,)].

    Finally, final successors unknown locally are considered to be pruned
    (pruned: obsoleted without any successors). (Final: successors not affected
    by markers).

    The 'closest' mode respects the repoview filtering. For example, without
    filter it will stop at the first locally known changeset, with 'visible'
    filter it will stop on visible changesets).

    The optional `cache` parameter is a dictionary that may contain
    precomputed successors sets. It is meant to reuse the computation of a
    previous call to `successorssets` when multiple calls are made at the same
    time. The cache dictionary is updated in place. The caller is responsible
    for its life span. Code that makes multiple calls to `successorssets`
    *should* use this cache mechanism or risk a performance hit.

    Since results are different depending on the 'closest' mode, the same cache
    cannot be reused for both modes.
    """

    succmarkers = repo.obsstore.successors

    # Stack of nodes we search successors sets for
    toproceed = [initialnode]
    # set version of above list for fast loop detection
    # element added to "toproceed" must be added here
    stackedset = set(toproceed)
    if cache is None:
        cache = {}

    # This while loop is the flattened version of a recursive search for
    # successors sets
    #
    # def successorssets(x):
    #    successors = directsuccessors(x)
    #    ss = [[]]
    #    for succ in directsuccessors(x):
    #        # product as in itertools cartesian product
    #        ss = product(ss, successorssets(succ))
    #    return ss
    #
    # But we can not use plain recursive calls here:
    # - that would blow the python call stack
    # - obsolescence markers may have cycles, we need to handle them.
    #
    # The `toproceed` list act as our call stack. Every node we search
    # successors set for are stacked there.
    #
    # The `stackedset` is set version of this stack used to check if a node is
    # already stacked. This check is used to detect cycles and prevent infinite
    # loop.
    #
    # successors set of all nodes are stored in the `cache` dictionary.
    #
    # After this while loop ends we use the cache to return the successors sets
    # for the node requested by the caller.
    while toproceed:
        # Every iteration tries to compute the successors sets of the topmost
        # node of the stack: CURRENT.
        #
        # There are four possible outcomes:
        #
        #    1) We already know the successors sets of CURRENT:
        #       -> mission accomplished, pop it from the stack.
        #    2) Stop the walk:
        #       default case: Node is not obsolete
        #       closest case: Node is known at this repo filter level
        #       -> the node is its own successors sets. Add it to the cache.
        #    3) We do not know successors set of direct successors of CURRENT:
        #       -> We add those successors to the stack.
        #    4) We know successors sets of all direct successors of CURRENT:
        #       -> We can compute CURRENT successors set and add it to the
        #          cache.
        #
        current = toproceed[-1]

        # case 2 condition is a bit hairy because of closest,
        # we compute it on its own
        case2condition = ((current not in succmarkers)
                          or (closest and current != initialnode
                              and current in repo))

        if current in cache:
            # case (1): We already know the successors sets
            stackedset.remove(toproceed.pop())
        elif case2condition:
            # case (2): end of walk.
            if current in repo:
                # We have a valid successors.
                cache[current] = [_succs((current,))]
            else:
                # Final obsolete version is unknown locally.
                # Do not count that as a valid successors
                cache[current] = []
        else:
            # cases (3) and (4)
            #
            # We proceed in two phases. Phase 1 aims to distinguish case (3)
            # from case (4):
            #
            #     For each direct successors of CURRENT, we check whether its
            #     successors sets are known. If they are not, we stack the
            #     unknown node and proceed to the next iteration of the while
            #     loop. (case 3)
            #
            #     During this step, we may detect obsolescence cycles: a node
            #     with unknown successors sets but already in the call stack.
            #     In such a situation, we arbitrary set the successors sets of
            #     the node to nothing (node pruned) to break the cycle.
            #
            #     If no break was encountered we proceed to phase 2.
            #
            # Phase 2 computes successors sets of CURRENT (case 4); see details
            # in phase 2 itself.
            #
            # Note the two levels of iteration in each phase.
            # - The first one handles obsolescence markers using CURRENT as
            #   precursor (successors markers of CURRENT).
            #
            #   Having multiple entry here means divergence.
            #
            # - The second one handles successors defined in each marker.
            #
            #   Having none means pruned node, multiple successors means split,
            #   single successors are standard replacement.
            #
            for mark in sorted(succmarkers[current]):
                for suc in mark[1]:
                    if suc not in cache:
                        if suc in stackedset:
                            # cycle breaking
                            cache[suc] = []
                        else:
                            # case (3) If we have not computed successors sets
                            # of one of those successors we add it to the
                            # `toproceed` stack and stop all work for this
                            # iteration.
                            toproceed.append(suc)
                            stackedset.add(suc)
                            break
                else:
                    continue
                break
            else:
                # case (4): we know all successors sets of all direct
                # successors
                #
                # Successors set contributed by each marker depends on the
                # successors sets of all its "successors" node.
                #
                # Each different marker is a divergence in the obsolescence
                # history. It contributes successors sets distinct from other
                # markers.
                #
                # Within a marker, a successor may have divergent successors
                # sets. In such a case, the marker will contribute multiple
                # divergent successors sets. If multiple successors have
                # divergent successors sets, a Cartesian product is used.
                #
                # At the end we post-process successors sets to remove
                # duplicated entry and successors set that are strict subset of
                # another one.
                succssets = []
                for mark in sorted(succmarkers[current]):
                    # successors sets contributed by this marker
                    base = _succs()
                    base.markers.add(mark)
                    markss = [base]
                    for suc in mark[1]:
                        # cardinal product with previous successors
                        productresult = []
                        for prefix in markss:
                            for suffix in cache[suc]:
                                newss = prefix.copy()
                                newss.markers.update(suffix.markers)
                                for part in suffix:
                                    # do not duplicated entry in successors set
                                    # first entry wins.
                                    if part not in newss:
                                        newss.append(part)
                                productresult.append(newss)
                        markss = productresult
                    succssets.extend(markss)
                # remove duplicated and subset
                seen = []
                final = []
                candidates = sorted((s for s in succssets if s),
                                    key=len, reverse=True)
                for cand in candidates:
                    for seensuccs in seen:
                        if cand.canmerge(seensuccs):
                            seensuccs.markers.update(cand.markers)
                            break
                    else:
                        final.append(cand)
                        seen.append(cand)
                final.reverse()  # put small successors set first
                cache[current] = final
    return cache[initialnode]
729 730
def successorsandmarkers(repo, ctx):
    """compute the raw data needed for computing obsfate

    Returns a list of dict, one dict per successors set, each carrying the
    'successors' set and the 'markers' it was built from. Returns None when
    ctx is not obsolete.
    """
    if not ctx.obsolete():
        return None

    ssets = successorssets(repo, ctx.node(), closest=True)

    # closestsuccessors returns an empty list for pruned revisions, remap it
    # into a list containing an empty list for future processing
    if ssets == []:
        ssets = [[]]

    # Try to recover pruned markers
    succsmap = repo.obsstore.successors
    fullsuccessorsets = []  # successor set + markers
    for sset in ssets:
        if sset:
            fullsuccessorsets.append(sset)
        else:
            # successorsset return an empty set() when ctx or one of its
            # successors is pruned.
            # In this case, walk the obs-markers tree again starting with ctx
            # and find the relevant pruning obs-makers, the ones without
            # successors.
            # Having these markers allow us to compute some information about
            # its fate, like who pruned this changeset and when.

            # XXX we do not catch all prune markers (eg rewritten then pruned)
            # (fix me later)
            foundany = False
            for mark in succsmap.get(ctx.node(), ()):
                if not mark[1]:
                    foundany = True
                    sset = _succs()
                    sset.markers.add(mark)
                    fullsuccessorsets.append(sset)
            if not foundany:
                # no prune marker found either: report an empty set with no
                # markers
                fullsuccessorsets.append(_succs())

    values = []
    for sset in fullsuccessorsets:
        values.append({'successors': sset, 'markers': sset.markers})

    return values
776 777
777 778 def _getobsfate(successorssets):
778 779 """ Compute a changeset obsolescence fate based on its successorssets.
779 780 Successors can be the tipmost ones or the immediate ones. This function
780 781 return values are not meant to be shown directly to users, it is meant to
781 782 be used by internal functions only.
782 783 Returns one fate from the following values:
783 784 - pruned
784 785 - diverged
785 786 - superseded
786 787 - superseded_split
787 788 """
788 789
789 790 if len(successorssets) == 0:
790 791 # The commit has been pruned
791 792 return 'pruned'
792 793 elif len(successorssets) > 1:
793 794 return 'diverged'
794 795 else:
795 796 # No divergence, only one set of successors
796 797 successors = successorssets[0]
797 798
798 799 if len(successors) == 1:
799 800 return 'superseded'
800 801 else:
801 802 return 'superseded_split'
802 803
def obsfateverb(successorset, markers):
    """ Return the verb summarizing the successorset and potentially using
    information from the markers
    """
    if not successorset:
        return 'pruned'
    if len(successorset) == 1:
        return 'rewritten'
    return 'split'
814 815
def markersdates(markers):
    """returns the list of dates for a list of markers

    The date lives at index 4 of the raw marker tuple."""
    return [mark[4] for mark in markers]
819 820
def markersusers(markers):
    """ Returns a sorted list of markers users without duplicates
    """
    users = set()
    for mark in markers:
        # metadata lives at index 3 as a tuple of (key, value) pairs
        user = dict(mark[3]).get('user')
        if user:
            users.add(encoding.tolocal(user))
    return sorted(users)
828 829
def markersoperations(markers):
    """ Returns a sorted list of markers operations without duplicates
    """
    operations = set()
    for mark in markers:
        # metadata lives at index 3 as a tuple of (key, value) pairs
        operation = dict(mark[3]).get('operation')
        if operation:
            operations.add(operation)
    return sorted(operations)
837 838
def obsfateprinter(ui, repo, successors, markers, formatctx):
    """ Build a obsfate string for a single successorset using all obsfate
    related function defined in obsutil
    """
    quiet = ui.quiet
    verbose = ui.verbose
    normal = not verbose and not quiet

    line = []

    # Verb
    line.append(obsfateverb(successors, markers))

    # Operations
    operations = markersoperations(markers)
    if operations:
        line.append(" using %s" % ", ".join(operations))

    # Successors
    if successors:
        fmtsuccessors = [formatctx(repo[succ]) for succ in successors]
        line.append(" as %s" % ", ".join(fmtsuccessors))

    # Users
    users = markersusers(markers)
    # Filter out current user in not verbose mode to reduce amount of
    # information
    if not verbose:
        currentuser = ui.username(acceptempty=True)
        if len(users) == 1 and currentuser in users:
            users = None

    if (verbose or normal) and users:
        line.append(" by %s" % ", ".join(users))

    # Date
    dates = markersdates(markers)

    # dates are shown only in verbose mode; a single timestamp gets "at",
    # a range gets "between ... and ..."
    if dates and verbose:
        min_date = min(dates)
        max_date = max(dates)

        if min_date == max_date:
            fmtmin_date = dateutil.datestr(min_date, '%Y-%m-%d %H:%M %1%2')
            line.append(" (at %s)" % fmtmin_date)
        else:
            fmtmin_date = dateutil.datestr(min_date, '%Y-%m-%d %H:%M %1%2')
            fmtmax_date = dateutil.datestr(max_date, '%Y-%m-%d %H:%M %1%2')
            line.append(" (between %s and %s)" % (fmtmin_date, fmtmax_date))

    return "".join(line)
889 890
890 891
# Templates explaining why an obsolete changeset is hidden, keyed by the
# fate string computed from its successors sets (see _getfilteredreason).
filteredmsgtable = {
    "pruned": _("hidden revision '%s' is pruned"),
    "diverged": _("hidden revision '%s' has diverged"),
    "superseded": _("hidden revision '%s' was rewritten as: %s"),
    "superseded_split": _("hidden revision '%s' was split as: %s"),
    "superseded_split_several": _("hidden revision '%s' was split as: %s and "
                                  "%d more"),
}
899 900
def _getfilteredreason(repo, changeid, ctx):
    """return a human-friendly string on why a obsolete changeset is hidden
    """
    successors = successorssets(repo, ctx.node())
    fate = _getobsfate(successors)

    # Be more precise in case the revision is superseded
    if fate == 'pruned':
        return filteredmsgtable['pruned'] % changeid
    if fate == 'diverged':
        return filteredmsgtable['diverged'] % changeid
    if fate == 'superseded':
        single_successor = nodemod.short(successors[0][0])
        return filteredmsgtable['superseded'] % (changeid, single_successor)
    if fate == 'superseded_split':
        succs = [nodemod.short(node_id) for node_id in successors[0]]
        if len(succs) <= 2:
            # Short enough: spell out every successor.
            return filteredmsgtable['superseded_split'] % (changeid,
                                                           ', '.join(succs))
        # Only spell out the first two successors, summarize the rest as
        # a count.
        args = (changeid, ', '.join(succs[:2]), len(succs) - 2)
        return filteredmsgtable['superseded_split_several'] % args
929 930
def divergentsets(repo, ctx):
    """Compute sets of commits divergent with a given one"""
    cache = {}
    base = {}
    node = ctx.node()
    for pred in allpredecessors(repo.obsstore, [node]):
        # a node can't be a base for divergence with itself
        if pred == node:
            continue
        for succset in successorssets(repo, pred, cache):
            if node in succset:
                # we are only interested in *other* successor sets
                continue
            key = tuple(succset)
            if key in base:
                # we already know the latest base for this divergence
                continue
            base[key] = pred
    return [{'divergentnodes': divset, 'commonpredecessor': b}
            for divset, b in base.iteritems()]
949 950
def whyunstable(repo, ctx):
    """Return the reasons making ``ctx`` unstable.

    Each reason is a dict carrying at least 'instability', 'reason' and
    'node' keys.
    """
    entries = []
    if ctx.orphan():
        # Orphan: at least one parent is itself orphan or obsolete.
        for parent in ctx.parents():
            if parent.orphan():
                kind = 'orphan'
            elif parent.obsolete():
                kind = 'obsolete'
            else:
                kind = None
            if kind is not None:
                entries.append({'instability': 'orphan',
                                'reason': '%s parent' % kind,
                                'node': parent.hex()})
    if ctx.phasedivergent():
        # Phase-divergent: some predecessor is immutable (public).
        predecessors = allpredecessors(repo.obsstore, [ctx.node()],
                                       ignoreflags=bumpedfix)
        for pred in predecessors:
            if pred not in repo:
                continue
            predctx = repo[pred]
            if predctx.mutable():
                continue
            entries.append({'instability': 'phase-divergent',
                            'reason': 'immutable predecessor',
                            'node': predctx.hex()})
    if ctx.contentdivergent():
        # Content-divergent: other successor sets share a predecessor.
        for dset in divergentsets(repo, ctx):
            divnodes = [repo[n] for n in dset['divergentnodes']]
            entries.append({'instability': 'content-divergent',
                            'divergentnodes': divnodes,
                            'reason': 'predecessor',
                            'node': nodemod.hex(dset['commonpredecessor'])})
    return entries
General Comments 0
You need to be logged in to leave comments. Login now