##// END OF EJS Templates
obsutil: make sure "addedrevs" is not None in getobsoleted()...
Yuya Nishihara -
r39577:52e6171e default
parent child Browse files
Show More
@@ -1,982 +1,982 b''
1 1 # obsutil.py - utility functions for obsolescence
2 2 #
3 3 # Copyright 2017 Boris Feld <boris.feld@octobus.net>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import re
11 11
12 12 from .i18n import _
13 13 from . import (
14 14 diffutil,
15 15 encoding,
16 16 node as nodemod,
17 17 phases,
18 18 util,
19 19 )
20 20 from .utils import (
21 21 dateutil,
22 22 )
23 23
### obsolescence marker flag

## bumpedfix flag
#
# When a changeset A' succeeds a changeset A which became public, we call A'
# "bumped" because it is a successor of a public changeset.
#
# o A' (bumped)
# |`:
# | o A
# |/
# o Z
#
# The way to solve this situation is to create a new changeset Ad as a child
# of A. This changeset has the same content as A'. So the diff from A to A'
# is the same as the diff from A to Ad. Ad is marked as a successor of A'.
#
# o Ad
# |`:
# | x A'
# |'|
# o | A
# |/
# o Z
#
# But by transitivity Ad is also a successor of A. To avoid having Ad marked
# as bumped too, we add the `bumpedfix` flag to the marker <A', (Ad,)>.
# This flag means that the successor expresses the changes between the public
# and bumped versions and fixes the situation, breaking the transitivity of
# "bumped" here.
bumpedfix = 1
# flag indicating the marker uses a larger (sha256-sized) hash for its nodes
usingsha256 = 2
56 56
class marker(object):
    """Wrap obsolete marker raw data"""

    def __init__(self, repo, data):
        # the repo argument will be used to create changectx in later version
        self._repo = repo
        self._data = data
        self._decodedmeta = None

    def __hash__(self):
        # markers with identical raw data hash alike, matching __eq__
        return hash(self._data)

    def __eq__(self, other):
        # equality is raw-data equality, restricted to the exact same type
        return type(other) is type(self) and self._data == other._data

    def prednode(self):
        """Predecessor changeset node identifier"""
        return self._data[0]

    def succnodes(self):
        """List of successor changesets node identifiers"""
        return self._data[1]

    def parentnodes(self):
        """Parents of the predecessors (None if not recorded)"""
        return self._data[5]

    def metadata(self):
        """Decoded metadata dictionary"""
        # raw metadata is stored as a tuple of (key, value) pairs
        return dict(self._data[3])

    def date(self):
        """Creation date as (unixtime, offset)"""
        return self._data[4]

    def flags(self):
        """The flags field of the marker"""
        return self._data[2]
97 97
def getmarkers(repo, nodes=None, exclusive=False):
    """returns markers known in a repository

    If <nodes> is specified, only markers "relevant" to those nodes are
    returned"""
    if nodes is None:
        # no filtering requested: walk every marker in the store
        rawmarkers = repo.obsstore
    elif exclusive:
        # restrict to markers exclusive to the requested nodes
        rawmarkers = exclusivemarkers(repo, nodes)
    else:
        # restrict to markers relevant to the requested nodes
        rawmarkers = repo.obsstore.relevantmarkers(nodes)

    for rawdata in rawmarkers:
        yield marker(repo, rawdata)
112 112
def closestpredecessors(repo, nodeid):
    """yield the list of next predecessors pointing on visible changectx nodes

    This function respects the repoview filtering: filtered revisions are
    considered missing.
    """
    predecessors = repo.obsstore.predecessors
    pending = [nodeid]
    visited = set(pending)

    while pending:
        node = pending.pop()

        for markerdata in predecessors.get(node, ()):
            candidate = markerdata[0]

            # basic cycle protection
            if candidate in visited:
                continue
            visited.add(candidate)

            if candidate in repo:
                # locally known and visible: this is a closest predecessor
                yield candidate
            else:
                # unknown/filtered: keep walking further back in history
                pending.append(candidate)
140 140
def allpredecessors(obsstore, nodes, ignoreflags=0):
    """Yield node for every precursor of <nodes>.

    Some precursors may be unknown locally.

    This is a linear yield unsuited to detecting folded changesets. It
    includes the initial nodes too."""
    pending = set(nodes)
    visited = set(pending)
    while pending:
        node = pending.pop()
        yield node
        for markerdata in obsstore.predecessors.get(node, ()):
            # skip markers carrying any of the ignored flags
            if markerdata[2] & ignoreflags:
                continue
            prec = markerdata[0]
            if prec not in visited:
                visited.add(prec)
                pending.add(prec)
162 162
def allsuccessors(obsstore, nodes, ignoreflags=0):
    """Yield node for every successor of <nodes>.

    Some successors may be unknown locally.

    This is a linear yield unsuited to detecting split changesets. It
    includes the initial nodes too."""
    pending = set(nodes)
    visited = set(pending)
    while pending:
        node = pending.pop()
        yield node
        for markerdata in obsstore.successors.get(node, ()):
            # skip markers carrying any of the ignored flags
            if markerdata[2] & ignoreflags:
                continue
            for successor in markerdata[1]:
                if successor not in visited:
                    visited.add(successor)
                    pending.add(successor)
183 183
184 184 def _filterprunes(markers):
185 185 """return a set with no prune markers"""
186 186 return set(m for m in markers if m[1])
187 187
def exclusivemarkers(repo, nodes):
    """set of markers relevant to "nodes" but no other locally-known nodes

    This function computes the set of markers "exclusive" to a locally-known
    node. This means we walk the markers starting from <nodes> until we reach
    a locally-known precursor outside of <nodes>. Elements of <nodes> with
    locally-known successors outside of <nodes> are ignored (since their
    precursors markers are also relevant to these successors).

    For example:

    # (A0 rewritten as A1)
    #
    # A0 <-1- A1 # Marker "1" is exclusive to A1

    or

    # (A0 rewritten as AX; AX rewritten as A1; AX is unknown locally)
    #
    # <-1- A0 <-2- AX <-3- A1 # Markers "2,3" are exclusive to A1

    or

    # (A0 has unknown precursors, A0 rewritten as A1 and A2 (divergence))
    #
    #      <-2- A1 # Marker "2" is exclusive to A0,A1
    #     /
    # <-1- A0
    #     \
    #      <-3- A2 # Marker "3" is exclusive to A0,A2
    #
    # in addition:
    #
    # Markers "2,3" are exclusive to A1,A2
    # Markers "1,2,3" are exclusive to A0,A1,A2

    See test/test-obsolete-bundle-strip.t for more examples.

    An example usage is strip. When stripping a changeset, we also want to
    strip the markers exclusive to this changeset. Otherwise we would have
    "dangling" obsolescence markers from its precursors: obsolescence markers
    marking a node as obsolete without any successors available locally.

    As for relevant markers, the prune markers for children will be followed.
    Of course, they will only be followed if the pruned children is
    locally-known. Since the prune markers are relevant to the pruned node.
    However, while prune markers are considered relevant to the parent of the
    pruned changesets, prune markers for locally-known changeset (with no
    successors) are considered exclusive to the pruned nodes. This allows
    to strip the prune markers (with the rest of the exclusive chain) alongside
    the pruned changesets.
    """
    # running on a filtered repository would be dangerous as markers could be
    # reported as exclusive when they are relevant for other filtered nodes.
    unfi = repo.unfiltered()

    # shortcut to various useful item
    nm = unfi.changelog.nodemap
    precursorsmarkers = unfi.obsstore.predecessors
    successormarkers = unfi.obsstore.successors
    childrenmarkers = unfi.obsstore.children

    # exclusive markers (return of the function)
    exclmarkers = set()
    # we need fast membership testing
    nodes = set(nodes)
    # looking for head in the obshistory: start from nodes whose every
    # non-prune successor marker is absent (i.e. chain tips)
    #
    # XXX we are ignoring all issues in regard with cycle for now.
    stack = [n for n in nodes if not _filterprunes(successormarkers.get(n, ()))]
    stack.sort()
    # nodes already stacked
    seennodes = set(stack)
    while stack:
        current = stack.pop()
        # fetch precursors markers
        markers = list(precursorsmarkers.get(current, ()))
        # extend the list with prune markers
        for mark in successormarkers.get(current, ()):
            if not mark[1]:
                markers.append(mark)
        # and markers from children (looking for prune)
        for mark in childrenmarkers.get(current, ()):
            if not mark[1]:
                markers.append(mark)
        # traverse the markers
        for mark in markers:
            if mark in exclmarkers:
                # marker already selected
                continue

            # If the marker is about the current node, select it
            #
            # (this delays the addition of markers from children)
            if mark[1] or mark[0] == current:
                exclmarkers.add(mark)

            # should we keep traversing through the precursors?
            prec = mark[0]

            # nodes in the stack or already processed
            if prec in seennodes:
                continue

            # is this a locally known node ?
            known = prec in nm
            # if locally-known and not in the <nodes> set, the traversal
            # stops here.
            if known and prec not in nodes:
                continue

            # do not keep going if there are unselected markers pointing to this
            # nodes. If we end up traversing these unselected markers later the
            # node will be taken care of at that point.
            precmarkers = _filterprunes(successormarkers.get(prec))
            if precmarkers.issubset(exclmarkers):
                seennodes.add(prec)
                stack.append(prec)

    return exclmarkers
308 308
def foreground(repo, nodes):
    """return all nodes in the "foreground" of other node

    The foreground of a revision is anything reachable using parent -> children
    or precursor -> successor relation. It is very similar to "descendant" but
    augmented with obsolescence information.

    Beware that possible obsolescence cycle may result if complex situation.
    """
    repo = repo.unfiltered()
    foreground = set(repo.set('%ln::', nodes))
    if repo.obsstore:
        # We only need this complicated logic if there is obsolescence
        # XXX will probably deserve an optimised revset.
        nm = repo.changelog.nodemap
        plen = -1
        # compute the whole set of successors or descendants as a fixed point:
        # iterate until the foreground stops growing
        while len(foreground) != plen:
            plen = len(foreground)
            succs = set(c.node() for c in foreground)
            mutable = [c.node() for c in foreground if c.mutable()]
            succs.update(allsuccessors(repo.obsstore, mutable))
            # only locally-known successors can seed the next revset query
            known = (n for n in succs if n in nm)
            foreground = set(repo.set('%ln::', known))
    return set(c.node() for c in foreground)
334 334
# effectflag field
#
# Effect-flag is a 1-byte bit field used to store what changed between a
# changeset and its successor(s).
#
# The effect flag is stored in obs-markers metadata while we iterate on the
# information design. That's why we have the EFFECTFLAGFIELD. If we come up
# with an incompatible design for effect flag, we can store a new design under
# another field name so we don't break readers. We plan to extend the existing
# obsmarkers bit-field when the effect flag design will be stabilized.
#
# The effect-flag is placed behind an experimental flag
# `effect-flags` set to off by default.
#

# metadata key under which the effect flag is stored in obs-markers
EFFECTFLAGFIELD = "ef1"

# Individual effect-flag bits. These values are persisted in marker metadata,
# so they must not be renumbered (note DIFFCHANGED (1 << 3) is listed before
# PARENTCHANGED (1 << 2); the values, not the listing order, are meaningful).
DESCCHANGED = 1 << 0 # action changed the description
METACHANGED = 1 << 1 # action change the meta
DIFFCHANGED = 1 << 3 # action change diff introduced by the changeset
PARENTCHANGED = 1 << 2 # action change the parent
USERCHANGED = 1 << 4 # the user changed
DATECHANGED = 1 << 5 # the date changed
BRANCHCHANGED = 1 << 6 # the branch changed
359 359
# extra keys whose changes must not be counted as meta changes
METABLACKLIST = [
    re.compile('^branch$'),
    re.compile('^.*-source$'),
    re.compile('^.*_source$'),
    re.compile('^source$'),
]

def metanotblacklisted(metaitem):
    """ Check that the key of a meta item (extrakey, extravalue) does not
    match at least one of the blacklist pattern
    """
    key = metaitem[0]
    return all(pattern.match(key) is None for pattern in METABLACKLIST)
374 374
375 375 def _prepare_hunk(hunk):
376 376 """Drop all information but the username and patch"""
377 377 cleanhunk = []
378 378 for line in hunk.splitlines():
379 379 if line.startswith(b'# User') or not line.startswith(b'#'):
380 380 if line.startswith(b'@@'):
381 381 line = b'@@\n'
382 382 cleanhunk.append(line)
383 383 return cleanhunk
384 384
def _getdifflines(iterdiff):
    """return a cleaned up lines"""
    hunk = next(iterdiff, None)
    if hunk is None:
        # iterator exhausted: signal end of diff to the caller
        return None
    return _prepare_hunk(hunk)
393 393
def _cmpdiff(leftctx, rightctx):
    """return True if both ctx introduce the "same diff"

    This is a first and basic implementation, with many shortcoming.
    """
    diffopts = diffutil.diffallopts(leftctx.repo().ui, {'git': True})
    # Leftctx or right ctx might be filtered, so we need to use the contexts
    # with an unfiltered repository to safely compute the diff
    leftunfi = leftctx._repo.unfiltered()[leftctx.rev()]
    leftdiff = leftunfi.diff(opts=diffopts)
    rightunfi = rightctx._repo.unfiltered()[rightctx.rev()]
    rightdiff = rightunfi.diff(opts=diffopts)

    # 0 is a dummy non-None sentinel so the loop body runs at least once;
    # _getdifflines returns None when its iterator is exhausted
    left, right = (0, 0)
    while None not in (left, right):
        left = _getdifflines(leftdiff)
        right = _getdifflines(rightdiff)

        # compare hunk by hunk; any mismatch (including one diff ending
        # before the other) means the diffs differ
        if left != right:
            return False
    return True
415 415
def geteffectflag(relation):
    """ From an obs-marker relation, compute what changed between the
    predecessor and the successor.

    ``relation`` is a pair ``(source, successors)`` where ``source`` is the
    predecessor changectx and ``successors`` an iterable of successor
    changectxs. Returns a bitmask combining the *CHANGED flags defined above.
    """
    effects = 0

    source = relation[0]

    for changectx in relation[1]:
        # Check if description has changed
        if changectx.description() != source.description():
            effects |= DESCCHANGED

        # Check if user has changed
        if changectx.user() != source.user():
            effects |= USERCHANGED

        # Check if date has changed
        if changectx.date() != source.date():
            effects |= DATECHANGED

        # Check if branch has changed
        if changectx.branch() != source.branch():
            effects |= BRANCHCHANGED

        # Check if at least one of the parent has changed
        if changectx.parents() != source.parents():
            effects |= PARENTCHANGED

        # Check if other meta has changed, ignoring blacklisted extra keys
        # (branch, *source) which are expected to differ across rewrites
        changeextra = changectx.extra().items()
        ctxmeta = list(filter(metanotblacklisted, changeextra))

        sourceextra = source.extra().items()
        srcmeta = list(filter(metanotblacklisted, sourceextra))

        if ctxmeta != srcmeta:
            effects |= METACHANGED

        # Check if the diff has changed
        if not _cmpdiff(source, changectx):
            effects |= DIFFCHANGED

    return effects
460 460
def getobsoleted(repo, tr):
    """return the set of pre-existing revisions obsoleted by a transaction

    A revision counts as obsoleted when it existed before the transaction,
    is not public, and every marker rewriting it was added by ``tr``.
    """
    torev = repo.unfiltered().changelog.nodemap.get
    phase = repo._phasecache.phase
    succsmarkers = repo.obsstore.successors.get
    public = phases.public
    addedmarkers = tr.changes.get('obsmarkers')
    # 'revs' is expected to always be present in tr.changes; index it
    # directly so a missing key fails loudly here instead of producing a
    # None that would crash on the membership test below.
    addedrevs = tr.changes['revs']
    seenrevs = set()
    obsoleted = set()
    for mark in addedmarkers:
        node = mark[0]
        rev = torev(node)
        # skip unknown nodes, revs already handled, and revs created by this
        # very transaction (those are not "pre-existing")
        if rev is None or rev in seenrevs or rev in addedrevs:
            continue
        seenrevs.add(rev)
        # public changesets cannot become obsolete
        if phase(repo, rev) == public:
            continue
        # only report the rev if all markers rewriting it come from this
        # transaction
        if set(succsmarkers(node) or []).issubset(addedmarkers):
            obsoleted.add(rev)
    return obsoleted
482 482
class _succs(list):
    """small class to represent a successors with some metadata about it"""

    def __init__(self, *args, **kwargs):
        super(_succs, self).__init__(*args, **kwargs)
        # obsolescence markers that contributed to this successors set
        self.markers = set()

    def copy(self):
        # duplicate both the list content and the associated marker set
        duplicate = _succs(self)
        duplicate.markers = self.markers.copy()
        return duplicate

    @util.propertycache
    def _set(self):
        # immutable snapshot of the list, cached for fast subset tests
        return set(self)

    def canmerge(self, other):
        # this successors set can be merged into `other` when it carries no
        # element that `other` does not already have
        return self._set.issubset(other._set)
502 502
def successorssets(repo, initialnode, closest=False, cache=None):
    """Return set of all latest successors of initial nodes

    The successors set of a changeset A are the group of revisions that succeed
    A. It succeeds A as a consistent whole, each revision being only a partial
    replacement. By default, the successors set contains non-obsolete
    changesets only, walking the obsolescence graph until reaching a leaf. If
    'closest' is set to True, closest successors-sets are returned (the
    obsolescence walk stops on known changesets).

    This function returns the full list of successor sets which is why it
    returns a list of tuples and not just a single tuple. Each tuple is a valid
    successors set. Note that (A,) may be a valid successors set for changeset A
    (see below).

    In most cases, a changeset A will have a single element (e.g. the changeset
    A is replaced by A') in its successors set. Though, it is also common for a
    changeset A to have no elements in its successor set (e.g. the changeset
    has been pruned). Therefore, the returned list of successors sets will be
    [(A',)] or [], respectively.

    When a changeset A is split into A' and B', however, it will result in a
    successors set containing more than a single element, i.e. [(A',B')].
    Divergent changesets will result in multiple successors sets, i.e. [(A',),
    (A'')].

    If a changeset A is not obsolete, then it will conceptually have no
    successors set. To distinguish this from a pruned changeset, the successor
    set will contain itself only, i.e. [(A,)].

    Finally, final successors unknown locally are considered to be pruned
    (pruned: obsoleted without any successors). (Final: successors not affected
    by markers).

    The 'closest' mode respects the repoview filtering. For example, without
    filter it will stop at the first locally known changeset, with 'visible'
    filter it will stop on visible changesets).

    The optional `cache` parameter is a dictionary that may contain
    precomputed successors sets. It is meant to reuse the computation of a
    previous call to `successorssets` when multiple calls are made at the same
    time. The cache dictionary is updated in place. The caller is responsible
    for its life span. Code that makes multiple calls to `successorssets`
    *should* use this cache mechanism or risk a performance hit.

    Since results are different depending on the 'closest' mode, the same
    cache cannot be reused for both modes.
    """

    succmarkers = repo.obsstore.successors

    # Stack of nodes we search successors sets for
    toproceed = [initialnode]
    # set version of above list for fast loop detection
    # element added to "toproceed" must be added here
    stackedset = set(toproceed)
    if cache is None:
        cache = {}

    # This while loop is the flattened version of a recursive search for
    # successors sets
    #
    # def successorssets(x):
    #     successors = directsuccessors(x)
    #     ss = [[]]
    #     for succ in directsuccessors(x):
    #         # product as in itertools cartesian product
    #         ss = product(ss, successorssets(succ))
    #     return ss
    #
    # But we can not use plain recursive calls here:
    # - that would blow the python call stack
    # - obsolescence markers may have cycles, we need to handle them.
    #
    # The `toproceed` list acts as our call stack. Every node we search
    # successors set for are stacked there.
    #
    # The `stackedset` is set version of this stack used to check if a node is
    # already stacked. This check is used to detect cycles and prevent infinite
    # loop.
    #
    # successors set of all nodes are stored in the `cache` dictionary.
    #
    # After this while loop ends we use the cache to return the successors sets
    # for the node requested by the caller.
    while toproceed:
        # Every iteration tries to compute the successors sets of the topmost
        # node of the stack: CURRENT.
        #
        # There are four possible outcomes:
        #
        # 1) We already know the successors sets of CURRENT:
        #    -> mission accomplished, pop it from the stack.
        # 2) Stop the walk:
        #    default case: Node is not obsolete
        #    closest case: Node is known at this repo filter level
        #      -> the node is its own successors sets. Add it to the cache.
        # 3) We do not know successors set of direct successors of CURRENT:
        #    -> We add those successors to the stack.
        # 4) We know successors sets of all direct successors of CURRENT:
        #    -> We can compute CURRENT successors set and add it to the
        #       cache.
        #
        current = toproceed[-1]

        # case 2 condition is a bit hairy because of closest,
        # we compute it on its own
        case2condition = ((current not in succmarkers)
                          or (closest and current != initialnode
                              and current in repo))

        if current in cache:
            # case (1): We already know the successors sets
            stackedset.remove(toproceed.pop())
        elif case2condition:
            # case (2): end of walk.
            if current in repo:
                # We have a valid successors.
                cache[current] = [_succs((current,))]
            else:
                # Final obsolete version is unknown locally.
                # Do not count that as a valid successors
                cache[current] = []
        else:
            # cases (3) and (4)
            #
            # We proceed in two phases. Phase 1 aims to distinguish case (3)
            # from case (4):
            #
            #     For each direct successors of CURRENT, we check whether its
            #     successors sets are known. If they are not, we stack the
            #     unknown node and proceed to the next iteration of the while
            #     loop. (case 3)
            #
            #     During this step, we may detect obsolescence cycles: a node
            #     with unknown successors sets but already in the call stack.
            #     In such a situation, we arbitrarily set the successors sets
            #     of the node to nothing (node pruned) to break the cycle.
            #
            #     If no break was encountered we proceed to phase 2.
            #
            # Phase 2 computes successors sets of CURRENT (case 4); see details
            # in phase 2 itself.
            #
            # Note the two levels of iteration in each phase.
            # - The first one handles obsolescence markers using CURRENT as
            #   precursor (successors markers of CURRENT).
            #
            #   Having multiple entry here means divergence.
            #
            # - The second one handles successors defined in each marker.
            #
            #   Having none means pruned node, multiple successors means split,
            #   single successors are standard replacement.
            #
            for mark in sorted(succmarkers[current]):
                for suc in mark[1]:
                    if suc not in cache:
                        if suc in stackedset:
                            # cycle breaking
                            cache[suc] = []
                        else:
                            # case (3) If we have not computed successors sets
                            # of one of those successors we add it to the
                            # `toproceed` stack and stop all work for this
                            # iteration.
                            toproceed.append(suc)
                            stackedset.add(suc)
                            break
                else:
                    continue
                break
            else:
                # case (4): we know all successors sets of all direct
                # successors
                #
                # Successors set contributed by each marker depends on the
                # successors sets of all its "successors" node.
                #
                # Each different marker is a divergence in the obsolescence
                # history. It contributes successors sets distinct from other
                # markers.
                #
                # Within a marker, a successor may have divergent successors
                # sets. In such a case, the marker will contribute multiple
                # divergent successors sets. If multiple successors have
                # divergent successors sets, a Cartesian product is used.
                #
                # At the end we post-process successors sets to remove
                # duplicated entry and successors set that are strict subset of
                # another one.
                succssets = []
                for mark in sorted(succmarkers[current]):
                    # successors sets contributed by this marker
                    base = _succs()
                    base.markers.add(mark)
                    markss = [base]
                    for suc in mark[1]:
                        # cardinal product with previous successors
                        productresult = []
                        for prefix in markss:
                            for suffix in cache[suc]:
                                newss = prefix.copy()
                                newss.markers.update(suffix.markers)
                                for part in suffix:
                                    # do not duplicate entry in successors set
                                    # first entry wins.
                                    if part not in newss:
                                        newss.append(part)
                                productresult.append(newss)
                        markss = productresult
                    succssets.extend(markss)
                # remove duplicated and subset
                seen = []
                final = []
                candidates = sorted((s for s in succssets if s),
                                    key=len, reverse=True)
                for cand in candidates:
                    for seensuccs in seen:
                        if cand.canmerge(seensuccs):
                            seensuccs.markers.update(cand.markers)
                            break
                    else:
                        final.append(cand)
                        seen.append(cand)
                final.reverse()  # put small successors set first
                cache[current] = final
    return cache[initialnode]
731 731
def successorsandmarkers(repo, ctx):
    """compute the raw data needed for computing obsfate

    Returns a list of dicts, one dict per successors set, each with a
    'successors' entry (a _succs) and a 'markers' entry (the markers that
    produced it). Returns None when ctx is not obsolete.
    """
    if not ctx.obsolete():
        return None

    ssets = successorssets(repo, ctx.node(), closest=True)

    # closestsuccessors returns an empty list for pruned revisions, remap it
    # into a list containing an empty list for future processing
    if ssets == []:
        ssets = [[]]

    # Try to recover pruned markers
    succsmap = repo.obsstore.successors
    fullsuccessorsets = [] # successor set + markers
    for sset in ssets:
        if sset:
            fullsuccessorsets.append(sset)
        else:
            # successorsset returns an empty set() when ctx or one of its
            # successors is pruned.
            # In this case, walk the obs-markers tree again starting with ctx
            # and find the relevant pruning obs-makers, the ones without
            # successors.
            # Having these markers allow us to compute some information about
            # its fate, like who pruned this changeset and when.

            # XXX we do not catch all prune markers (eg rewritten then pruned)
            # (fix me later)
            foundany = False
            for mark in succsmap.get(ctx.node(), ()):
                if not mark[1]:
                    foundany = True
                    sset = _succs()
                    sset.markers.add(mark)
                    fullsuccessorsets.append(sset)
            if not foundany:
                # no prune marker found either: report an empty successors
                # set with no markers attached
                fullsuccessorsets.append(_succs())

    values = []
    for sset in fullsuccessorsets:
        values.append({'successors': sset, 'markers': sset.markers})

    return values
778 778
779 779 def _getobsfate(successorssets):
780 780 """ Compute a changeset obsolescence fate based on its successorssets.
781 781 Successors can be the tipmost ones or the immediate ones. This function
782 782 return values are not meant to be shown directly to users, it is meant to
783 783 be used by internal functions only.
784 784 Returns one fate from the following values:
785 785 - pruned
786 786 - diverged
787 787 - superseded
788 788 - superseded_split
789 789 """
790 790
791 791 if len(successorssets) == 0:
792 792 # The commit has been pruned
793 793 return 'pruned'
794 794 elif len(successorssets) > 1:
795 795 return 'diverged'
796 796 else:
797 797 # No divergence, only one set of successors
798 798 successors = successorssets[0]
799 799
800 800 if len(successors) == 1:
801 801 return 'superseded'
802 802 else:
803 803 return 'superseded_split'
804 804
def obsfateverb(successorset, markers):
    """ Return the verb summarizing the successorset and potentially using
    information from the markers
    """
    # no successors means the changeset was pruned; a single successor is a
    # plain rewrite; several successors mean it was split
    if not successorset:
        return 'pruned'
    return 'rewritten' if len(successorset) == 1 else 'split'
816 816
def markersdates(markers):
    """returns the list of dates for a list of markers
    """
    # the creation date lives at index 4 of the raw marker tuple
    return [mark[4] for mark in markers]
821 821
def markersusers(markers):
    """ Returns a sorted list of markers users without duplicates
    """
    users = set()
    for mark in markers:
        # metadata is stored as a tuple of (key, value) pairs at index 3
        user = dict(mark[3]).get('user')
        if user:
            users.add(encoding.tolocal(user))
    return sorted(users)
830 830
def markersoperations(markers):
    """ Returns a sorted list of markers operations without duplicates
    """
    operations = set()
    for mark in markers:
        # metadata is stored as a tuple of (key, value) pairs at index 3
        operation = dict(mark[3]).get('operation')
        if operation:
            operations.add(operation)
    return sorted(operations)
839 839
def obsfateprinter(ui, repo, successors, markers, formatctx):
    """ Build an obsfate string for a single successorset using all obsfate
    related function defined in obsutil
    """
    quiet = ui.quiet
    verbose = ui.verbose
    normal = not verbose and not quiet

    # the message is assembled as a list of fragments joined at the end
    line = []

    # Verb
    line.append(obsfateverb(successors, markers))

    # Operations
    operations = markersoperations(markers)
    if operations:
        line.append(" using %s" % ", ".join(operations))

    # Successors
    if successors:
        fmtsuccessors = [formatctx(repo[succ]) for succ in successors]
        line.append(" as %s" % ", ".join(fmtsuccessors))

    # Users
    users = markersusers(markers)
    # Filter out current user in not verbose mode to reduce amount of
    # information
    if not verbose:
        currentuser = ui.username(acceptempty=True)
        if len(users) == 1 and currentuser in users:
            users = None

    if (verbose or normal) and users:
        line.append(" by %s" % ", ".join(users))

    # Date: only shown in verbose mode; collapse to a single timestamp when
    # all markers share the same date, otherwise show the range
    dates = markersdates(markers)

    if dates and verbose:
        min_date = min(dates)
        max_date = max(dates)

        if min_date == max_date:
            fmtmin_date = dateutil.datestr(min_date, '%Y-%m-%d %H:%M %1%2')
            line.append(" (at %s)" % fmtmin_date)
        else:
            fmtmin_date = dateutil.datestr(min_date, '%Y-%m-%d %H:%M %1%2')
            fmtmax_date = dateutil.datestr(max_date, '%Y-%m-%d %H:%M %1%2')
            line.append(" (between %s and %s)" % (fmtmin_date, fmtmax_date))

    return "".join(line)
891 891
892 892
# Human-readable messages explaining why a hidden (obsolete) changeset
# cannot be accessed directly, keyed by the fate derived from its
# successors sets (see _getfilteredreason below).
filteredmsgtable = {
    "pruned": _("hidden revision '%s' is pruned"),
    "diverged": _("hidden revision '%s' has diverged"),
    "superseded": _("hidden revision '%s' was rewritten as: %s"),
    "superseded_split": _("hidden revision '%s' was split as: %s"),
    "superseded_split_several": _("hidden revision '%s' was split as: %s and "
                                  "%d more"),
}
901 901
def _getfilteredreason(repo, changeid, ctx):
    """return a human-friendly string on why a obsolete changeset is hidden
    """
    successors = successorssets(repo, ctx.node())
    fate = _getobsfate(successors)

    # Be more precise in case the revision is superseded
    if fate == 'pruned':
        return filteredmsgtable['pruned'] % changeid
    if fate == 'diverged':
        return filteredmsgtable['diverged'] % changeid
    if fate == 'superseded':
        single_successor = nodemod.short(successors[0][0])
        return filteredmsgtable['superseded'] % (changeid, single_successor)
    if fate == 'superseded_split':
        succs = [nodemod.short(node_id) for node_id in successors[0]]

        # show at most two successors explicitly; summarize the rest
        if len(succs) <= 2:
            return filteredmsgtable['superseded_split'] % (changeid,
                                                           ', '.join(succs))

        args = (changeid, ', '.join(succs[:2]), len(succs) - 2)
        return filteredmsgtable['superseded_split_several'] % args
931 931
def divergentsets(repo, ctx):
    """Compute sets of commits divergent with a given one"""
    cache = {}
    base = {}
    node = ctx.node()
    for pred in allpredecessors(repo.obsstore, [node]):
        if pred == node:
            # a node can't be a base for divergence with itself
            continue
        for succset in successorssets(repo, pred, cache):
            if node in succset:
                # we are only interested in *other* successor sets
                continue
            key = tuple(succset)
            if key in base:
                # we already know the latest base for this divergency
                continue
            base[key] = pred
    return [{'divergentnodes': divset, 'commonpredecessor': b}
            for divset, b in base.iteritems()]
951 951
def whyunstable(repo, ctx):
    """Return a list of dicts explaining each instability of a changeset"""
    result = []
    if ctx.orphan():
        # orphan: at least one parent is itself orphan or obsolete
        for parent in ctx.parents():
            if parent.orphan():
                kind = 'orphan'
            elif parent.obsolete():
                kind = 'obsolete'
            else:
                continue
            result.append({'instability': 'orphan',
                           'reason': '%s parent' % kind,
                           'node': parent.hex()})
    if ctx.phasedivergent():
        # phase-divergent: an immutable (public) predecessor exists;
        # ignore bumpedfix markers when walking predecessors
        predecessors = allpredecessors(repo.obsstore, [ctx.node()],
                                       ignoreflags=bumpedfix)
        for pred in predecessors:
            if pred in repo and not repo[pred].mutable():
                result.append({'instability': 'phase-divergent',
                               'reason': 'immutable predecessor',
                               'node': repo[pred].hex()})
    if ctx.contentdivergent():
        # content-divergent: another successor set shares a predecessor
        for dset in divergentsets(repo, ctx):
            divnodes = [repo[n] for n in dset['divergentnodes']]
            result.append({'instability': 'content-divergent',
                           'divergentnodes': divnodes,
                           'reason': 'predecessor',
                           'node': nodemod.hex(dset['commonpredecessor'])})
    return result
General Comments 0
You need to be logged in to leave comments. Login now