##// END OF EJS Templates
obsutil: pass a diffopts object to context.diff
Boris Feld -
r38587:f65e6095 default
parent child Browse files
Show More
@@ -1,977 +1,980 b''
1 1 # obsutil.py - utility functions for obsolescence
2 2 #
3 3 # Copyright 2017 Boris Feld <boris.feld@octobus.net>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import re
11 11
12 12 from .i18n import _
13 13 from . import (
14 14 node as nodemod,
15 15 phases,
16 16 util,
17 17 )
18 from .utils import dateutil
18 from .utils import (
19 dateutil,
20 diffutil,
21 )
19 22
20 23 ### obsolescence marker flag
21 24
22 25 ## bumpedfix flag
23 26 #
24 27 # When a changeset A' succeed to a changeset A which became public, we call A'
25 28 # "bumped" because it's a successors of a public changesets
26 29 #
27 30 # o A' (bumped)
28 31 # |`:
29 32 # | o A
30 33 # |/
31 34 # o Z
32 35 #
33 36 # The way to solve this situation is to create a new changeset Ad as children
34 37 # of A. This changeset have the same content than A'. So the diff from A to A'
35 38 # is the same than the diff from A to Ad. Ad is marked as a successors of A'
36 39 #
37 40 # o Ad
38 41 # |`:
39 42 # | x A'
40 43 # |'|
41 44 # o | A
42 45 # |/
43 46 # o Z
44 47 #
45 48 # But by transitivity Ad is also a successors of A. To avoid having Ad marked
46 49 # as bumped too, we add the `bumpedfix` flag to the marker. <A', (Ad,)>.
47 50 # This flag mean that the successors express the changes between the public and
48 51 # bumped version and fix the situation, breaking the transitivity of
49 52 # "bumped" here.
50 53 bumpedfix = 1
51 54 usingsha256 = 2
52 55
class marker(object):
    """Lightweight wrapper around raw obsolescence marker data.

    The raw data is the tuple stored in the obsstore:
    (predecessor, successors, flags, metadata, date, parents).
    """

    def __init__(self, repo, data):
        # the repo argument will be used to create changectx in later version
        self._repo = repo
        self._data = data
        self._decodedmeta = None

    def __hash__(self):
        return hash(self._data)

    def __eq__(self, other):
        # markers compare equal when they wrap the same raw data
        return type(self) == type(other) and self._data == other._data

    def prednode(self):
        """node identifier of the predecessor changeset"""
        return self._data[0]

    def succnodes(self):
        """tuple of node identifiers of the successor changesets"""
        return self._data[1]

    def flags(self):
        """raw flags field of the marker"""
        return self._data[2]

    def metadata(self):
        """marker metadata, decoded as a dictionary"""
        return dict(self._data[3])

    def date(self):
        """creation date of the marker as a (unixtime, offset) pair"""
        return self._data[4]

    def parentnodes(self):
        """parents of the predecessor (None if not recorded)"""
        return self._data[5]
def getmarkers(repo, nodes=None, exclusive=False):
    """yield ``marker`` objects known to a repository

    When <nodes> is provided, the output is restricted to markers
    "relevant" to those nodes (exclusive markers only when ``exclusive``
    is true)."""
    # pick the raw marker source matching the requested scope
    if nodes is None:
        rawmarkers = repo.obsstore
    elif exclusive:
        rawmarkers = exclusivemarkers(repo, nodes)
    else:
        rawmarkers = repo.obsstore.relevantmarkers(nodes)

    for rawdata in rawmarkers:
        yield marker(repo, rawdata)
108 111
def closestpredecessors(repo, nodeid):
    """yield the list of next predecessors pointing on visible changectx nodes

    This function respects the repoview filtering: filtered revisions are
    considered missing.
    """
    predecessors = repo.obsstore.predecessors
    pending = [nodeid]
    visited = set(pending)

    while pending:
        node = pending.pop()
        for markdata in predecessors.get(node, ()):
            precnodeid = markdata[0]

            # basic cycle protection
            if precnodeid in visited:
                continue
            visited.add(precnodeid)

            if precnodeid in repo:
                # locally known (and visible): a closest predecessor
                yield precnodeid
            else:
                # unknown or filtered: keep walking its own predecessors
                pending.append(precnodeid)
136 139
def allpredecessors(obsstore, nodes, ignoreflags=0):
    """Yield a node for every predecessor of <nodes>.

    Some predecessors may be unknown locally.

    This is a linear yield unsuited to detecting folded changesets. The
    initial nodes are included in the output."""
    pending = set(nodes)
    emitted = set(pending)
    while pending:
        node = pending.pop()
        yield node
        for markdata in obsstore.predecessors.get(node, ()):
            # skip markers carrying any of the ignored flags
            if markdata[2] & ignoreflags:
                continue
            prec = markdata[0]
            if prec not in emitted:
                emitted.add(prec)
                pending.add(prec)
158 161
def allsuccessors(obsstore, nodes, ignoreflags=0):
    """Yield a node for every successor of <nodes>.

    Some successors may be unknown locally.

    This is a linear yield unsuited to detecting split changesets. The
    initial nodes are included in the output."""
    pending = set(nodes)
    emitted = set(pending)
    while pending:
        node = pending.pop()
        yield node
        for markdata in obsstore.successors.get(node, ()):
            # skip markers carrying any of the ignored flags
            if markdata[2] & ignoreflags:
                continue
            for successor in markdata[1]:
                if successor not in emitted:
                    emitted.add(successor)
                    pending.add(successor)
179 182
180 183 def _filterprunes(markers):
181 184 """return a set with no prune markers"""
182 185 return set(m for m in markers if m[1])
183 186
def exclusivemarkers(repo, nodes):
    """set of markers relevant to "nodes" but no other locally-known nodes

    This function computes the set of markers "exclusive" to a locally-known
    node. This means we walk the markers starting from <nodes> until we reach a
    locally-known precursors outside of <nodes>. Element of <nodes> with
    locally-known successors outside of <nodes> are ignored (since their
    precursors markers are also relevant to these successors).

    For example:

    # (A0 rewritten as A1)
    #
    # A0 <-1- A1 # Marker "1" is exclusive to A1

    or

    # (A0 rewritten as AX; AX rewritten as A1; AX is unknown locally)
    #
    # <-1- A0 <-2- AX <-3- A1 # Marker "2,3" are exclusive to A1

    or

    # (A0 has unknown precursors, A0 rewritten as A1 and A2 (divergence))
    #
    #  <-2- A1 # Marker "2" is exclusive to A0,A1
    #  /
    # <-1- A0
    #  \
    #   <-3- A2 # Marker "3" is exclusive to A0,A2
    #
    # in addition:
    #
    #  Markers "2,3" are exclusive to A1,A2
    #  Markers "1,2,3" are exclusive to A0,A1,A2

    See test/test-obsolete-bundle-strip.t for more examples.

    An example usage is strip. When stripping a changeset, we also want to
    strip the markers exclusive to this changeset. Otherwise we would have
    "dangling" obsolescence markers from its precursors: Obsolescence markers
    marking a node as obsolete without any successors available locally.

    As for relevant markers, the prune markers for children will be followed.
    Of course, they will only be followed if the pruned children is
    locally-known. Since the prune markers are relevant to the pruned node.
    However, while prune markers are considered relevant to the parent of the
    pruned changesets, prune markers for locally-known changeset (with no
    successors) are considered exclusive to the pruned nodes. This allows
    to strip the prune markers (with the rest of the exclusive chain) alongside
    the pruned changesets.
    """
    # running on a filtered repository would be dangerous as markers could be
    # reported as exclusive when they are relevant for other filtered nodes.
    unfi = repo.unfiltered()

    # shortcut to various useful item
    nm = unfi.changelog.nodemap
    precursorsmarkers = unfi.obsstore.predecessors
    successormarkers = unfi.obsstore.successors
    childrenmarkers = unfi.obsstore.children

    # exclusive markers (return of the function)
    exclmarkers = set()
    # we need fast membership testing
    nodes = set(nodes)
    # looking for heads in the obshistory: nodes with no non-prune successor
    #
    # XXX we are ignoring all issues in regard with cycle for now.
    stack = [n for n in nodes if not _filterprunes(successormarkers.get(n, ()))]
    stack.sort()
    # nodes already stacked
    seennodes = set(stack)
    while stack:
        current = stack.pop()
        # fetch precursors markers
        markers = list(precursorsmarkers.get(current, ()))
        # extend the list with prune markers
        for mark in successormarkers.get(current, ()):
            if not mark[1]:
                markers.append(mark)
        # and markers from children (looking for prune)
        for mark in childrenmarkers.get(current, ()):
            if not mark[1]:
                markers.append(mark)
        # traverse the markers
        for mark in markers:
            if mark in exclmarkers:
                # marker already selected
                continue

            # If the marker is about the current node, select it
            #
            # (this delays the addition of markers from children)
            if mark[1] or mark[0] == current:
                exclmarkers.add(mark)

            # should we keep traversing through the precursors?
            prec = mark[0]

            # nodes in the stack or already processed
            if prec in seennodes:
                continue

            # is this a locally known node ?
            known = prec in nm
            # if locally-known and not in the <nodes> set, the traversal
            # stops here.
            if known and prec not in nodes:
                continue

            # do not keep going if there are unselected markers pointing to this
            # nodes. If we end up traversing these unselected markers later the
            # node will be taken care of at that point.
            precmarkers = _filterprunes(successormarkers.get(prec))
            if precmarkers.issubset(exclmarkers):
                seennodes.add(prec)
                stack.append(prec)

    return exclmarkers
304 307
def foreground(repo, nodes):
    """return all nodes in the "foreground" of other node

    The foreground of a revision is anything reachable using parent ->
    children or precursor -> successor relation. It is very similar to
    "descendant" but augmented with obsolescence information.

    Beware that possible obsolescence cycle may result if complex situation.
    """
    unfi = repo.unfiltered()
    ctxs = set(unfi.set('%ln::', nodes))
    if unfi.obsstore:
        # Obsolescence data exists: keep expanding the set with successors
        # and descendants until it stabilizes.
        # XXX will probably deserve an optimised revset.
        nodemap = unfi.changelog.nodemap
        previoussize = -1
        while len(ctxs) != previoussize:
            previoussize = len(ctxs)
            candidates = set(c.node() for c in ctxs)
            mutable = [c.node() for c in ctxs if c.mutable()]
            candidates.update(allsuccessors(unfi.obsstore, mutable))
            locallyknown = (n for n in candidates if n in nodemap)
            ctxs = set(unfi.set('%ln::', locallyknown))
    return set(c.node() for c in ctxs)
330 333
331 334 # effectflag field
332 335 #
333 336 # Effect-flag is a 1-byte bit field used to store what changed between a
334 337 # changeset and its successor(s).
335 338 #
336 339 # The effect flag is stored in obs-markers metadata while we iterate on the
337 340 # information design. That's why we have the EFFECTFLAGFIELD. If we come up
338 341 # with an incompatible design for effect flag, we can store a new design under
339 342 # another field name so we don't break readers. We plan to extend the existing
340 343 # obsmarkers bit-field when the effect flag design will be stabilized.
341 344 #
342 345 # The effect-flag is placed behind an experimental flag
343 346 # `effect-flags` set to off by default.
344 347 #
345 348
# metadata field (inside the obs-marker metadata) under which the effect
# flag is stored; see the design notes above
EFFECTFLAGFIELD = "ef1"

# bit values of the effect flag
# NOTE(review): DIFFCHANGED (1 << 3) and PARENTCHANGED (1 << 2) are declared
# out of numeric order; these values end up recorded in marker metadata, so
# they must not be renumbered.
DESCCHANGED = 1 << 0 # action changed the description
METACHANGED = 1 << 1 # action change the meta
DIFFCHANGED = 1 << 3 # action change diff introduced by the changeset
PARENTCHANGED = 1 << 2 # action change the parent
USERCHANGED = 1 << 4 # the user changed
DATECHANGED = 1 << 5 # the date changed
BRANCHCHANGED = 1 << 6 # the branch changed

# "extra" keys matching any of these patterns are ignored when comparing
# changeset metadata (see metanotblacklisted())
METABLACKLIST = [
    re.compile('^branch$'),
    re.compile('^.*-source$'),
    re.compile('^.*_source$'),
    re.compile('^source$'),
]
362 365
def metanotblacklisted(metaitem):
    """ Return True when the key of a meta item (extrakey, extravalue)
    matches none of the blacklist patterns
    """
    key = metaitem[0]
    return all(pattern.match(key) is None for pattern in METABLACKLIST)
370 373
371 374 def _prepare_hunk(hunk):
372 375 """Drop all information but the username and patch"""
373 376 cleanhunk = []
374 377 for line in hunk.splitlines():
375 378 if line.startswith(b'# User') or not line.startswith(b'#'):
376 379 if line.startswith(b'@@'):
377 380 line = b'@@\n'
378 381 cleanhunk.append(line)
379 382 return cleanhunk
380 383
def _getdifflines(iterdiff):
    """return the next hunk of the diff iterator, cleaned up, or None when
    exhausted"""
    hunk = next(iterdiff, None)
    if hunk is None:
        return None
    return _prepare_hunk(hunk)
389 392
def _cmpdiff(leftctx, rightctx):
    """return True if both ctx introduce the "same diff"

    This is a first and basic implementation, with many shortcoming.
    """
    diffopts = diffutil.diffopts(leftctx._repo.ui, {'git': True})
    # Leftctx or right ctx might be filtered, so we need to use the contexts
    # with an unfiltered repository to safely compute the diff
    leftunfi = leftctx._repo.unfiltered()[leftctx.rev()]
    leftdiff = leftunfi.diff(opts=diffopts)
    rightunfi = rightctx._repo.unfiltered()[rightctx.rev()]
    rightdiff = rightunfi.diff(opts=diffopts)

    # compare the two diffs hunk by hunk; both iterators yield None when
    # exhausted, so identical diffs end with left == right == None
    while True:
        left = _getdifflines(leftdiff)
        right = _getdifflines(rightdiff)
        if left != right:
            return False
        if left is None:
            return True
411 414
def geteffectflag(relation):
    """ From an obs-marker relation, compute what changed between the
    predecessor and the successor.
    """
    effects = 0
    source = relation[0]

    # accessor name and the flag raised when the accessor values differ
    simplechecks = [
        ('description', DESCCHANGED),
        ('user', USERCHANGED),
        ('date', DATECHANGED),
        ('branch', BRANCHCHANGED),
        ('parents', PARENTCHANGED),
    ]

    for changectx in relation[1]:
        for attrname, flag in simplechecks:
            if getattr(changectx, attrname)() != getattr(source, attrname)():
                effects |= flag

        # compare the extra metadata, ignoring blacklisted keys
        ctxmeta = list(filter(metanotblacklisted, changectx.extra().items()))
        srcmeta = list(filter(metanotblacklisted, source.extra().items()))
        if ctxmeta != srcmeta:
            effects |= METACHANGED

        # check whether the diff introduced by the changeset changed
        if not _cmpdiff(source, changectx):
            effects |= DIFFCHANGED

    return effects
456 459
def getobsoleted(repo, tr):
    """return the set of pre-existing revisions obsoleted by a transaction"""
    torev = repo.unfiltered().changelog.nodemap.get
    phase = repo._phasecache.phase
    succsmarkers = repo.obsstore.successors.get
    public = phases.public
    newmarkers = tr.changes.get('obsmarkers')
    newrevs = tr.changes.get('revs')
    processed = set()
    obsoleted = set()
    for markdata in newmarkers:
        obsoletednode = markdata[0]
        rev = torev(obsoletednode)
        # skip unknown nodes, repeats, and revisions added by this very
        # transaction (those are not "pre-existing")
        if rev is None or rev in processed or rev in newrevs:
            continue
        processed.add(rev)
        # skip public revisions
        if phase(repo, rev) == public:
            continue
        # only count the revision when every marker obsoleting it was added
        # by this transaction
        if set(succsmarkers(obsoletednode) or []).issubset(newmarkers):
            obsoleted.add(rev)
    return obsoleted
478 481
class _succs(list):
    """small class to represent a successors with some metadata about it"""

    def __init__(self, *args, **kwargs):
        super(_succs, self).__init__(*args, **kwargs)
        # obsolescence markers that produced this successors set
        self.markers = set()

    def copy(self):
        """return an independent copy (list content and markers)"""
        duplicate = _succs(self)
        duplicate.markers = self.markers.copy()
        return duplicate

    @util.propertycache
    def _set(self):
        # immutable set view of the list, cached for repeated subset tests
        return set(self)

    def canmerge(self, other):
        """True when every element of this set also appears in `other`"""
        return self._set.issubset(other._set)
498 501
def successorssets(repo, initialnode, closest=False, cache=None):
    """Return set of all latest successors of initial nodes

    The successors set of a changeset A are the group of revisions that succeed
    A. It succeeds A as a consistent whole, each revision being only a partial
    replacement. By default, the successors set contains non-obsolete
    changesets only, walking the obsolescence graph until reaching a leaf. If
    'closest' is set to True, closest successors-sets are return (the
    obsolescence walk stops on known changesets).

    This function returns the full list of successor sets which is why it
    returns a list of tuples and not just a single tuple. Each tuple is a valid
    successors set. Note that (A,) may be a valid successors set for changeset A
    (see below).

    In most cases, a changeset A will have a single element (e.g. the changeset
    A is replaced by A') in its successors set. Though, it is also common for a
    changeset A to have no elements in its successor set (e.g. the changeset
    has been pruned). Therefore, the returned list of successors sets will be
    [(A',)] or [], respectively.

    When a changeset A is split into A' and B', however, it will result in a
    successors set containing more than a single element, i.e. [(A',B')].
    Divergent changesets will result in multiple successors sets, i.e. [(A',),
    (A'')].

    If a changeset A is not obsolete, then it will conceptually have no
    successors set. To distinguish this from a pruned changeset, the successor
    set will contain itself only, i.e. [(A,)].

    Finally, final successors unknown locally are considered to be pruned
    (pruned: obsoleted without any successors). (Final: successors not affected
    by markers).

    The 'closest' mode respect the repoview filtering. For example, without
    filter it will stop at the first locally known changeset, with 'visible'
    filter it will stop on visible changesets).

    The optional `cache` parameter is a dictionary that may contains
    precomputed successors sets. It is meant to reuse the computation of a
    previous call to `successorssets` when multiple calls are made at the same
    time. The cache dictionary is updated in place. The caller is responsible
    for its life span. Code that makes multiple calls to `successorssets`
    *should* use this cache mechanism or risk a performance hit.

    Since results differ depending on the 'closest' mode, the same cache
    cannot be reused for both modes.
    """

    succmarkers = repo.obsstore.successors

    # Stack of nodes we search successors sets for
    toproceed = [initialnode]
    # set version of above list for fast loop detection
    # element added to "toproceed" must be added here
    stackedset = set(toproceed)
    if cache is None:
        cache = {}

    # This while loop is the flattened version of a recursive search for
    # successors sets
    #
    # def successorssets(x):
    #     successors = directsuccessors(x)
    #     ss = [[]]
    #     for succ in directsuccessors(x):
    #         # product as in itertools cartesian product
    #         ss = product(ss, successorssets(succ))
    #     return ss
    #
    # But we can not use plain recursive calls here:
    # - that would blow the python call stack
    # - obsolescence markers may have cycles, we need to handle them.
    #
    # The `toproceed` list act as our call stack. Every node we search
    # successors set for are stacked there.
    #
    # The `stackedset` is set version of this stack used to check if a node is
    # already stacked. This check is used to detect cycles and prevent infinite
    # loop.
    #
    # successors set of all nodes are stored in the `cache` dictionary.
    #
    # After this while loop ends we use the cache to return the successors sets
    # for the node requested by the caller.
    while toproceed:
        # Every iteration tries to compute the successors sets of the topmost
        # node of the stack: CURRENT.
        #
        # There are four possible outcomes:
        #
        # 1) We already know the successors sets of CURRENT:
        #    -> mission accomplished, pop it from the stack.
        # 2) Stop the walk:
        #    default case: Node is not obsolete
        #    closest case: Node is known at this repo filter level
        #      -> the node is its own successors sets. Add it to the cache.
        # 3) We do not know successors set of direct successors of CURRENT:
        #    -> We add those successors to the stack.
        # 4) We know successors sets of all direct successors of CURRENT:
        #    -> We can compute CURRENT successors set and add it to the
        #       cache.
        #
        current = toproceed[-1]

        # case 2 condition is a bit hairy because of closest,
        # we compute it on its own
        case2condition = ((current not in succmarkers)
                          or (closest and current != initialnode
                              and current in repo))

        if current in cache:
            # case (1): We already know the successors sets
            stackedset.remove(toproceed.pop())
        elif case2condition:
            # case (2): end of walk.
            if current in repo:
                # We have a valid successors.
                cache[current] = [_succs((current,))]
            else:
                # Final obsolete version is unknown locally.
                # Do not count that as a valid successors
                cache[current] = []
        else:
            # cases (3) and (4)
            #
            # We proceed in two phases. Phase 1 aims to distinguish case (3)
            # from case (4):
            #
            #     For each direct successors of CURRENT, we check whether its
            #     successors sets are known. If they are not, we stack the
            #     unknown node and proceed to the next iteration of the while
            #     loop. (case 3)
            #
            #     During this step, we may detect obsolescence cycles: a node
            #     with unknown successors sets but already in the call stack.
            #     In such a situation, we arbitrary set the successors sets of
            #     the node to nothing (node pruned) to break the cycle.
            #
            #     If no break was encountered we proceed to phase 2.
            #
            # Phase 2 computes successors sets of CURRENT (case 4); see details
            # in phase 2 itself.
            #
            # Note the two levels of iteration in each phase.
            # - The first one handles obsolescence markers using CURRENT as
            #   precursor (successors markers of CURRENT).
            #
            #   Having multiple entry here means divergence.
            #
            # - The second one handles successors defined in each marker.
            #
            #   Having none means pruned node, multiple successors means split,
            #   single successors are standard replacement.
            #
            for mark in sorted(succmarkers[current]):
                for suc in mark[1]:
                    if suc not in cache:
                        if suc in stackedset:
                            # cycle breaking
                            cache[suc] = []
                        else:
                            # case (3) If we have not computed successors sets
                            # of one of those successors we add it to the
                            # `toproceed` stack and stop all work for this
                            # iteration.
                            toproceed.append(suc)
                            stackedset.add(suc)
                            break
                else:
                    continue
                break
            else:
                # case (4): we know all successors sets of all direct
                # successors
                #
                # Successors set contributed by each marker depends on the
                # successors sets of all its "successors" node.
                #
                # Each different marker is a divergence in the obsolescence
                # history. It contributes successors sets distinct from other
                # markers.
                #
                # Within a marker, a successor may have divergent successors
                # sets. In such a case, the marker will contribute multiple
                # divergent successors sets. If multiple successors have
                # divergent successors sets, a Cartesian product is used.
                #
                # At the end we post-process successors sets to remove
                # duplicated entry and successors set that are strict subset of
                # another one.
                succssets = []
                for mark in sorted(succmarkers[current]):
                    # successors sets contributed by this marker
                    base = _succs()
                    base.markers.add(mark)
                    markss = [base]
                    for suc in mark[1]:
                        # cardinal product with previous successors
                        productresult = []
                        for prefix in markss:
                            for suffix in cache[suc]:
                                newss = prefix.copy()
                                newss.markers.update(suffix.markers)
                                for part in suffix:
                                    # do not duplicated entry in successors set
                                    # first entry wins.
                                    if part not in newss:
                                        newss.append(part)
                                productresult.append(newss)
                        markss = productresult
                    succssets.extend(markss)
                # remove duplicated and subset
                seen = []
                final = []
                candidates = sorted((s for s in succssets if s),
                                    key=len, reverse=True)
                for cand in candidates:
                    for seensuccs in seen:
                        if cand.canmerge(seensuccs):
                            seensuccs.markers.update(cand.markers)
                            break
                    else:
                        final.append(cand)
                        seen.append(cand)
                final.reverse() # put small successors set first
                cache[current] = final
    return cache[initialnode]
727 730
def successorsandmarkers(repo, ctx):
    """compute the raw data needed for computing obsfate
    Returns a list of dict, one dict per successors set
    """
    if not ctx.obsolete():
        return None

    ssets = successorssets(repo, ctx.node(), closest=True)

    # closestsuccessors returns an empty list for pruned revisions, remap it
    # into a list containing an empty list for future processing
    if not ssets:
        ssets = [[]]

    succsmap = repo.obsstore.successors
    fullsuccessorsets = []  # successor set + markers
    for sset in ssets:
        if sset:
            fullsuccessorsets.append(sset)
            continue
        # successorsset returned an empty set: ctx or one of its successors
        # is pruned. Walk the obs-markers starting from ctx again and pick
        # the relevant pruning markers (the ones without successors); they
        # tell us who pruned this changeset and when.

        # XXX we do not catch all prune markers (eg rewritten then pruned)
        # (fix me later)
        prunemarkers = [m for m in succsmap.get(ctx.node(), ()) if not m[1]]
        for markdata in prunemarkers:
            pruned = _succs()
            pruned.markers.add(markdata)
            fullsuccessorsets.append(pruned)
        if not prunemarkers:
            fullsuccessorsets.append(_succs())

    return [{'successors': sset, 'markers': sset.markers}
            for sset in fullsuccessorsets]
774 777
775 778 def _getobsfate(successorssets):
776 779 """ Compute a changeset obsolescence fate based on its successorssets.
777 780 Successors can be the tipmost ones or the immediate ones. This function
778 781 return values are not meant to be shown directly to users, it is meant to
779 782 be used by internal functions only.
780 783 Returns one fate from the following values:
781 784 - pruned
782 785 - diverged
783 786 - superseded
784 787 - superseded_split
785 788 """
786 789
787 790 if len(successorssets) == 0:
788 791 # The commit has been pruned
789 792 return 'pruned'
790 793 elif len(successorssets) > 1:
791 794 return 'diverged'
792 795 else:
793 796 # No divergence, only one set of successors
794 797 successors = successorssets[0]
795 798
796 799 if len(successors) == 1:
797 800 return 'superseded'
798 801 else:
799 802 return 'superseded_split'
800 803
def obsfateverb(successorset, markers):
    """ Return the verb summarizing the successorset and potentially using
    information from the markers
    """
    if not successorset:
        return 'pruned'
    return 'rewritten' if len(successorset) == 1 else 'split'
812 815
def markersdates(markers):
    """returns the list of dates for a list of markers
    """
    return [markerdata[4] for markerdata in markers]
817 820
def markersusers(markers):
    """ Returns a sorted list of markers users without duplicates
    """
    users = set()
    for markerdata in markers:
        # the user is stored in the marker metadata, when recorded
        user = dict(markerdata[3]).get('user')
        if user:
            users.add(user)
    return sorted(users)
825 828
def markersoperations(markers):
    """ Returns a sorted list of markers operations without duplicates
    """
    operations = set()
    for markerdata in markers:
        # the operation is stored in the marker metadata, when recorded
        operation = dict(markerdata[3]).get('operation')
        if operation:
            operations.add(operation)
    return sorted(operations)
834 837
def obsfateprinter(ui, repo, successors, markers, formatctx):
    """ Build a obsfate string for a single successorset using all obsfate
    related function defined in obsutil
    """
    quiet = ui.quiet
    verbose = ui.verbose
    normal = not verbose and not quiet

    # Verb
    pieces = [obsfateverb(successors, markers)]

    # Operations
    operations = markersoperations(markers)
    if operations:
        pieces.append(" using %s" % ", ".join(operations))

    # Successors
    if successors:
        fmtsuccessors = [formatctx(repo[succ]) for succ in successors]
        pieces.append(" as %s" % ", ".join(fmtsuccessors))

    # Users
    users = markersusers(markers)
    # Filter out current user in not verbose mode to reduce amount of
    # information
    if not verbose:
        currentuser = ui.username(acceptempty=True)
        if len(users) == 1 and currentuser in users:
            users = None

    if (verbose or normal) and users:
        pieces.append(" by %s" % ", ".join(users))

    # Date
    dates = markersdates(markers)
    if dates and verbose:
        mindate = min(dates)
        maxdate = max(dates)
        datefmt = '%Y-%m-%d %H:%M %1%2'
        if mindate == maxdate:
            pieces.append(" (at %s)" % dateutil.datestr(mindate, datefmt))
        else:
            pieces.append(" (between %s and %s)"
                          % (dateutil.datestr(mindate, datefmt),
                             dateutil.datestr(maxdate, datefmt)))

    return "".join(pieces)
886 889
887 890
# Messages explaining why a hidden (obsolete) revision is not accessible,
# keyed by the fate computed from its successors sets. The first '%s'
# receives the changeset identifier; the "superseded*" entries also take a
# formatted successor list (and, for "_several", a count of extra
# successors).
filteredmsgtable = {
    "pruned": _("hidden revision '%s' is pruned"),
    "diverged": _("hidden revision '%s' has diverged"),
    "superseded": _("hidden revision '%s' was rewritten as: %s"),
    "superseded_split": _("hidden revision '%s' was split as: %s"),
    "superseded_split_several": _("hidden revision '%s' was split as: %s and "
                                  "%d more"),
}
896 899
def _getfilteredreason(repo, changeid, ctx):
    """return a human-friendly string on why a obsolete changeset is hidden
    """
    successors = successorssets(repo, ctx.node())
    fate = _getobsfate(successors)

    # Pick the most precise message matching the computed fate; an unknown
    # fate yields None, like the original if/elif chain falling through.
    if fate == 'pruned':
        return filteredmsgtable['pruned'] % changeid
    if fate == 'diverged':
        return filteredmsgtable['diverged'] % changeid
    if fate == 'superseded':
        single_successor = nodemod.short(successors[0][0])
        return filteredmsgtable['superseded'] % (changeid, single_successor)
    if fate == 'superseded_split':
        succs = [nodemod.short(node_id) for node_id in successors[0]]

        # Show all successors when there are few; otherwise show the first
        # two and the count of remaining ones.
        if len(succs) <= 2:
            return filteredmsgtable['superseded_split'] % (changeid,
                                                           ', '.join(succs))

        firstsuccessors = ', '.join(succs[:2])
        remainingnumber = len(succs) - 2
        args = (changeid, firstsuccessors, remainingnumber)
        return filteredmsgtable['superseded_split_several'] % args
926 929
def divergentsets(repo, ctx):
    """Compute sets of commits divergent with a given one"""
    cache = {}
    base = {}
    for pred in allpredecessors(repo.obsstore, [ctx.node()]):
        if pred == ctx.node():
            # a node can't be a base for divergence with itself
            continue
        for succset in successorssets(repo, pred, cache):
            if ctx.node() in succset:
                # we are only interested in *other* successor sets
                continue
            key = tuple(succset)
            if key in base:
                # we already know the latest base for this divergency
                continue
            base[key] = pred
    return [{'divergentnodes': divset, 'commonpredecessor': b}
            for divset, b in base.iteritems()]
946 949
def whyunstable(repo, ctx):
    """List the reasons making a changeset unstable, one dict per reason."""
    result = []
    if ctx.orphan():
        # An orphan has at least one obsolete or orphan ancestor among its
        # parents; report each offending parent.
        for parent in ctx.parents():
            if parent.orphan():
                kind = 'orphan'
            elif parent.obsolete():
                kind = 'obsolete'
            else:
                continue
            result.append({'instability': 'orphan',
                           'reason': '%s parent' % kind,
                           'node': parent.hex()})
    if ctx.phasedivergent():
        # Phase-divergence comes from rewriting an already-public
        # predecessor; list the immutable predecessors still in the repo.
        predecessors = allpredecessors(repo.obsstore, [ctx.node()],
                                       ignoreflags=bumpedfix)
        for pred in predecessors:
            if pred in repo and not repo[pred].mutable():
                result.append({'instability': 'phase-divergent',
                               'reason': 'immutable predecessor',
                               'node': repo[pred].hex()})
    if ctx.contentdivergent():
        # Content-divergence: several successor sets exist for a common
        # predecessor; report each divergent set with that predecessor.
        for dset in divergentsets(repo, ctx):
            divnodes = [repo[n] for n in dset['divergentnodes']]
            result.append({'instability': 'content-divergent',
                           'divergentnodes': divnodes,
                           'reason': 'predecessor',
                           'node': nodemod.hex(dset['commonpredecessor'])})
    return result
General Comments 0
You need to be logged in to leave comments. Login now