##// END OF EJS Templates
obsutil: clarify the access to "repo"...
Boris Feld -
r40564:520514af default
parent child Browse files
Show More
@@ -1,981 +1,983 b''
1 1 # obsutil.py - utility functions for obsolescence
2 2 #
3 3 # Copyright 2017 Boris Feld <boris.feld@octobus.net>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import re
11 11
12 12 from .i18n import _
13 13 from . import (
14 14 diffutil,
15 15 encoding,
16 16 node as nodemod,
17 17 phases,
18 18 util,
19 19 )
20 20 from .utils import (
21 21 dateutil,
22 22 )
23 23
### obsolescence marker flag

## bumpedfix flag
#
# When a changeset A' succeed to a changeset A which became public, we call A'
# "bumped" because it's a successors of a public changesets
#
#     o A' (bumped)
#     |`:
#     | o A
#     |/
#     o Z
#
# The way to solve this situation is to create a new changeset Ad as children
# of A. This changeset have the same content than A'. So the diff from A to A'
# is the same than the diff from A to Ad. Ad is marked as a successors of A'
#
#     o Ad
#     |`:
#     | x A'
#     |'|
#     o | A
#     |/
#     o Z
#
# But by transitivity Ad is also a successors of A. To avoid having Ad marked
# as bumped too, we add the `bumpedfix` flag to the marker. <A', (Ad,)>.
# This flag mean that the successors express the changes between the public and
# bumped version and fix the situation, breaking the transitivity of
# "bumped" here.
bumpedfix = 1
# flag indicating the marker records hashes in their sha256 form rather than
# the default sha1 -- NOTE(review): inferred from the name; the byte layout is
# handled by the marker format code elsewhere, confirm there.
usingsha256 = 2
56 56
class marker(object):
    """Wrap obsolete marker raw data"""

    def __init__(self, repo, data):
        # the repo argument will be used to create changectx in later version
        self._repo = repo
        # raw marker tuple, indexed by the accessors below:
        # (prednode, succnodes, flags, metadata, date, parents)
        self._data = data
        self._decodedmeta = None

    def __hash__(self):
        return hash(self._data)

    def __eq__(self, other):
        if type(other) != type(self):
            return False
        return self._data == other._data

    def __ne__(self, other):
        # Required for Python 2: without it, defining __eq__ alone leaves
        # "!=" comparing object identity, so two distinct marker objects
        # wrapping identical data would compare unequal.
        return not self.__eq__(other)

    def prednode(self):
        """Predecessor changeset node identifier"""
        return self._data[0]

    def succnodes(self):
        """List of successor changesets node identifiers"""
        return self._data[1]

    def parentnodes(self):
        """Parents of the predecessors (None if not recorded)"""
        return self._data[5]

    def metadata(self):
        """Decoded metadata dictionary"""
        return dict(self._data[3])

    def date(self):
        """Creation date as (unixtime, offset)"""
        return self._data[4]

    def flags(self):
        """The flags field of the marker"""
        return self._data[2]
97 97
def getmarkers(repo, nodes=None, exclusive=False):
    """returns markers known in a repository

    If <nodes> is specified, only markers "relevant" to those nodes are
    returned (only the markers exclusive to them when `exclusive` is set)."""
    if nodes is None:
        # no filtering requested: walk the whole store
        rawdata = repo.obsstore
    elif exclusive:
        rawdata = exclusivemarkers(repo, nodes)
    else:
        rawdata = repo.obsstore.relevantmarkers(nodes)
    for raw in rawdata:
        yield marker(repo, raw)
112 112
def closestpredecessors(repo, nodeid):
    """yield the list of next predecessors pointing on visible changectx nodes

    This function respects the repoview filtering: a filtered revision is
    considered missing and the walk continues through its own predecessors.
    """
    predecessors = repo.obsstore.predecessors
    pending = [nodeid]
    visited = set(pending)

    while pending:
        node = pending.pop()

        for markerdata in predecessors.get(node, ()):
            candidate = markerdata[0]

            # basic protection against obsolescence cycles
            if candidate in visited:
                continue
            visited.add(candidate)

            if candidate in repo:
                # locally known (and visible): a closest predecessor
                yield candidate
            else:
                # unknown or filtered: keep walking its predecessors
                pending.append(candidate)
140 140
def allpredecessors(obsstore, nodes, ignoreflags=0):
    """Yield node for every precursors of <nodes>.

    Some precursors may be unknown locally.

    This is a linear yield unsuited to detecting folded changesets. It
    includes the initial nodes too."""
    pending = set(nodes)
    emitted = set(pending)
    getmarks = obsstore.predecessors.get
    while pending:
        node = pending.pop()
        yield node
        for markerdata in getmarks(node, ()):
            # skip markers carrying any of the ignored flags
            if markerdata[2] & ignoreflags:
                continue
            prec = markerdata[0]
            if prec not in emitted:
                emitted.add(prec)
                pending.add(prec)
163 163
def allsuccessors(obsstore, nodes, ignoreflags=0):
    """Yield node for every successor of <nodes>.

    Some successors may be unknown locally.

    This is a linear yield unsuited to detecting split changesets. It
    includes the initial nodes too."""
    pending = set(nodes)
    emitted = set(pending)
    while pending:
        node = pending.pop()
        yield node
        for markerdata in obsstore.successors.get(node, ()):
            # skip markers carrying any of the ignored flags
            if markerdata[2] & ignoreflags:
                continue
            for succ in markerdata[1]:
                if succ not in emitted:
                    emitted.add(succ)
                    pending.add(succ)
184 184
185 185 def _filterprunes(markers):
186 186 """return a set with no prune markers"""
187 187 return set(m for m in markers if m[1])
188 188
def exclusivemarkers(repo, nodes):
    """set of markers relevant to "nodes" but no other locally-known nodes

    This function computes the set of markers "exclusive" to a locally-known
    node. This means we walk the markers starting from <nodes> until we reach a
    locally-known precursors outside of <nodes>. Element of <nodes> with
    locally-known successors outside of <nodes> are ignored (since their
    precursors markers are also relevant to these successors).

    For example:

    # (A0 rewritten as A1)
    #
    # A0 <-1- A1 # Marker "1" is exclusive to A1

    or

    # (A0 rewritten as AX; AX rewritten as A1; AX is unknown locally)
    #
    # <-1- A0 <-2- AX <-3- A1 # Marker "2,3" are exclusive to A1

    or

    # (A0 has unknown precursors, A0 rewritten as A1 and A2 (divergence))
    #
    #          <-2- A1 # Marker "2" is exclusive to A0,A1
    #        /
    # <-1- A0
    #        \
    #          <-3- A2 # Marker "3" is exclusive to A0,A2
    #
    # in addition:
    #
    #  Markers "2,3" are exclusive to A1,A2
    #  Markers "1,2,3" are exclusive to A0,A1,A2

    See test/test-obsolete-bundle-strip.t for more examples.

    An example usage is strip. When stripping a changeset, we also want to
    strip the markers exclusive to this changeset. Otherwise we would have
    "dangling" obsolescence markers from its precursors: Obsolescence markers
    marking a node as obsolete without any successors available locally.

    As for relevant markers, the prune markers for children will be followed.
    Of course, they will only be followed if the pruned children is
    locally-known. Since the prune markers are relevant to the pruned node.
    However, while prune markers are considered relevant to the parent of the
    pruned changesets, prune markers for locally-known changeset (with no
    successors) are considered exclusive to the pruned nodes. This allows
    to strip the prune markers (with the rest of the exclusive chain) alongside
    the pruned changesets.
    """
    # running on a filtered repository would be dangerous as markers could be
    # reported as exclusive when they are relevant for other filtered nodes.
    unfi = repo.unfiltered()

    # shortcut to various useful item
    nm = unfi.changelog.nodemap
    precursorsmarkers = unfi.obsstore.predecessors
    successormarkers = unfi.obsstore.successors
    childrenmarkers = unfi.obsstore.children

    # exclusive markers (return of the function)
    exclmarkers = set()
    # we need fast membership testing
    nodes = set(nodes)
    # looking for head in the obshistory
    #
    # XXX we are ignoring all issues in regard with cycle for now.
    stack = [n for n in nodes if not _filterprunes(successormarkers.get(n, ()))]
    stack.sort()
    # nodes already stacked
    seennodes = set(stack)
    while stack:
        current = stack.pop()
        # fetch precursors markers
        markers = list(precursorsmarkers.get(current, ()))
        # extend the list with prune markers
        for mark in successormarkers.get(current, ()):
            if not mark[1]:
                markers.append(mark)
        # and markers from children (looking for prune)
        for mark in childrenmarkers.get(current, ()):
            if not mark[1]:
                markers.append(mark)
        # traverse the markers
        for mark in markers:
            if mark in exclmarkers:
                # markers already selected
                continue

            # If the markers is about the current node, select it
            #
            # (this delay the addition of markers from children)
            if mark[1] or mark[0] == current:
                exclmarkers.add(mark)

            # should we keep traversing through the precursors?
            prec = mark[0]

            # nodes in the stack or already processed
            if prec in seennodes:
                continue

            # is this a locally known node ?
            known = prec in nm
            # if locally-known and not in the <nodes> set the traversal
            # stop here.
            if known and prec not in nodes:
                continue

            # do not keep going if there are unselected markers pointing to this
            # nodes. If we end up traversing these unselected markers later the
            # node will be taken care of at that point.
            precmarkers = _filterprunes(successormarkers.get(prec))
            if precmarkers.issubset(exclmarkers):
                seennodes.add(prec)
                stack.append(prec)

    return exclmarkers
309 309
def foreground(repo, nodes):
    """return all nodes in the "foreground" of other node

    The foreground of a revision is anything reachable using parent -> children
    or precursor -> successor relation. It is very similar to "descendant" but
    augmented with obsolescence information.

    Beware that possible obsolescence cycle may result if complex situation.
    """
    repo = repo.unfiltered()
    fg = set(repo.set('%ln::', nodes))
    if repo.obsstore:
        # We only need this complicated logic if there is obsolescence
        # XXX will probably deserve an optimised revset.
        nm = repo.changelog.nodemap
        previous = -1
        # iterate until the combined set of descendants and successors
        # reaches a fixed point
        while len(fg) != previous:
            previous = len(fg)
            succs = set(c.node() for c in fg)
            mutable = [c.node() for c in fg if c.mutable()]
            succs.update(allsuccessors(repo.obsstore, mutable))
            known = (n for n in succs if n in nm)
            fg = set(repo.set('%ln::', known))
    return set(c.node() for c in fg)
335 335
# effectflag field
#
# Effect-flag is a 1-byte bit field used to store what changed between a
# changeset and its successor(s).
#
# The effect flag is stored in obs-markers metadata while we iterate on the
# information design. That's why we have the EFFECTFLAGFIELD. If we come up
# with an incompatible design for effect flag, we can store a new design under
# another field name so we don't break readers. We plan to extend the existing
# obsmarkers bit-field when the effect flag design will be stabilized.
#
# The effect-flag is placed behind an experimental flag
# `effect-flags` set to off by default.
#

# metadata key under which the effect-flag bit field is stored in a marker
EFFECTFLAGFIELD = "ef1"

# Bit values below are written into marker metadata, so they must never be
# renumbered (note DIFFCHANGED/PARENTCHANGED appear out of numeric order).
DESCCHANGED = 1 << 0 # action changed the description
METACHANGED = 1 << 1 # action change the meta
DIFFCHANGED = 1 << 3 # action change diff introduced by the changeset
PARENTCHANGED = 1 << 2 # action change the parent
USERCHANGED = 1 << 4 # the user changed
DATECHANGED = 1 << 5 # the date changed
BRANCHCHANGED = 1 << 6 # the branch changed
360 360
# extra keys that must be ignored when comparing changeset metadata
METABLACKLIST = [
    re.compile('^branch$'),
    re.compile('^.*-source$'),
    re.compile('^.*_source$'),
    re.compile('^source$'),
]

def metanotblacklisted(metaitem):
    """ Check that the key of a meta item (extrakey, extravalue) does not
    match at least one of the blacklist pattern
    """
    key = metaitem[0]
    for pattern in METABLACKLIST:
        if pattern.match(key):
            return False
    return True
375 375
376 376 def _prepare_hunk(hunk):
377 377 """Drop all information but the username and patch"""
378 378 cleanhunk = []
379 379 for line in hunk.splitlines():
380 380 if line.startswith(b'# User') or not line.startswith(b'#'):
381 381 if line.startswith(b'@@'):
382 382 line = b'@@\n'
383 383 cleanhunk.append(line)
384 384 return cleanhunk
385 385
386 386 def _getdifflines(iterdiff):
387 387 """return a cleaned up lines"""
388 388 lines = next(iterdiff, None)
389 389
390 390 if lines is None:
391 391 return lines
392 392
393 393 return _prepare_hunk(lines)
394 394
def _cmpdiff(leftctx, rightctx):
    """return True if both ctx introduce the "same diff"

    This is a first and basic implementation, with many shortcoming.
    """
    # leftctx.repo() and rightctx.repo() are the same here
    repo = leftctx.repo()
    diffopts = diffutil.diffallopts(repo.ui, {'git': True})
    # Leftctx or right ctx might be filtered, so we need to use the contexts
    # with an unfiltered repository to safely compute the diff
    leftunfi = repo.unfiltered()[leftctx.rev()]
    leftdiff = leftunfi.diff(opts=diffopts)
    rightunfi = repo.unfiltered()[rightctx.rev()]
    rightdiff = rightunfi.diff(opts=diffopts)

    # compare hunk by hunk; the 0 sentinels only ensure the loop runs at
    # least once (_getdifflines returns None when a diff is exhausted)
    left, right = (0, 0)
    while None not in (left, right):
        left = _getdifflines(leftdiff)
        right = _getdifflines(rightdiff)

        if left != right:
            return False
    return True
416 418
def geteffectflag(source, successors):
    """ From an obs-marker relation, compute what changed between the
    predecessor and the successor.

    Returns a bit field combining the *CHANGED flags defined above.
    """
    effects = 0

    for succ in successors:
        # description change
        if succ.description() != source.description():
            effects |= DESCCHANGED

        # user change
        if succ.user() != source.user():
            effects |= USERCHANGED

        # date change
        if succ.date() != source.date():
            effects |= DATECHANGED

        # branch change
        if succ.branch() != source.branch():
            effects |= BRANCHCHANGED

        # at least one parent changed
        if succ.parents() != source.parents():
            effects |= PARENTCHANGED

        # other meta changed, ignoring blacklisted keys
        succmeta = list(filter(metanotblacklisted, succ.extra().items()))
        srcmeta = list(filter(metanotblacklisted, source.extra().items()))
        if succmeta != srcmeta:
            effects |= METACHANGED

        # the introduced diff changed
        if not _cmpdiff(source, succ):
            effects |= DIFFCHANGED

    return effects
459 461
def getobsoleted(repo, tr):
    """return the set of pre-existing revisions obsoleted by a transaction"""
    noderev = repo.unfiltered().changelog.nodemap.get
    getphase = repo._phasecache.phase
    getsuccs = repo.obsstore.successors.get
    newmarkers = tr.changes['obsmarkers']
    origrepolen = tr.changes['origrepolen']
    processed = set()
    obsoleted = set()
    for mark in newmarkers:
        node = mark[0]
        rev = noderev(node)
        # skip unknown nodes, revisions already handled, and revisions
        # created by this very transaction
        if rev is None or rev in processed or rev >= origrepolen:
            continue
        processed.add(rev)
        # public changesets cannot become obsolete
        if getphase(repo, rev) == phases.public:
            continue
        # the node is obsoleted by the transaction only if every marker
        # rewriting it was added by this transaction
        if set(getsuccs(node) or []).issubset(newmarkers):
            obsoleted.add(rev)
    return obsoleted
481 483
class _succs(list):
    """small class to represent a successors with some metadata about it"""

    def __init__(self, *args, **kwargs):
        super(_succs, self).__init__(*args, **kwargs)
        # obsolescence markers walked to build this successors set
        self.markers = set()

    def copy(self):
        # duplicate both the node list and the markers metadata
        duplicate = _succs(self)
        duplicate.markers = self.markers.copy()
        return duplicate

    @util.propertycache
    def _set(self):
        # cached set view for fast membership testing; treated as immutable
        return set(self)

    def canmerge(self, other):
        # True when every node of "self" already appears in "other"
        return self._set.issubset(other._set)
501 503
def successorssets(repo, initialnode, closest=False, cache=None):
    """Return set of all latest successors of initial nodes

    The successors set of a changeset A are the group of revisions that succeed
    A. It succeeds A as a consistent whole, each revision being only a partial
    replacement. By default, the successors set contains non-obsolete
    changesets only, walking the obsolescence graph until reaching a leaf. If
    'closest' is set to True, closest successors-sets are return (the
    obsolescence walk stops on known changesets).

    This function returns the full list of successor sets which is why it
    returns a list of tuples and not just a single tuple. Each tuple is a valid
    successors set. Note that (A,) may be a valid successors set for changeset A
    (see below).

    In most cases, a changeset A will have a single element (e.g. the changeset
    A is replaced by A') in its successors set. Though, it is also common for a
    changeset A to have no elements in its successor set (e.g. the changeset
    has been pruned). Therefore, the returned list of successors sets will be
    [(A',)] or [], respectively.

    When a changeset A is split into A' and B', however, it will result in a
    successors set containing more than a single element, i.e. [(A',B')].
    Divergent changesets will result in multiple successors sets, i.e. [(A',),
    (A'')].

    If a changeset A is not obsolete, then it will conceptually have no
    successors set. To distinguish this from a pruned changeset, the successor
    set will contain itself only, i.e. [(A,)].

    Finally, final successors unknown locally are considered to be pruned
    (pruned: obsoleted without any successors). (Final: successors not affected
    by markers).

    The 'closest' mode respect the repoview filtering. For example, without
    filter it will stop at the first locally known changeset, with 'visible'
    filter it will stop on visible changesets).

    The optional `cache` parameter is a dictionary that may contains
    precomputed successors sets. It is meant to reuse the computation of a
    previous call to `successorssets` when multiple calls are made at the same
    time. The cache dictionary is updated in place. The caller is responsible
    for its life span. Code that makes multiple calls to `successorssets`
    *should* use this cache mechanism or risk a performance hit.

    Since results are different depending on the 'closest' mode, the same cache
    cannot be reused for both modes.
    """

    succmarkers = repo.obsstore.successors

    # Stack of nodes we search successors sets for
    toproceed = [initialnode]
    # set version of above list for fast loop detection
    # element added to "toproceed" must be added here
    stackedset = set(toproceed)
    if cache is None:
        cache = {}

    # This while loop is the flattened version of a recursive search for
    # successors sets
    #
    # def successorssets(x):
    #    successors = directsuccessors(x)
    #    ss = [[]]
    #    for succ in directsuccessors(x):
    #        # product as in itertools cartesian product
    #        ss = product(ss, successorssets(succ))
    #    return ss
    #
    # But we can not use plain recursive calls here:
    # - that would blow the python call stack
    # - obsolescence markers may have cycles, we need to handle them.
    #
    # The `toproceed` list act as our call stack. Every node we search
    # successors set for are stacked there.
    #
    # The `stackedset` is set version of this stack used to check if a node is
    # already stacked. This check is used to detect cycles and prevent infinite
    # loop.
    #
    # successors set of all nodes are stored in the `cache` dictionary.
    #
    # After this while loop ends we use the cache to return the successors sets
    # for the node requested by the caller.
    while toproceed:
        # Every iteration tries to compute the successors sets of the topmost
        # node of the stack: CURRENT.
        #
        # There are four possible outcomes:
        #
        #    1) We already know the successors sets of CURRENT:
        #       -> mission accomplished, pop it from the stack.
        #    2) Stop the walk:
        #       default case: Node is not obsolete
        #       closest case: Node is known at this repo filter level
        #           -> the node is its own successors sets. Add it to the cache.
        #    3) We do not know successors set of direct successors of CURRENT:
        #       -> We add those successors to the stack.
        #    4) We know successors sets of all direct successors of CURRENT:
        #       -> We can compute CURRENT successors set and add it to the
        #          cache.
        #
        current = toproceed[-1]

        # case 2 condition is a bit hairy because of closest,
        # we compute it on its own
        case2condition = ((current not in succmarkers)
                          or (closest and current != initialnode
                              and current in repo))

        if current in cache:
            # case (1): We already know the successors sets
            stackedset.remove(toproceed.pop())
        elif case2condition:
            # case (2): end of walk.
            if current in repo:
                # We have a valid successors.
                cache[current] = [_succs((current,))]
            else:
                # Final obsolete version is unknown locally.
                # Do not count that as a valid successors
                cache[current] = []
        else:
            # cases (3) and (4)
            #
            # We proceed in two phases. Phase 1 aims to distinguish case (3)
            # from case (4):
            #
            #     For each direct successors of CURRENT, we check whether its
            #     successors sets are known. If they are not, we stack the
            #     unknown node and proceed to the next iteration of the while
            #     loop. (case 3)
            #
            #     During this step, we may detect obsolescence cycles: a node
            #     with unknown successors sets but already in the call stack.
            #     In such a situation, we arbitrary set the successors sets of
            #     the node to nothing (node pruned) to break the cycle.
            #
            #     If no break was encountered we proceed to phase 2.
            #
            # Phase 2 computes successors sets of CURRENT (case 4); see details
            # in phase 2 itself.
            #
            # Note the two levels of iteration in each phase.
            # - The first one handles obsolescence markers using CURRENT as
            #   precursor (successors markers of CURRENT).
            #
            #   Having multiple entry here means divergence.
            #
            # - The second one handles successors defined in each marker.
            #
            #   Having none means pruned node, multiple successors means split,
            #   single successors are standard replacement.
            #
            for mark in sorted(succmarkers[current]):
                for suc in mark[1]:
                    if suc not in cache:
                        if suc in stackedset:
                            # cycle breaking
                            cache[suc] = []
                        else:
                            # case (3) If we have not computed successors sets
                            # of one of those successors we add it to the
                            # `toproceed` stack and stop all work for this
                            # iteration.
                            toproceed.append(suc)
                            stackedset.add(suc)
                            break
                else:
                    continue
                break
            else:
                # case (4): we know all successors sets of all direct
                # successors
                #
                # Successors set contributed by each marker depends on the
                # successors sets of all its "successors" node.
                #
                # Each different marker is a divergence in the obsolescence
                # history. It contributes successors sets distinct from other
                # markers.
                #
                # Within a marker, a successor may have divergent successors
                # sets. In such a case, the marker will contribute multiple
                # divergent successors sets. If multiple successors have
                # divergent successors sets, a Cartesian product is used.
                #
                # At the end we post-process successors sets to remove
                # duplicated entry and successors set that are strict subset of
                # another one.
                succssets = []
                for mark in sorted(succmarkers[current]):
                    # successors sets contributed by this marker
                    base = _succs()
                    base.markers.add(mark)
                    markss = [base]
                    for suc in mark[1]:
                        # cardinal product with previous successors
                        productresult = []
                        for prefix in markss:
                            for suffix in cache[suc]:
                                newss = prefix.copy()
                                newss.markers.update(suffix.markers)
                                for part in suffix:
                                    # do not duplicated entry in successors set
                                    # first entry wins.
                                    if part not in newss:
                                        newss.append(part)
                                productresult.append(newss)
                        markss = productresult
                    succssets.extend(markss)
                # remove duplicated and subset
                seen = []
                final = []
                candidates = sorted((s for s in succssets if s),
                                    key=len, reverse=True)
                for cand in candidates:
                    for seensuccs in seen:
                        if cand.canmerge(seensuccs):
                            seensuccs.markers.update(cand.markers)
                            break
                    else:
                        final.append(cand)
                        seen.append(cand)
                final.reverse()  # put small successors set first
                cache[current] = final
    return cache[initialnode]
730 732
def successorsandmarkers(repo, ctx):
    """compute the raw data needed for computing obsfate
    Returns a list of dict, one dict per successors set
    """
    if not ctx.obsolete():
        return None

    ssets = successorssets(repo, ctx.node(), closest=True)

    # closestsuccessors returns an empty list for pruned revisions, remap it
    # into a list containing an empty list for future processing
    if not ssets:
        ssets = [[]]

    succsmap = repo.obsstore.successors
    fullsuccessorsets = []  # successor set + markers
    for sset in ssets:
        if sset:
            fullsuccessorsets.append(sset)
            continue

        # successorsset returned an empty set() because ctx or one of its
        # successors is pruned.
        # In this case, walk the obs-markers tree again starting with ctx
        # and find the relevant pruning obs-makers, the ones without
        # successors.
        # Having these markers allow us to compute some information about
        # its fate, like who pruned this changeset and when.

        # XXX we do not catch all prune markers (eg rewritten then pruned)
        # (fix me later)
        foundprune = False
        for mark in succsmap.get(ctx.node(), ()):
            if not mark[1]:
                foundprune = True
                pruned = _succs()
                pruned.markers.add(mark)
                fullsuccessorsets.append(pruned)
        if not foundprune:
            fullsuccessorsets.append(_succs())

    return [{'successors': sset, 'markers': sset.markers}
            for sset in fullsuccessorsets]
777 779
778 780 def _getobsfate(successorssets):
779 781 """ Compute a changeset obsolescence fate based on its successorssets.
780 782 Successors can be the tipmost ones or the immediate ones. This function
781 783 return values are not meant to be shown directly to users, it is meant to
782 784 be used by internal functions only.
783 785 Returns one fate from the following values:
784 786 - pruned
785 787 - diverged
786 788 - superseded
787 789 - superseded_split
788 790 """
789 791
790 792 if len(successorssets) == 0:
791 793 # The commit has been pruned
792 794 return 'pruned'
793 795 elif len(successorssets) > 1:
794 796 return 'diverged'
795 797 else:
796 798 # No divergence, only one set of successors
797 799 successors = successorssets[0]
798 800
799 801 if len(successors) == 1:
800 802 return 'superseded'
801 803 else:
802 804 return 'superseded_split'
803 805
def obsfateverb(successorset, markers):
    """ Return the verb summarizing the successorset and potentially using
    information from the markers
    """
    # the markers argument is currently unused but kept for API symmetry
    # with the other obsfate helpers
    if not successorset:
        return 'pruned'
    if len(successorset) == 1:
        return 'rewritten'
    return 'split'
815 817
def markersdates(markers):
    """returns the list of dates for a list of markers
    """
    # field 4 of a marker holds its (unixtime, offset) date
    return [markerdata[4] for markerdata in markers]
820 822
def markersusers(markers):
    """ Returns a sorted list of markers users without duplicates
    """
    users = set()
    for markerdata in markers:
        meta = dict(markerdata[3])
        user = meta.get('user')
        if user:
            users.add(encoding.tolocal(user))
    return sorted(users)
829 831
def markersoperations(markers):
    """ Returns a sorted list of markers operations without duplicates
    """
    operations = set()
    for markerdata in markers:
        meta = dict(markerdata[3])
        operation = meta.get('operation')
        if operation:
            operations.add(operation)
    return sorted(operations)
838 840
def obsfateprinter(ui, repo, successors, markers, formatctx):
    """ Build a obsfate string for a single successorset using all obsfate
    related function defined in obsutil
    """
    quiet = ui.quiet
    verbose = ui.verbose
    normal = not verbose and not quiet

    pieces = []

    # verb summarizing what happened to the changeset
    pieces.append(obsfateverb(successors, markers))

    # operations (amend, rebase, ...)
    operations = markersoperations(markers)
    if operations:
        pieces.append(" using %s" % ", ".join(operations))

    # successor changesets
    if successors:
        formatted = [formatctx(repo[succ]) for succ in successors]
        pieces.append(" as %s" % ", ".join(formatted))

    # users; hide the sole current user unless verbose, to reduce the
    # amount of information
    users = markersusers(markers)
    if not verbose:
        currentuser = ui.username(acceptempty=True)
        if len(users) == 1 and currentuser in users:
            users = None

    if (verbose or normal) and users:
        pieces.append(" by %s" % ", ".join(users))

    # dates (verbose only)
    dates = markersdates(markers)

    if dates and verbose:
        mindate = min(dates)
        maxdate = max(dates)

        if mindate == maxdate:
            fmtdate = dateutil.datestr(mindate, '%Y-%m-%d %H:%M %1%2')
            pieces.append(" (at %s)" % fmtdate)
        else:
            fmtmin = dateutil.datestr(mindate, '%Y-%m-%d %H:%M %1%2')
            fmtmax = dateutil.datestr(maxdate, '%Y-%m-%d %H:%M %1%2')
            pieces.append(" (between %s and %s)" % (fmtmin, fmtmax))

    return "".join(pieces)
890 892
891 893
# Messages shown when a hidden (obsolete) revision is accessed directly,
# keyed by the obsfate kind computed by _getobsfate().
filteredmsgtable = {
    "pruned": _("hidden revision '%s' is pruned"),
    "diverged": _("hidden revision '%s' has diverged"),
    "superseded": _("hidden revision '%s' was rewritten as: %s"),
    "superseded_split": _("hidden revision '%s' was split as: %s"),
    "superseded_split_several": _("hidden revision '%s' was split as: %s and "
                                  "%d more"),
}
900 902
def _getfilteredreason(repo, changeid, ctx):
    """return a human-friendly string on why a obsolete changeset is hidden
    """
    successors = successorssets(repo, ctx.node())
    fate = _getobsfate(successors)

    # Be more precise in case the revision is superseded
    if fate == 'pruned':
        return filteredmsgtable['pruned'] % changeid
    if fate == 'diverged':
        return filteredmsgtable['diverged'] % changeid
    if fate == 'superseded':
        # a single successor set with a single successor
        successor = nodemod.short(successors[0][0])
        return filteredmsgtable['superseded'] % (changeid, successor)
    if fate == 'superseded_split':
        shortened = [nodemod.short(node_id) for node_id in successors[0]]

        if len(shortened) <= 2:
            # few enough successors: list them all
            return filteredmsgtable['superseded_split'] % (
                changeid, ', '.join(shortened))

        # only show the first two successors and a remainder count
        args = (changeid, ', '.join(shortened[:2]), len(shortened) - 2)
        return filteredmsgtable['superseded_split_several'] % args
930 932
def divergentsets(repo, ctx):
    """Compute sets of commits divergent with a given one"""
    cache = {}
    base = {}
    for pred in allpredecessors(repo.obsstore, [ctx.node()]):
        if pred == ctx.node():
            # a node can't be a base for divergence with itself
            continue
        for succset in successorssets(repo, pred, cache):
            if ctx.node() in succset:
                # we are only interested in *other* successor sets
                continue
            key = tuple(succset)
            if key in base:
                # we already know the latest base for this divergency
                continue
            base[key] = pred
    return [{'divergentnodes': divset, 'commonpredecessor': b}
            for divset, b in base.iteritems()]
950 952
def whyunstable(repo, ctx):
    # Collect one entry per instability cause affecting ctx.
    result = []
    if ctx.orphan():
        # an orphan has at least one orphan or obsolete parent
        for parent in ctx.parents():
            if parent.orphan():
                kind = 'orphan'
            elif parent.obsolete():
                kind = 'obsolete'
            else:
                continue
            result.append({'instability': 'orphan',
                           'reason': '%s parent' % kind,
                           'node': parent.hex()})
    if ctx.phasedivergent():
        # phase-divergence comes from an immutable (public) predecessor
        predecessors = allpredecessors(repo.obsstore, [ctx.node()],
                                       ignoreflags=bumpedfix)
        for p in predecessors:
            if p in repo and not repo[p].mutable():
                result.append({'instability': 'phase-divergent',
                               'reason': 'immutable predecessor',
                               'node': repo[p].hex()})
    if ctx.contentdivergent():
        # content-divergence: other successor sets of a common predecessor
        for dset in divergentsets(repo, ctx):
            divnodes = [repo[n] for n in dset['divergentnodes']]
            result.append({'instability': 'content-divergent',
                           'divergentnodes': divnodes,
                           'reason': 'predecessor',
                           'node': nodemod.hex(dset['commonpredecessor'])})
    return result
General Comments 0
You need to be logged in to leave comments. Login now