##// END OF EJS Templates
obsutil: drop deprecated methods (API)...
Matt Harbison -
r35911:78f33ded default
parent child Browse files
Show More
@@ -1,906 +1,891
1 1 # obsutil.py - utility functions for obsolescence
2 2 #
3 3 # Copyright 2017 Boris Feld <boris.feld@octobus.net>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import re
11 11
12 12 from .i18n import _
13 13 from . import (
14 14 node as nodemod,
15 15 phases,
16 16 util,
17 17 )
18 18
class marker(object):
    """Wrap obsolete marker raw data"""

    def __init__(self, repo, data):
        # the repo argument will be used to create changectx in later version
        self._repo = repo
        # raw marker tuple: (prednode, succnodes, flags, metadata, date,
        # parents)
        self._data = data
        self._decodedmeta = None

    def __hash__(self):
        return hash(self._data)

    def __eq__(self, other):
        # two wrappers are equal when they wrap the same raw tuple
        if type(other) != type(self):
            return False
        return self._data == other._data

    def prednode(self):
        """Predecessor changeset node identifier"""
        return self._data[0]

    def succnodes(self):
        """List of successor changesets node identifiers"""
        return self._data[1]

    def parentnodes(self):
        """Parents of the predecessors (None if not recorded)"""
        return self._data[5]

    def metadata(self):
        """Decoded metadata dictionary"""
        return dict(self._data[3])

    def date(self):
        """Creation date as (unixtime, offset)"""
        return self._data[4]

    def flags(self):
        """The flags field of the marker"""
        return self._data[2]
def getmarkers(repo, nodes=None, exclusive=False):
    """Yield ``marker`` objects known to a repository.

    When ``nodes`` is given, only markers "relevant" to those nodes are
    yielded; with ``exclusive`` also set, only the markers exclusive to
    those nodes are considered.
    """
    if nodes is not None:
        if exclusive:
            rawmarkers = exclusivemarkers(repo, nodes)
        else:
            rawmarkers = repo.obsstore.relevantmarkers(nodes)
    else:
        rawmarkers = repo.obsstore

    for rawdata in rawmarkers:
        yield marker(repo, rawdata)
80 74
def closestpredecessors(repo, nodeid):
    """yield the list of next predecessors pointing on visible changectx nodes

    This function respects the repoview filtering: filtered revisions are
    considered missing and traversal continues through them.
    """
    predmarkers = repo.obsstore.predecessors
    pending = [nodeid]
    visited = set(pending)

    while pending:
        node = pending.pop()
        for markdata in predmarkers.get(node, ()):
            candidate = markdata[0]

            # basic cycle protection
            if candidate in visited:
                continue
            visited.add(candidate)

            if candidate in repo:
                # visible at this filter level: this is a closest predecessor
                yield candidate
            else:
                # unknown/filtered: keep walking its own predecessors
                pending.append(candidate)
108 102
def allpredecessors(obsstore, nodes, ignoreflags=0):
    """Yield node for every precursor of <nodes>.

    Some precursors may be unknown locally.

    This is a linear yield unsuited to detecting folded changesets. It includes
    the initial nodes too.

    Markers whose flags intersect ``ignoreflags`` are not followed.
    """
    # NOTE: the deprecated 'allprecursors' alias (scheduled for removal in
    # 4.4) has been dropped; callers must use 'allpredecessors'.
    remaining = set(nodes)
    seen = set(remaining)
    while remaining:
        current = remaining.pop()
        yield current
        for mark in obsstore.predecessors.get(current, ()):
            # ignore marker flagged with specified flag
            if mark[2] & ignoreflags:
                continue
            suc = mark[0]
            if suc not in seen:
                seen.add(suc)
                remaining.add(suc)
139 124
def allsuccessors(obsstore, nodes, ignoreflags=0):
    """Yield node for every successor of <nodes>.

    Some successors may be unknown locally.

    This is a linear yield unsuited to detecting split changesets. It
    includes the initial nodes too.
    """
    pending = set(nodes)
    emitted = set(pending)
    while pending:
        node = pending.pop()
        yield node
        for mark in obsstore.successors.get(node, ()):
            # skip markers carrying any of the ignored flags
            if mark[2] & ignoreflags:
                continue
            for successor in mark[1]:
                if successor not in emitted:
                    emitted.add(successor)
                    pending.add(successor)
160 145
161 146 def _filterprunes(markers):
162 147 """return a set with no prune markers"""
163 148 return set(m for m in markers if m[1])
164 149
def exclusivemarkers(repo, nodes):
    """set of markers relevant to "nodes" but no other locally-known nodes

    This function compute the set of markers "exclusive" to a locally-known
    node. This means we walk the markers starting from <nodes> until we reach a
    locally-known precursors outside of <nodes>. Element of <nodes> with
    locally-known successors outside of <nodes> are ignored (since their
    precursors markers are also relevant to these successors).

    For example:

        # (A0 rewritten as A1)
        #
        # A0 <-1- A1 # Marker "1" is exclusive to A1

        or

        # (A0 rewritten as AX; AX rewritten as A1; AX is unknown locally)
        #
        # <-1- A0 <-2- AX <-3- A1 # Marker "2,3" are exclusive to A1

        or

        # (A0 has unknown precursors, A0 rewritten as A1 and A2 (divergence))
        #
        #  <-2- A1 # Marker "2" is exclusive to A0,A1
        #        /
        # <-1- A0
        #        \
        #  <-3- A2 # Marker "3" is exclusive to A0,A2
        #
        # in addition:
        #
        #  Markers "2,3" are exclusive to A1,A2
        #  Markers "1,2,3" are exclusive to A0,A1,A2

    See test/test-obsolete-bundle-strip.t for more examples.

    An example usage is strip. When stripping a changeset, we also want to
    strip the markers exclusive to this changeset. Otherwise we would have
    "dangling" obsolescence markers from its precursors: obsolescence markers
    marking a node as obsolete without any successors available locally.

    As for relevant markers, the prune markers for children will be followed.
    Of course, they will only be followed if the pruned children is
    locally-known. Since the prune markers are relevant to the pruned node.
    However, while prune markers are considered relevant to the parent of the
    pruned changesets, prune markers for locally-known changeset (with no
    successors) are considered exclusive to the pruned nodes. This allows
    to strip the prune markers (with the rest of the exclusive chain) alongside
    the pruned changesets.
    """
    # running on a filtered repository would be dangerous as markers could be
    # reported as exclusive when they are relevant for other filtered nodes.
    unfi = repo.unfiltered()

    # shortcut to various useful item
    nm = unfi.changelog.nodemap
    precursorsmarkers = unfi.obsstore.predecessors
    successormarkers = unfi.obsstore.successors
    childrenmarkers = unfi.obsstore.children

    # exclusive markers (return of the function)
    exclmarkers = set()
    # we need fast membership testing
    nodes = set(nodes)
    # looking for head in the obshistory
    #
    # XXX we are ignoring all issues in regard with cycle for now.
    # start only from nodes without non-prune successors: elements of <nodes>
    # rewritten into something else are handled through their successors
    stack = [n for n in nodes if not _filterprunes(successormarkers.get(n, ()))]
    stack.sort()
    # nodes already stacked
    seennodes = set(stack)
    while stack:
        current = stack.pop()
        # fetch precursors markers
        markers = list(precursorsmarkers.get(current, ()))
        # extend the list with prune markers
        for mark in successormarkers.get(current, ()):
            if not mark[1]:
                markers.append(mark)
        # and markers from children (looking for prune)
        for mark in childrenmarkers.get(current, ()):
            if not mark[1]:
                markers.append(mark)
        # traverse the markers
        for mark in markers:
            if mark in exclmarkers:
                # markers already selected
                continue

            # If the markers is about the current node, select it
            #
            # (this delay the addition of markers from children)
            if mark[1] or mark[0] == current:
                exclmarkers.add(mark)

            # should we keep traversing through the precursors?
            prec = mark[0]

            # nodes in the stack or already processed
            if prec in seennodes:
                continue

            # is this a locally known node ?
            known = prec in nm
            # if locally-known and not in the <nodes> set the traversal
            # stop here.
            if known and prec not in nodes:
                continue

            # do not keep going if there are unselected markers pointing to this
            # nodes. If we end up traversing these unselected markers later the
            # node will be taken care of at that point.
            # NOTE(review): .get() without a default assumes 'prec' always has
            # successor markers here (it reached us through one) — confirm.
            precmarkers = _filterprunes(successormarkers.get(prec))
            if precmarkers.issubset(exclmarkers):
                seennodes.add(prec)
                stack.append(prec)

    return exclmarkers
285 270
def foreground(repo, nodes):
    """return all nodes in the "foreground" of other node

    The foreground of a revision is anything reachable using parent -> children
    or precursor -> successor relation. It is very similar to "descendant" but
    augmented with obsolescence information.

    Beware that obsolescence cycles may cause surprising results in complex
    situations.
    """
    repo = repo.unfiltered()
    foreground = set(repo.set('%ln::', nodes))
    if repo.obsstore:
        # We only need this complicated logic if there is obsolescence
        # XXX will probably deserve an optimised revset.
        nm = repo.changelog.nodemap
        plen = -1
        # compute the whole set of successors or descendants: iterate until
        # the set stops growing (fixed point)
        while len(foreground) != plen:
            plen = len(foreground)
            succs = set(c.node() for c in foreground)
            # only mutable changesets can have successors
            mutable = [c.node() for c in foreground if c.mutable()]
            succs.update(allsuccessors(repo.obsstore, mutable))
            # drop successors unknown locally before re-expanding descendants
            known = (n for n in succs if n in nm)
            foreground = set(repo.set('%ln::', known))
    return set(c.node() for c in foreground)
311 296
# effectflag field
#
# Effect-flag is a 1-byte bit field used to store what changed between a
# changeset and its successor(s).
#
# The effect flag is stored in obs-markers metadata while we iterate on the
# information design. That's why we have the EFFECTFLAGFIELD. If we come up
# with an incompatible design for effect flag, we can store a new design under
# another field name so we don't break readers. We plan to extend the existing
# obsmarkers bit-field when the effect flag design will be stabilized.
#
# The effect-flag is placed behind an experimental flag
# `effect-flags` set to off by default.
#

# metadata key under which the effect flag is stored in a marker
EFFECTFLAGFIELD = "ef1"

# individual bits of the effect flag (DIFFCHANGED/PARENTCHANGED appear out of
# numeric order below; the bit values, not the listing order, are the contract)
DESCCHANGED = 1 << 0 # action changed the description
METACHANGED = 1 << 1 # action change the meta
DIFFCHANGED = 1 << 3 # action change diff introduced by the changeset
PARENTCHANGED = 1 << 2 # action change the parent
USERCHANGED = 1 << 4 # the user changed
DATECHANGED = 1 << 5 # the date changed
BRANCHCHANGED = 1 << 6 # the branch changed
336 321
# extra keys whose changes must NOT count as a metadata change for the
# effect flag computation
METABLACKLIST = [
    re.compile('^branch$'),
    re.compile('^.*-source$'),
    re.compile('^.*_source$'),
    re.compile('^source$'),
]

def metanotblacklisted(metaitem):
    """Return True when the key of a meta item (extrakey, extravalue)
    matches none of the blacklist patterns above.
    """
    extrakey = metaitem[0]
    for pattern in METABLACKLIST:
        if pattern.match(extrakey):
            return False
    return True
351 336
352 337 def _prepare_hunk(hunk):
353 338 """Drop all information but the username and patch"""
354 339 cleanhunk = []
355 340 for line in hunk.splitlines():
356 341 if line.startswith(b'# User') or not line.startswith(b'#'):
357 342 if line.startswith(b'@@'):
358 343 line = b'@@\n'
359 344 cleanhunk.append(line)
360 345 return cleanhunk
361 346
362 347 def _getdifflines(iterdiff):
363 348 """return a cleaned up lines"""
364 349 lines = next(iterdiff, None)
365 350
366 351 if lines is None:
367 352 return lines
368 353
369 354 return _prepare_hunk(lines)
370 355
def _cmpdiff(leftctx, rightctx):
    """return True if both ctx introduce the "same diff"

    This is a first and basic implementation, with many shortcomings.
    """

    # Leftctx or right ctx might be filtered, so we need to use the contexts
    # with an unfiltered repository to safely compute the diff
    leftunfi = leftctx._repo.unfiltered()[leftctx.rev()]
    leftdiff = leftunfi.diff(git=1)
    rightunfi = rightctx._repo.unfiltered()[rightctx.rev()]
    rightdiff = rightunfi.diff(git=1)

    left, right = (0, 0)
    # walk both diffs hunk by hunk until either side is exhausted (None);
    # _getdifflines normalizes hunks so offsets do not cause false mismatches
    while None not in (left, right):
        left = _getdifflines(leftdiff)
        right = _getdifflines(rightdiff)

        # mismatching hunk (or one side ending early) means different diffs
        if left != right:
            return False
    return True
392 377
def geteffectflag(relation):
    """ From an obs-marker relation, compute what changed between the
    predecessor and the successor.

    `relation` is a (source, successors) pair. Returns a bit field combining
    the DESCCHANGED/METACHANGED/... flags defined above, accumulated over
    every successor in the relation.
    """
    effects = 0

    source = relation[0]

    for changectx in relation[1]:
        # Check if description has changed
        if changectx.description() != source.description():
            effects |= DESCCHANGED

        # Check if user has changed
        if changectx.user() != source.user():
            effects |= USERCHANGED

        # Check if date has changed
        if changectx.date() != source.date():
            effects |= DATECHANGED

        # Check if branch has changed
        if changectx.branch() != source.branch():
            effects |= BRANCHCHANGED

        # Check if at least one of the parent has changed
        if changectx.parents() != source.parents():
            effects |= PARENTCHANGED

        # Check if other meta has changed, ignoring blacklisted keys
        # (branch, *source) which are expected to differ across rewrites
        changeextra = changectx.extra().items()
        ctxmeta = list(filter(metanotblacklisted, changeextra))

        sourceextra = source.extra().items()
        srcmeta = list(filter(metanotblacklisted, sourceextra))

        if ctxmeta != srcmeta:
            effects |= METACHANGED

        # Check if the diff has changed
        if not _cmpdiff(source, changectx):
            effects |= DIFFCHANGED

    return effects
437 422
def getobsoleted(repo, tr):
    """return the set of pre-existing revisions obsoleted by a transaction

    A revision counts as newly obsoleted when every marker making it obsolete
    was added by this transaction (otherwise it was already obsolete before).
    """
    # use the unfiltered changelog: obsoleted revisions are hidden in the
    # filtered view by definition
    torev = repo.unfiltered().changelog.nodemap.get
    phase = repo._phasecache.phase
    succsmarkers = repo.obsstore.successors.get
    public = phases.public
    addedmarkers = tr.changes.get('obsmarkers')
    addedrevs = tr.changes.get('revs')
    seenrevs = set()
    obsoleted = set()
    for mark in addedmarkers:
        node = mark[0]
        rev = torev(node)
        # skip unknown nodes, already-processed revs, and revisions added by
        # this same transaction (they are not "pre-existing")
        if rev is None or rev in seenrevs or rev in addedrevs:
            continue
        seenrevs.add(rev)
        # public changesets cannot be obsoleted
        if phase(repo, rev) == public:
            continue
        # obsoleted by this transaction only if all its markers are new
        if set(succsmarkers(node) or []).issubset(addedmarkers):
            obsoleted.add(rev)
    return obsoleted
459 444
class _succs(list):
    """small class to represent a successors with some metadata about it"""

    def __init__(self, *args, **kwargs):
        super(_succs, self).__init__(*args, **kwargs)
        # obsolescence markers that contributed to this successors set
        self.markers = set()

    def copy(self):
        """Return an independent copy (shallow list, copied marker set)."""
        duplicate = _succs(self)
        duplicate.markers = self.markers.copy()
        return duplicate

    @util.propertycache
    def _set(self):
        # immutable set view of the list, cached for repeated subset tests
        return set(self)

    def canmerge(self, other):
        """True when every node of this set is already in ``other``."""
        return self._set.issubset(other._set)
479 464
def successorssets(repo, initialnode, closest=False, cache=None):
    """Return set of all latest successors of initial nodes

    The successors set of a changeset A are the group of revisions that succeed
    A. It succeeds A as a consistent whole, each revision being only a partial
    replacement. By default, the successors set contains non-obsolete
    changesets only, walking the obsolescence graph until reaching a leaf. If
    'closest' is set to True, closest successors-sets are returned (the
    obsolescence walk stops on known changesets).

    This function returns the full list of successor sets which is why it
    returns a list of tuples and not just a single tuple. Each tuple is a valid
    successors set. Note that (A,) may be a valid successors set for changeset A
    (see below).

    In most cases, a changeset A will have a single element (e.g. the changeset
    A is replaced by A') in its successors set. Though, it is also common for a
    changeset A to have no elements in its successor set (e.g. the changeset
    has been pruned). Therefore, the returned list of successors sets will be
    [(A',)] or [], respectively.

    When a changeset A is split into A' and B', however, it will result in a
    successors set containing more than a single element, i.e. [(A',B')].
    Divergent changesets will result in multiple successors sets, i.e. [(A',),
    (A'')].

    If a changeset A is not obsolete, then it will conceptually have no
    successors set. To distinguish this from a pruned changeset, the successor
    set will contain itself only, i.e. [(A,)].

    Finally, final successors unknown locally are considered to be pruned
    (pruned: obsoleted without any successors). (Final: successors not affected
    by markers).

    The 'closest' mode respects the repoview filtering. For example, without
    filter it will stop at the first locally known changeset, with 'visible'
    filter it will stop on visible changesets).

    The optional `cache` parameter is a dictionary that may contain
    precomputed successors sets. It is meant to reuse the computation of a
    previous call to `successorssets` when multiple calls are made at the same
    time. The cache dictionary is updated in place. The caller is responsible
    for its life span. Code that makes multiple calls to `successorssets`
    *should* use this cache mechanism or risk a performance hit.

    Since results are different depending on the 'closest' mode, the same
    cache cannot be reused for both modes.
    """

    succmarkers = repo.obsstore.successors

    # Stack of nodes we search successors sets for
    toproceed = [initialnode]
    # set version of above list for fast loop detection
    # element added to "toproceed" must be added here
    stackedset = set(toproceed)
    if cache is None:
        cache = {}

    # This while loop is the flattened version of a recursive search for
    # successors sets
    #
    # def successorssets(x):
    #    successors = directsuccessors(x)
    #    ss = [[]]
    #    for succ in directsuccessors(x):
    #        # product as in itertools cartesian product
    #        ss = product(ss, successorssets(succ))
    #    return ss
    #
    # But we can not use plain recursive calls here:
    # - that would blow the python call stack
    # - obsolescence markers may have cycles, we need to handle them.
    #
    # The `toproceed` list act as our call stack. Every node we search
    # successors set for are stacked there.
    #
    # The `stackedset` is set version of this stack used to check if a node is
    # already stacked. This check is used to detect cycles and prevent infinite
    # loop.
    #
    # successors set of all nodes are stored in the `cache` dictionary.
    #
    # After this while loop ends we use the cache to return the successors sets
    # for the node requested by the caller.
    while toproceed:
        # Every iteration tries to compute the successors sets of the topmost
        # node of the stack: CURRENT.
        #
        # There are four possible outcomes:
        #
        # 1) We already know the successors sets of CURRENT:
        #    -> mission accomplished, pop it from the stack.
        # 2) Stop the walk:
        #    default case: Node is not obsolete
        #    closest case: Node is known at this repo filter level
        #      -> the node is its own successors sets. Add it to the cache.
        # 3) We do not know successors set of direct successors of CURRENT:
        #      -> We add those successors to the stack.
        # 4) We know successors sets of all direct successors of CURRENT:
        #      -> We can compute CURRENT successors set and add it to the
        #         cache.
        #
        current = toproceed[-1]

        # case 2 condition is a bit hairy because of closest,
        # we compute it on its own
        case2condition = ((current not in succmarkers)
                          or (closest and current != initialnode
                              and current in repo))

        if current in cache:
            # case (1): We already know the successors sets
            stackedset.remove(toproceed.pop())
        elif case2condition:
            # case (2): end of walk.
            if current in repo:
                # We have a valid successors.
                cache[current] = [_succs((current,))]
            else:
                # Final obsolete version is unknown locally.
                # Do not count that as a valid successors
                cache[current] = []
        else:
            # cases (3) and (4)
            #
            # We proceed in two phases. Phase 1 aims to distinguish case (3)
            # from case (4):
            #
            #     For each direct successors of CURRENT, we check whether its
            #     successors sets are known. If they are not, we stack the
            #     unknown node and proceed to the next iteration of the while
            #     loop. (case 3)
            #
            #     During this step, we may detect obsolescence cycles: a node
            #     with unknown successors sets but already in the call stack.
            #     In such a situation, we arbitrary set the successors sets of
            #     the node to nothing (node pruned) to break the cycle.
            #
            #     If no break was encountered we proceed to phase 2.
            #
            # Phase 2 computes successors sets of CURRENT (case 4); see details
            # in phase 2 itself.
            #
            # Note the two levels of iteration in each phase.
            # - The first one handles obsolescence markers using CURRENT as
            #   precursor (successors markers of CURRENT).
            #
            #   Having multiple entry here means divergence.
            #
            # - The second one handles successors defined in each marker.
            #
            #   Having none means pruned node, multiple successors means split,
            #   single successors are standard replacement.
            #
            for mark in sorted(succmarkers[current]):
                for suc in mark[1]:
                    if suc not in cache:
                        if suc in stackedset:
                            # cycle breaking
                            cache[suc] = []
                        else:
                            # case (3) If we have not computed successors sets
                            # of one of those successors we add it to the
                            # `toproceed` stack and stop all work for this
                            # iteration.
                            toproceed.append(suc)
                            stackedset.add(suc)
                            break
                else:
                    continue
                break
            else:
                # case (4): we know all successors sets of all direct
                # successors
                #
                # Successors set contributed by each marker depends on the
                # successors sets of all its "successors" node.
                #
                # Each different marker is a divergence in the obsolescence
                # history. It contributes successors sets distinct from other
                # markers.
                #
                # Within a marker, a successor may have divergent successors
                # sets. In such a case, the marker will contribute multiple
                # divergent successors sets. If multiple successors have
                # divergent successors sets, a Cartesian product is used.
                #
                # At the end we post-process successors sets to remove
                # duplicated entry and successors set that are strict subset of
                # another one.
                succssets = []
                for mark in sorted(succmarkers[current]):
                    # successors sets contributed by this marker
                    base = _succs()
                    base.markers.add(mark)
                    markss = [base]
                    for suc in mark[1]:
                        # cardinal product with previous successors
                        productresult = []
                        for prefix in markss:
                            for suffix in cache[suc]:
                                newss = prefix.copy()
                                newss.markers.update(suffix.markers)
                                for part in suffix:
                                    # do not duplicated entry in successors set
                                    # first entry wins.
                                    if part not in newss:
                                        newss.append(part)
                                productresult.append(newss)
                        markss = productresult
                    succssets.extend(markss)
                # remove duplicated and subset
                seen = []
                final = []
                candidates = sorted((s for s in succssets if s),
                                    key=len, reverse=True)
                for cand in candidates:
                    for seensuccs in seen:
                        if cand.canmerge(seensuccs):
                            seensuccs.markers.update(cand.markers)
                            break
                    else:
                        final.append(cand)
                        seen.append(cand)
                final.reverse()  # put small successors set first
                cache[current] = final
    return cache[initialnode]
708 693
def successorsandmarkers(repo, ctx):
    """compute the raw data needed for computing obsfate

    Returns a list of dicts, one per successors set, each of the form
    {'successors': <_succs>, 'markers': <set of markers>}, or None when
    ``ctx`` is not obsolete.
    """
    if not ctx.obsolete():
        return None

    ssets = successorssets(repo, ctx.node(), closest=True)

    # closestsuccessors returns an empty list for pruned revisions, remap it
    # into a list containing an empty list for future processing
    if ssets == []:
        ssets = [[]]

    # Try to recover pruned markers
    succsmap = repo.obsstore.successors
    fullsuccessorsets = []  # successor set + markers
    for sset in ssets:
        if sset:
            fullsuccessorsets.append(sset)
        else:
            # successorsset return an empty set() when ctx or one of its
            # successors is pruned.
            # In this case, walk the obs-markers tree again starting with ctx
            # and find the relevant pruning obs-makers, the ones without
            # successors.
            # Having these markers allow us to compute some information about
            # its fate, like who pruned this changeset and when.

            # XXX we do not catch all prune markers (eg rewritten then pruned)
            # (fix me later)
            foundany = False
            for mark in succsmap.get(ctx.node(), ()):
                if not mark[1]:
                    foundany = True
                    sset = _succs()
                    sset.markers.add(mark)
                    fullsuccessorsets.append(sset)
            if not foundany:
                # no prune marker recovered: emit an empty successors set
                fullsuccessorsets.append(_succs())

    values = []
    for sset in fullsuccessorsets:
        values.append({'successors': sset, 'markers': sset.markers})

    return values
755 740
756 741 def _getobsfate(successorssets):
757 742 """ Compute a changeset obsolescence fate based on its successorssets.
758 743 Successors can be the tipmost ones or the immediate ones. This function
759 744 return values are not meant to be shown directly to users, it is meant to
760 745 be used by internal functions only.
761 746 Returns one fate from the following values:
762 747 - pruned
763 748 - diverged
764 749 - superseded
765 750 - superseded_split
766 751 """
767 752
768 753 if len(successorssets) == 0:
769 754 # The commit has been pruned
770 755 return 'pruned'
771 756 elif len(successorssets) > 1:
772 757 return 'diverged'
773 758 else:
774 759 # No divergence, only one set of successors
775 760 successors = successorssets[0]
776 761
777 762 if len(successors) == 1:
778 763 return 'superseded'
779 764 else:
780 765 return 'superseded_split'
781 766
def obsfateverb(successorset, markers):
    """ Return the verb summarizing the successorset and potentially using
    information from the markers
    """
    if not successorset:
        return 'pruned'
    if len(successorset) == 1:
        return 'rewritten'
    return 'split'
793 778
def markersdates(markers):
    """returns the list of dates for a list of markers

    The date lives at index 4 of the raw marker tuple.
    """
    return [mark[4] for mark in markers]
798 783
def markersusers(markers):
    """ Returns a sorted list of markers users without duplicates
    """
    users = set()
    for mark in markers:
        # metadata lives at index 3 as a tuple of (key, value) pairs
        user = dict(mark[3]).get('user')
        if user:
            users.add(user)
    return sorted(users)
806 791
def markersoperations(markers):
    """ Returns a sorted list of markers operations without duplicates
    """
    operations = set()
    for mark in markers:
        # metadata lives at index 3 as a tuple of (key, value) pairs
        operation = dict(mark[3]).get('operation')
        if operation:
            operations.add(operation)
    return sorted(operations)
815 800
def obsfateprinter(successors, markers, ui):
    """ Build a obsfate string for a single successorset using all obsfate
    related function defined in obsutil

    NOTE(review): `successors` is expected to carry a `joinfmt` attribute
    used to format each node — confirm against callers.
    """
    quiet = ui.quiet
    verbose = ui.verbose
    normal = not verbose and not quiet

    line = []

    # Verb
    line.append(obsfateverb(successors, markers))

    # Operations
    operations = markersoperations(markers)
    if operations:
        line.append(" using %s" % ", ".join(operations))

    # Successors
    if successors:
        fmtsuccessors = [successors.joinfmt(succ) for succ in successors]
        line.append(" as %s" % ", ".join(fmtsuccessors))

    # Users
    users = markersusers(markers)
    # Filter out current user in not verbose mode to reduce amount of
    # information
    if not verbose:
        currentuser = ui.username(acceptempty=True)
        if len(users) == 1 and currentuser in users:
            users = None

    if (verbose or normal) and users:
        line.append(" by %s" % ", ".join(users))

    # Date
    dates = markersdates(markers)

    # dates are only shown in verbose mode
    if dates and verbose:
        min_date = min(dates)
        max_date = max(dates)

        if min_date == max_date:
            fmtmin_date = util.datestr(min_date, '%Y-%m-%d %H:%M %1%2')
            line.append(" (at %s)" % fmtmin_date)
        else:
            fmtmin_date = util.datestr(min_date, '%Y-%m-%d %H:%M %1%2')
            fmtmax_date = util.datestr(max_date, '%Y-%m-%d %H:%M %1%2')
            line.append(" (between %s and %s)" % (fmtmin_date, fmtmax_date))

    return "".join(line)
867 852
868 853
# Template strings used by _getfilteredreason to explain why an obsolete
# changeset is hidden, keyed by the fate computed from its successors sets.
filteredmsgtable = {
    "pruned": _("hidden revision '%s' is pruned"),
    "diverged": _("hidden revision '%s' has diverged"),
    "superseded": _("hidden revision '%s' was rewritten as: %s"),
    "superseded_split": _("hidden revision '%s' was split as: %s"),
    "superseded_split_several": _("hidden revision '%s' was split as: %s and "
                                  "%d more"),
}
877 862
def _getfilteredreason(repo, changeid, ctx):
    """return a human-friendly string on why a obsolete changeset is hidden
    """
    successors = successorssets(repo, ctx.node())
    fate = _getobsfate(successors)

    # Be more precise in case the revision is superseded
    if fate == 'pruned':
        return filteredmsgtable['pruned'] % changeid
    elif fate == 'diverged':
        return filteredmsgtable['diverged'] % changeid
    elif fate == 'superseded':
        # single successors set with a single successor
        single_successor = nodemod.short(successors[0][0])
        return filteredmsgtable['superseded'] % (changeid, single_successor)
    elif fate == 'superseded_split':

        succs = []
        for node_id in successors[0]:
            succs.append(nodemod.short(node_id))

        if len(succs) <= 2:
            fmtsuccs = ', '.join(succs)
            return filteredmsgtable['superseded_split'] % (changeid, fmtsuccs)
        else:
            # only show the first two successors and the count of the rest
            firstsuccessors = ', '.join(succs[:2])
            remainingnumber = len(succs) - 2

            args = (changeid, firstsuccessors, remainingnumber)
            return filteredmsgtable['superseded_split_several'] % args
General Comments 0
You need to be logged in to leave comments. Login now