##// END OF EJS Templates
obsolete: fix old typo...
Boris Feld -
r33944:54c21114 default
parent child Browse files
Show More
@@ -1,592 +1,592
1 1 # obsutil.py - utility functions for obsolescence
2 2 #
3 3 # Copyright 2017 Boris Feld <boris.feld@octobus.net>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 from . import (
11 11 phases,
12 12 util
13 13 )
14 14
class marker(object):
    """Thin wrapper giving named access to the fields of a raw marker tuple"""

    def __init__(self, repo, data):
        # the repo argument will be used to create changectx in later version
        self._repo = repo
        self._data = data
        self._decodedmeta = None

    def __hash__(self):
        return hash(self._data)

    def __eq__(self, other):
        # equal only to another wrapper carrying the same raw tuple
        return type(other) == type(self) and self._data == other._data

    def precnode(self):
        """Predecessor changeset node identifier (DEPRECATED)"""
        msg = ("'marker.precnode' is deprecated, "
               "use 'marker.prednode'")
        util.nouideprecwarn(msg, '4.4')
        return self.prednode()

    def prednode(self):
        """Predecessor changeset node identifier"""
        return self._data[0]

    def succnodes(self):
        """List of successor changesets node identifiers"""
        return self._data[1]

    def parentnodes(self):
        """Parents of the predecessors (None if not recorded)"""
        return self._data[5]

    def metadata(self):
        """Decoded metadata dictionary"""
        return dict(self._data[3])

    def date(self):
        """Creation date as (unixtime, offset)"""
        return self._data[4]

    def flags(self):
        """The flags field of the marker"""
        return self._data[2]
61 61
def getmarkers(repo, nodes=None, exclusive=False):
    """yield ``marker`` objects for obsolescence markers known in a repository

    Without <nodes>, every marker of the obsstore is returned. With <nodes>,
    only markers "relevant" to those nodes are returned; if <exclusive> is
    also set, the selection narrows to markers exclusive to those nodes."""
    if nodes is None:
        rawmarkers = repo.obsstore
    else:
        if exclusive:
            rawmarkers = exclusivemarkers(repo, nodes)
        else:
            rawmarkers = repo.obsstore.relevantmarkers(nodes)
    for rawdata in rawmarkers:
        yield marker(repo, rawdata)
76 76
def closestpredecessors(repo, nodeid):
    """yield the closest predecessors pointing to visible changectx nodes

    This function respects the repoview filtering: a filtered revision is
    considered missing and the walk keeps going through its predecessors.
    """
    predmap = repo.obsstore.predecessors
    pending = [nodeid]
    visited = set(pending)

    while pending:
        node = pending.pop()

        for markerdata in predmap.get(node, ()):
            candidate = markerdata[0]

            # basic cycle protection
            if candidate in visited:
                continue
            visited.add(candidate)

            if candidate in repo:
                # locally-known changeset: a closest predecessor
                yield candidate
            else:
                # unknown locally: keep walking through its own predecessors
                pending.append(candidate)
104 104
def allprecursors(*args, **kwargs):
    """old name of allpredecessors, kept for compatibility (DEPRECATED)
    """
    # warn once per call site, then delegate to the new name
    util.nouideprecwarn("'obsutil.allprecursors' is deprecated, "
                        "use 'obsutil.allpredecessors'", '4.4')
    return allpredecessors(*args, **kwargs)
113 113
def allpredecessors(obsstore, nodes, ignoreflags=0):
    """Yield every predecessor of <nodes>, including the initial nodes.

    Some predecessors may be unknown locally.

    This is a linear yield unsuited to detecting folded changesets.
    Markers whose flags intersect <ignoreflags> are not followed."""
    pending = set(nodes)
    emitted = set(pending)
    while pending:
        node = pending.pop()
        yield node
        for mark in obsstore.predecessors.get(node, ()):
            # skip markers carrying any of the ignored flags
            if mark[2] & ignoreflags:
                continue
            pred = mark[0]
            if pred in emitted:
                continue
            emitted.add(pred)
            pending.add(pred)
135 135
def allsuccessors(obsstore, nodes, ignoreflags=0):
    """Yield every successor of <nodes>, including the initial nodes.

    Some successors may be unknown locally.

    This is a linear yield unsuited to detecting split changesets.
    Markers whose flags intersect <ignoreflags> are not followed."""
    pending = set(nodes)
    emitted = set(pending)
    while pending:
        node = pending.pop()
        yield node
        for mark in obsstore.successors.get(node, ()):
            # skip markers carrying any of the ignored flags
            if mark[2] & ignoreflags:
                continue
            for succ in mark[1]:
                if succ in emitted:
                    continue
                emitted.add(succ)
                pending.add(succ)
156 156
157 157 def _filterprunes(markers):
158 158 """return a set with no prune markers"""
159 159 return set(m for m in markers if m[1])
160 160
def exclusivemarkers(repo, nodes):
    """set of markers relevant to "nodes" but no other locally-known nodes

    This function computes the set of markers "exclusive" to a locally-known
    node. This means we walk the markers starting from <nodes> until we reach a
    locally-known precursor outside of <nodes>. Elements of <nodes> with
    locally-known successors outside of <nodes> are ignored (since their
    precursors markers are also relevant to these successors).

    For example:

        # (A0 rewritten as A1)
        #
        # A0 <-1- A1 # Marker "1" is exclusive to A1

    or

        # (A0 rewritten as AX; AX rewritten as A1; AX is unknown locally)
        #
        # <-1- A0 <-2- AX <-3- A1 # Markers "2,3" are exclusive to A1

    or

        # (A0 has unknown precursors, A0 rewritten as A1 and A2 (divergence))
        #
        #          <-2- A1 # Marker "2" is exclusive to A0,A1
        #        /
        # <-1- A0
        #        \
        #         <-3- A2 # Marker "3" is exclusive to A0,A2
        #
        # in addition:
        #
        #  Markers "2,3" are exclusive to A1,A2
        #  Markers "1,2,3" are exclusive to A0,A1,A2

    See test/test-obsolete-bundle-strip.t for more examples.

    An example usage is strip. When stripping a changeset, we also want to
    strip the markers exclusive to this changeset. Otherwise we would have
    "dangling" obsolescence markers from its precursors: obsolescence markers
    marking a node as obsolete without any successors available locally.

    As for relevant markers, the prune markers for children will be followed.
    Of course, they will only be followed if the pruned child is
    locally-known, since the prune markers are relevant to the pruned node.
    However, while prune markers are considered relevant to the parent of the
    pruned changesets, prune markers for locally-known changesets (with no
    successors) are considered exclusive to the pruned nodes. This allows
    to strip the prune markers (with the rest of the exclusive chain) alongside
    the pruned changesets.
    """
    # running on a filtered repository would be dangerous as markers could be
    # reported as exclusive when they are relevant for other filtered nodes.
    unfi = repo.unfiltered()

    # shortcuts to various useful items
    nm = unfi.changelog.nodemap
    precursorsmarkers = unfi.obsstore.predecessors
    successormarkers = unfi.obsstore.successors
    childrenmarkers = unfi.obsstore.children

    # exclusive markers (return value of the function)
    exclmarkers = set()
    # we need fast membership testing
    nodes = set(nodes)
    # starting points: heads of the obshistory, i.e. nodes in <nodes> with no
    # non-prune successors markers
    #
    # XXX we are ignoring all issues in regard to cycles for now.
    stack = [n for n in nodes if not _filterprunes(successormarkers.get(n, ()))]
    stack.sort()
    # nodes already stacked
    seennodes = set(stack)
    while stack:
        current = stack.pop()
        # fetch precursors markers
        markers = list(precursorsmarkers.get(current, ()))
        # extend the list with prune markers
        for mark in successormarkers.get(current, ()):
            if not mark[1]:
                markers.append(mark)
        # and markers from children (looking for prune)
        for mark in childrenmarkers.get(current, ()):
            if not mark[1]:
                markers.append(mark)
        # traverse the markers
        for mark in markers:
            if mark in exclmarkers:
                # marker already selected
                continue

            # If the marker is about the current node, select it
            #
            # (this delays the addition of markers from children)
            if mark[1] or mark[0] == current:
                exclmarkers.add(mark)

            # should we keep traversing through the precursors?
            prec = mark[0]

            # nodes in the stack or already processed
            if prec in seennodes:
                continue

            # is this a locally known node ?
            known = prec in nm
            # if locally-known and not in the <nodes> set, the traversal
            # stops here.
            if known and prec not in nodes:
                continue

            # do not keep going if there are unselected markers pointing to
            # this node. If we end up traversing these unselected markers
            # later the node will be taken care of at that point.
            precmarkers = _filterprunes(successormarkers.get(prec))
            if precmarkers.issubset(exclmarkers):
                seennodes.add(prec)
                stack.append(prec)

    return exclmarkers
281 281
def foreground(repo, nodes):
    """return all nodes in the "foreground" of other node

    The foreground of a revision is anything reachable using parent -> children
    or precursor -> successor relation. It is very similar to "descendant" but
    augmented with obsolescence information.

    Beware that possible obsolescence cycle may result if complex situation.
    """
    repo = repo.unfiltered()
    fg = set(repo.set('%ln::', nodes))
    if repo.obsstore:
        # This complicated logic is only needed when obsolescence is in play:
        # iterate to a fixed point, alternating "descendants of" and
        # "successors of" until the set stops growing.
        # XXX will probably deserve an optimised revset.
        nodemap = repo.changelog.nodemap
        previouslen = -1
        while previouslen != len(fg):
            previouslen = len(fg)
            reachable = set(ctx.node() for ctx in fg)
            mutable = [ctx.node() for ctx in fg if ctx.mutable()]
            reachable.update(allsuccessors(repo.obsstore, mutable))
            locallyknown = (n for n in reachable if n in nodemap)
            fg = set(repo.set('%ln::', locallyknown))
    return set(ctx.node() for ctx in fg)
307 307
def getobsoleted(repo, tr):
    """return the set of pre-existing revisions obsoleted by a transaction"""
    # bind the lookups we need once, outside the loop
    torev = repo.unfiltered().changelog.nodemap.get
    phase = repo._phasecache.phase
    succsmarkers = repo.obsstore.successors.get
    public = phases.public
    addedmarkers = tr.changes.get('obsmarkers')
    addedrevs = tr.changes.get('revs')
    seenrevs = set(addedrevs)
    obsoleted = set()
    for mark in addedmarkers:
        rev = torev(mark[0])
        if rev is None:
            # predecessor is unknown locally
            continue
        if rev in seenrevs:
            continue
        seenrevs.add(rev)
        if phase(repo, rev) == public:
            # public changesets cannot become obsolete
            continue
        # obsoleted by this transaction only if every marker rewriting the
        # node was added by this very transaction
        if set(succsmarkers(mark[0]) or []).issubset(addedmarkers):
            obsoleted.add(rev)
    return obsoleted
329 329
class _succs(list):
    """a list of successor nodes plus the set of markers that produced it"""

    def __init__(self, *args, **kwargs):
        super(_succs, self).__init__(*args, **kwargs)
        # obsolescence markers contributing to this successors set
        self.markers = set()

    def copy(self):
        # duplicate both the node list and the markers set
        clone = _succs(self)
        clone.markers = self.markers.copy()
        return clone

    @util.propertycache
    def _set(self):
        # cached set view; safe because content is treated as immutable
        return set(self)

    def canmerge(self, other):
        # mergeable into "other" when every node we carry is there too
        return self._set.issubset(other._set)
349 349
def successorssets(repo, initialnode, closest=False, cache=None):
    """Return set of all latest successors of initial nodes

    The successors set of a changeset A are the group of revisions that succeed
    A. It succeeds A as a consistent whole, each revision being only a partial
    replacement. By default, the successors set contains non-obsolete
    changesets only, walking the obsolescence graph until reaching a leaf. If
    'closest' is set to True, closest successors-sets are returned (the
    obsolescence walk stops on known changesets).

    This function returns the full list of successor sets which is why it
    returns a list of tuples and not just a single tuple. Each tuple is a valid
    successors set. Note that (A,) may be a valid successors set for changeset A
    (see below).

    In most cases, a changeset A will have a single element (e.g. the changeset
    A is replaced by A') in its successors set. Though, it is also common for a
    changeset A to have no elements in its successor set (e.g. the changeset
    has been pruned). Therefore, the returned list of successors sets will be
    [(A',)] or [], respectively.

    When a changeset A is split into A' and B', however, it will result in a
    successors set containing more than a single element, i.e. [(A',B')].
    Divergent changesets will result in multiple successors sets, i.e. [(A',),
    (A'')].

    If a changeset A is not obsolete, then it will conceptually have no
    successors set. To distinguish this from a pruned changeset, the successor
    set will contain itself only, i.e. [(A,)].

    Finally, final successors unknown locally are considered to be pruned
    (pruned: obsoleted without any successors). (Final: successors not affected
    by markers).

    The 'closest' mode respect the repoview filtering. For example, without
    filter it will stop at the first locally known changeset, with 'visible'
    filter it will stop on visible changesets).

    The optional `cache` parameter is a dictionary that may contain
    precomputed successors sets. It is meant to reuse the computation of a
    previous call to `successorssets` when multiple calls are made at the same
    time. The cache dictionary is updated in place. The caller is responsible
    for its life span. Code that makes multiple calls to `successorssets`
    *should* use this cache mechanism or risk a performance hit.

    Since results are different depending on the 'closest' mode, the same cache
    cannot be reused for both modes.
    """

    succmarkers = repo.obsstore.successors

    # Stack of nodes we search successors sets for
    toproceed = [initialnode]
    # set version of above list for fast loop detection
    # element added to "toproceed" must be added here
    stackedset = set(toproceed)
    if cache is None:
        cache = {}

    # This while loop is the flattened version of a recursive search for
    # successors sets
    #
    # def successorssets(x):
    #     successors = directsuccessors(x)
    #     ss = [[]]
    #     for succ in directsuccessors(x):
    #         # product as in itertools cartesian product
    #         ss = product(ss, successorssets(succ))
    #     return ss
    #
    # But we can not use plain recursive calls here:
    # - that would blow the python call stack
    # - obsolescence markers may have cycles, we need to handle them.
    #
    # The `toproceed` list act as our call stack. Every node we search
    # successors set for are stacked there.
    #
    # The `stackedset` is set version of this stack used to check if a node is
    # already stacked. This check is used to detect cycles and prevent infinite
    # loop.
    #
    # successors set of all nodes are stored in the `cache` dictionary.
    #
    # After this while loop ends we use the cache to return the successors sets
    # for the node requested by the caller.
    while toproceed:
        # Every iteration tries to compute the successors sets of the topmost
        # node of the stack: CURRENT.
        #
        # There are four possible outcomes:
        #
        # 1) We already know the successors sets of CURRENT:
        #    -> mission accomplished, pop it from the stack.
        # 2) Stop the walk:
        #    default case: Node is not obsolete
        #    closest case: Node is known at this repo filter level
        #      -> the node is its own successors sets. Add it to the cache.
        # 3) We do not know successors set of direct successors of CURRENT:
        #    -> We add those successors to the stack.
        # 4) We know successors sets of all direct successors of CURRENT:
        #    -> We can compute CURRENT successors set and add it to the
        #       cache.
        #
        current = toproceed[-1]

        # case 2 condition is a bit hairy because of closest,
        # we compute it on its own
        case2condition = ((current not in succmarkers)
                          or (closest and current != initialnode
                              and current in repo))

        if current in cache:
            # case (1): We already know the successors sets
            stackedset.remove(toproceed.pop())
        elif case2condition:
            # case (2): end of walk.
            if current in repo:
                # We have a valid successors.
                cache[current] = [_succs((current,))]
            else:
                # Final obsolete version is unknown locally.
                # Do not count that as a valid successors
                cache[current] = []
        else:
            # cases (3) and (4)
            #
            # We proceed in two phases. Phase 1 aims to distinguish case (3)
            # from case (4):
            #
            #     For each direct successors of CURRENT, we check whether its
            #     successors sets are known. If they are not, we stack the
            #     unknown node and proceed to the next iteration of the while
            #     loop. (case 3)
            #
            #     During this step, we may detect obsolescence cycles: a node
            #     with unknown successors sets but already in the call stack.
            #     In such a situation, we arbitrary set the successors sets of
            #     the node to nothing (node pruned) to break the cycle.
            #
            #     If no break was encountered we proceed to phase 2.
            #
            # Phase 2 computes successors sets of CURRENT (case 4); see details
            # in phase 2 itself.
            #
            # Note the two levels of iteration in each phase.
            # - The first one handles obsolescence markers using CURRENT as
            #   precursor (successors markers of CURRENT).
            #
            #   Having multiple entry here means divergence.
            #
            # - The second one handles successors defined in each marker.
            #
            #   Having none means pruned node, multiple successors means split,
            #   single successors are standard replacement.
            #
            for mark in sorted(succmarkers[current]):
                for suc in mark[1]:
                    if suc not in cache:
                        if suc in stackedset:
                            # cycle breaking
                            cache[suc] = []
                        else:
                            # case (3) If we have not computed successors sets
                            # of one of those successors we add it to the
                            # `toproceed` stack and stop all work for this
                            # iteration.
                            toproceed.append(suc)
                            stackedset.add(suc)
                            break
                else:
                    continue
                break
            else:
                # case (4): we know all successors sets of all direct
                # successors
                #
                # Successors set contributed by each marker depends on the
                # successors sets of all its "successors" node.
                #
                # Each different marker is a divergence in the obsolescence
                # history. It contributes successors sets distinct from other
                # markers.
                #
                # Within a marker, a successor may have divergent successors
                # sets. In such a case, the marker will contribute multiple
                # divergent successors sets. If multiple successors have
                # divergent successors sets, a Cartesian product is used.
                #
                # At the end we post-process successors sets to remove
                # duplicated entry and successors set that are strict subset of
                # another one.
                succssets = []
                for mark in sorted(succmarkers[current]):
                    # successors sets contributed by this marker
                    base = _succs()
                    base.markers.add(mark)
                    markss = [base]
                    for suc in mark[1]:
                        # cardinal product with previous successors
                        productresult = []
                        for prefix in markss:
                            for suffix in cache[suc]:
                                newss = prefix.copy()
                                newss.markers.update(suffix.markers)
                                for part in suffix:
                                    # do not duplicated entry in successors set
                                    # first entry wins.
                                    if part not in newss:
                                        newss.append(part)
                                productresult.append(newss)
                        markss = productresult
                    succssets.extend(markss)
                # remove duplicated and subset
                seen = []
                final = []
                candidates = sorted((s for s in succssets if s),
                                    key=len, reverse=True)
                for cand in candidates:
                    for seensuccs in seen:
                        if cand.canmerge(seensuccs):
                            seensuccs.markers.update(cand.markers)
                            break
                    else:
                        final.append(cand)
                        seen.append(cand)
                final.reverse()  # put small successors set first
                cache[current] = final
    return cache[initialnode]
578 578
def successorsandmarkers(repo, ctx):
    """compute the raw data needed for computing obsfate

    Returns a list of dicts, one per successors set ('successors' and
    'markers' keys), or None when the changeset is not obsolete.
    """
    if not ctx.obsolete():
        return None

    ssets = successorssets(repo, ctx.node(), closest=True)

    return [{'successors': sset, 'markers': sset.markers}
            for sset in ssets]
General Comments 0
You need to be logged in to leave comments. Login now