performance: speedup computation of suspended revisions...
Pierre-Yves David
r18276:834ef7e7 default
@@ -1,751 +1,752 @@
1 1 # obsolete.py - obsolete markers handling
2 2 #
3 3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
4 4 # Logilab SA <contact@logilab.fr>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 """Obsolete markers handling
10 10
11 11 An obsolete marker maps an old changeset to a list of new
12 12 changesets. If the list of new changesets is empty, the old changeset
13 13 is said to be "killed". Otherwise, the old changeset is being
14 14 "replaced" by the new changesets.
15 15
16 16 Obsolete markers can be used to record and distribute changeset graph
17 17 transformations performed by history rewriting operations, and help
18 18 build new tools to reconcile conflicting rewriting actions. To
19 19 facilitate conflict resolution, markers include various annotations
20 20 besides old and new changeset identifiers, such as creation date or
21 21 author name.
22 22
23 23 The old obsoleted changeset is called "precursor" and possible replacements are
24 24 called "successors". Markers that used changeset X as a precursors are called
25 25 "successor markers of X" because they hold information about the successors of
26 26 X. Markers that use changeset Y as a successor are called "precursor markers of
27 27 Y" because they hold information about the precursors of Y.
28 28
29 29 Examples:
30 30
31 31 - When changeset A is replaced by a changeset A', one marker is stored:
32 32
33 33 (A, (A',))
34 34
35 35 - When changesets A and B are folded into a new changeset C, two markers are
36 36 stored:
37 37
38 38 (A, (C,)) and (B, (C,))
39 39
40 40 - When changeset A is simply "pruned" from the graph, a marker is created:
41 41
42 42 (A, ())
43 43
44 44 - When changeset A is split into B and C, a single marker is used:
45 45
46 46 (A, (B, C))
47 47
48 48 We use a single marker to distinguish the "split" case from the "divergence"
49 49 case. If two independent operations rewrite the same changeset A into A' and
50 50 A'', we have an error case: divergent rewriting. We can detect it because
51 51 two markers will be created independently:
52 52
53 53 (A, (B,)) and (A, (C,))
54 54
55 55 Format
56 56 ------
57 57
58 58 Markers are stored in an append-only file stored in
59 59 '.hg/store/obsstore'.
60 60
61 61 The file starts with a version header:
62 62
63 63 - 1 unsigned byte: version number, starting at zero.
64 64
65 65
66 66 The header is followed by the markers. Each marker is made of:
67 67
68 68 - 1 unsigned byte: number of new changesets "N", can be zero.
69 69
70 70 - 1 unsigned 32-bits integer: metadata size "M" in bytes.
71 71
72 72 - 1 byte: a bit field. It is reserved for flags used in common
73 73 obsolete marker operations, to avoid repeated decoding of metadata
74 74 entries.
75 75
76 76 - 20 bytes: obsoleted changeset identifier.
77 77
78 78 - N*20 bytes: new changeset identifiers.
79 79
80 80 - M bytes: metadata as a sequence of nul-terminated strings. Each
81 81 string contains a key and a value, separated by a colon ':', without
82 82 additional encoding. Keys cannot contain '\0' or ':' and values
83 83 cannot contain '\0'.
84 84 """
85 85 import struct
86 86 import util, base85, node
87 87 from i18n import _
88 88
89 89 _pack = struct.pack
90 90 _unpack = struct.unpack
91 91
92 92 _SEEK_END = 2 # os.SEEK_END was introduced in Python 2.5
93 93
94 94 # the obsolete feature is not mature enough to be enabled by default.
95 95 # you have to rely on a third party extension to enable this.
96 96 _enabled = False
97 97
98 98 # data used for parsing and writing
99 99 _fmversion = 0
100 100 _fmfixed = '>BIB20s'
101 101 _fmnode = '20s'
102 102 _fmfsize = struct.calcsize(_fmfixed)
103 103 _fnodesize = struct.calcsize(_fmnode)
104 104
105 105 ### obsolescence marker flag
106 106
107 107 ## bumpedfix flag
108 108 #
109 109 # When a changeset A' succeeds a changeset A which became public, we call A'
110 110 # "bumped" because it's a successor of a public changeset
111 111 #
112 112 # o A' (bumped)
113 113 # |`:
114 114 # | o A
115 115 # |/
116 116 # o Z
117 117 #
118 118 # The way to solve this situation is to create a new changeset Ad as a child
119 119 # of A. This changeset has the same content as A'. So the diff from A to A'
120 120 # is the same as the diff from A to Ad. Ad is marked as a successor of A'
121 121 #
122 122 # o Ad
123 123 # |`:
124 124 # | x A'
125 125 # |'|
126 126 # o | A
127 127 # |/
128 128 # o Z
129 129 #
130 130 # But by transitivity Ad is also a successor of A. To avoid having Ad marked
131 131 # as bumped too, we add the `bumpedfix` flag to the marker <A', (Ad,)>.
132 132 # This flag means that the successor is an interdiff that fixes the bumped
133 133 # situation, breaking the transitivity of "bumped" here.
134 134 bumpedfix = 1
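As a sketch, a marker carrying this flag is an ordinary marker tuple whose flag field has the `bumpedfix` bit set; `aprime` and `ad` below are hypothetical placeholders for the 20-byte nodes of A' and Ad:

    # (precursor, successors, flags, metadata)
    mark = (aprime, (ad,), bumpedfix, '')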
135 135
136 136 def _readmarkers(data):
137 137 """Read and enumerate markers from raw data"""
138 138 off = 0
139 139 diskversion = _unpack('>B', data[off:off + 1])[0]
140 140 off += 1
141 141 if diskversion != _fmversion:
142 142 raise util.Abort(_('parsing obsolete marker: unknown version %r')
143 143 % diskversion)
144 144
145 145 # Loop on markers
146 146 l = len(data)
147 147 while off + _fmfsize <= l:
148 148 # read fixed part
149 149 cur = data[off:off + _fmfsize]
150 150 off += _fmfsize
151 151 nbsuc, mdsize, flags, pre = _unpack(_fmfixed, cur)
152 152 # read replacement
153 153 sucs = ()
154 154 if nbsuc:
155 155 s = (_fnodesize * nbsuc)
156 156 cur = data[off:off + s]
157 157 sucs = _unpack(_fmnode * nbsuc, cur)
158 158 off += s
159 159 # read metadata
160 160 # (metadata will be decoded on demand)
161 161 metadata = data[off:off + mdsize]
162 162 if len(metadata) != mdsize:
163 163 raise util.Abort(_('parsing obsolete marker: metadata is too '
164 164 'short, %d bytes expected, got %d')
165 165 % (mdsize, len(metadata)))
166 166 off += mdsize
167 167 yield (pre, sucs, flags, metadata)
168 168
169 169 def encodemeta(meta):
170 170 """Return encoded metadata string to string mapping.
171 171
172 172 Assume no ':' in key and no '\0' in both key and value."""
173 173 for key, value in meta.iteritems():
174 174 if ':' in key or '\0' in key:
175 175 raise ValueError("':' and '\0' are forbidden in metadata keys")
176 176 if '\0' in value:
177 177 raise ValueError("'\0' is forbidden in metadata values")
178 178 return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])
179 179
180 180 def decodemeta(data):
181 181 """Return string to string dictionary from encoded version."""
182 182 d = {}
183 183 for l in data.split('\0'):
184 184 if l:
185 185 key, value = l.split(':', 1) # values may contain ':', keys may not
186 186 d[key] = value
187 187 return d
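A quick round-trip sketch through the two helpers above (values invented):

    meta = {'user': 'alice <alice@example.org>', 'date': '0 0'}
    assert decodemeta(encodemeta(meta)) == meta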
188 188
189 189 class marker(object):
190 190 """Wrap obsolete marker raw data"""
191 191
192 192 def __init__(self, repo, data):
193 193 # the repo argument will be used to create changectx in a later version
194 194 self._repo = repo
195 195 self._data = data
196 196 self._decodedmeta = None
197 197
198 198 def precnode(self):
199 199 """Precursor changeset node identifier"""
200 200 return self._data[0]
201 201
202 202 def succnodes(self):
203 203 """List of successor changesets node identifiers"""
204 204 return self._data[1]
205 205
206 206 def metadata(self):
207 207 """Decoded metadata dictionary"""
208 208 if self._decodedmeta is None:
209 209 self._decodedmeta = decodemeta(self._data[3])
210 210 return self._decodedmeta
211 211
212 212 def date(self):
213 213 """Creation date as (unixtime, offset)"""
214 214 parts = self.metadata()['date'].split(' ')
215 215 return (float(parts[0]), int(parts[1]))
216 216
217 217 class obsstore(object):
218 218 """Store obsolete markers
219 219
220 220 Markers can be accessed with two mappings:
221 221 - precursors[x] -> set(markers on precursor edges of x)
222 222 - successors[x] -> set(markers on successor edges of x)
223 223 """
224 224
225 225 def __init__(self, sopener):
226 226 # caches for various obsolescence-related sets
227 227 self.caches = {}
228 228 self._all = []
229 229 # new markers to serialize
230 230 self.precursors = {}
231 231 self.successors = {}
232 232 self.sopener = sopener
233 233 data = sopener.tryread('obsstore')
234 234 if data:
235 235 self._load(_readmarkers(data))
236 236
237 237 def __iter__(self):
238 238 return iter(self._all)
239 239
240 240 def __nonzero__(self):
241 241 return bool(self._all)
242 242
243 243 def create(self, transaction, prec, succs=(), flag=0, metadata=None):
244 244 """obsolete: add a new obsolete marker
245 245
246 246 * ensure it is hashable
247 247 * check mandatory metadata
248 248 * encode metadata
249 249 """
250 250 if metadata is None:
251 251 metadata = {}
252 252 if len(prec) != 20:
253 253 raise ValueError(prec)
254 254 for succ in succs:
255 255 if len(succ) != 20:
256 256 raise ValueError(succ)
257 257 marker = (str(prec), tuple(succs), int(flag), encodemeta(metadata))
258 258 self.add(transaction, [marker])
259 259
260 260 def add(self, transaction, markers):
261 261 """Add new markers to the store
262 262
263 263 Takes care of filtering out duplicates.
264 264 Returns the number of new markers."""
265 265 if not _enabled:
266 266 raise util.Abort('obsolete feature is not enabled on this repo')
267 267 new = [m for m in markers if m not in self._all]
268 268 if new:
269 269 f = self.sopener('obsstore', 'ab')
270 270 try:
271 271 # Whether the file's current position is at the beginning or at
272 272 # the end after opening a file for appending is implementation
273 273 # defined. So we must seek to the end before calling tell(),
274 274 # or we may get a zero offset for non-zero sized files on
275 275 # some platforms (issue3543).
276 276 f.seek(0, _SEEK_END)
277 277 offset = f.tell()
278 278 transaction.add('obsstore', offset)
279 279 # offset == 0: new file - add the version header
280 280 for bytes in _encodemarkers(new, offset == 0):
281 281 f.write(bytes)
282 282 finally:
283 283 # XXX: f.close() == filecache invalidation == obsstore rebuilt.
284 284 # call 'filecacheentry.refresh()' here
285 285 f.close()
286 286 self._load(new)
287 287 # new markers *may* have changed several sets. invalidate the caches.
288 288 self.caches.clear()
289 289 return len(new)
290 290
291 291 def mergemarkers(self, transaction, data):
292 292 markers = _readmarkers(data)
293 293 self.add(transaction, markers)
294 294
295 295 def _load(self, markers):
296 296 for mark in markers:
297 297 self._all.append(mark)
298 298 pre, sucs = mark[:2]
299 299 self.successors.setdefault(pre, set()).add(mark)
300 300 for suc in sucs:
301 301 self.precursors.setdefault(suc, set()).add(mark)
302 302 if node.nullid in self.precursors:
303 303 raise util.Abort(_('bad obsolescence marker detected: '
304 304 'invalid successors nullid'))
305 305
306 306 def _encodemarkers(markers, addheader=False):
307 307 # Kept separate from flushmarkers(), it will be reused for
308 308 # markers exchange.
309 309 if addheader:
310 310 yield _pack('>B', _fmversion)
311 311 for marker in markers:
312 312 yield _encodeonemarker(marker)
313 313
314 314
315 315 def _encodeonemarker(marker):
316 316 pre, sucs, flags, metadata = marker
317 317 nbsuc = len(sucs)
318 318 format = _fmfixed + (_fmnode * nbsuc)
319 319 data = [nbsuc, len(metadata), flags, pre]
320 320 data.extend(sucs)
321 321 return _pack(format, *data) + metadata
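Encoding and parsing are inverses of each other, which a short sketch can check (the marker values are invented):

    m = ('\x11' * 20, ('\x22' * 20,), 0, encodemeta({'user': 'alice'}))
    data = ''.join(_encodemarkers([m], addheader=True))
    assert list(_readmarkers(data)) == [m]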
322 322
323 323 # arbitrarily picked to fit into the 8K limit of HTTP servers
324 324 # you have to take into account:
325 325 # - the version header
326 326 # - the base85 encoding
327 327 _maxpayload = 5300
328 328
329 329 def listmarkers(repo):
330 330 """List markers over pushkey"""
331 331 if not repo.obsstore:
332 332 return {}
333 333 keys = {}
334 334 parts = []
335 335 currentlen = _maxpayload * 2 # ensure we create a new part
336 336 for marker in repo.obsstore:
337 337 nextdata = _encodeonemarker(marker)
338 338 if (len(nextdata) + currentlen > _maxpayload):
339 339 currentpart = []
340 340 currentlen = 0
341 341 parts.append(currentpart)
342 342 currentpart.append(nextdata)
343 343 currentlen += len(nextdata)
344 344 for idx, part in enumerate(reversed(parts)):
345 345 data = ''.join([_pack('>B', _fmversion)] + part)
346 346 keys['dump%i' % idx] = base85.b85encode(data)
347 347 return keys
348 348
349 349 def pushmarker(repo, key, old, new):
350 350 """Push markers over pushkey"""
351 351 if not key.startswith('dump'):
352 352 repo.ui.warn(_('unknown key: %r') % key)
353 353 return 0
354 354 if old:
355 355 repo.ui.warn(_('unexpected old value for %r') % key)
356 356 return 0
357 357 data = base85.b85decode(new)
358 358 lock = repo.lock()
359 359 try:
360 360 tr = repo.transaction('pushkey: obsolete markers')
361 361 try:
362 362 repo.obsstore.mergemarkers(tr, data)
363 363 tr.close()
364 364 return 1
365 365 finally:
366 366 tr.release()
367 367 finally:
368 368 lock.release()
369 369
370 370 def allmarkers(repo):
371 371 """all obsolete markers known in a repository"""
372 372 for markerdata in repo.obsstore:
373 373 yield marker(repo, markerdata)
374 374
375 375 def precursormarkers(ctx):
376 376 """obsolete marker marking this changeset as a successors"""
377 377 for data in ctx._repo.obsstore.precursors.get(ctx.node(), ()):
378 378 yield marker(ctx._repo, data)
379 379
380 380 def successormarkers(ctx):
381 381 """obsolete marker making this changeset obsolete"""
382 382 for data in ctx._repo.obsstore.successors.get(ctx.node(), ()):
383 383 yield marker(ctx._repo, data)
384 384
385 385 def allsuccessors(obsstore, nodes, ignoreflags=0):
386 386 """Yield node for every successor of <nodes>.
387 387
388 388 Some successors may be unknown locally.
389 389
390 390 This is a linear yield unsuited to detecting split changesets."""
391 391 remaining = set(nodes)
392 392 seen = set(remaining)
393 393 while remaining:
394 394 current = remaining.pop()
395 395 yield current
396 396 for mark in obsstore.successors.get(current, ()):
397 397 # ignore markers flagged with one of the specified flags
398 398 if mark[2] & ignoreflags:
399 399 continue
400 400 for suc in mark[1]:
401 401 if suc not in seen:
402 402 seen.add(suc)
403 403 remaining.add(suc)
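For example, with chained markers A -> B and B -> C the walk yields the input node as well as both successors. A sketch with a stand-in store and one-letter strings in place of real 20-byte nodes:

    class fakestore(object):
        successors = {'A': [('A', ('B',), 0, '')],
                      'B': [('B', ('C',), 0, '')]}

    assert set(allsuccessors(fakestore(), ['A'])) == set(['A', 'B', 'C'])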
404 404
405 405 def successorssets(repo, initialnode, cache=None):
406 406 """Return all set of successors of initial nodes
407 407
408 408 Successors set of changeset A are a group of revision that succeed A. It
409 409 succeed A as a consistent whole, each revision being only partial
410 410 replacement. Successors set contains non-obsolete changeset only.
411 411
412 412 In most cases a changeset A have zero (changeset pruned) or a single
413 413 successors set that contains a single successor (changeset A replaced by
414 414 A')
415 415
416 416 When changeset is split, it results successors set containing more than
417 417 a single element. Divergent rewriting will result in multiple successors
418 418 sets.
419 419
420 420 They are returned as a list of tuples containing all valid successors sets.
421 421
422 422 Final successors unknown locally are considered plain prune (obsoleted
423 423 without successors).
424 424
425 425 The optional `cache` parameter is a dictionary that may contains
426 426 precomputed successors sets. It is meant to reuse the computation of
427 427 previous call to `successorssets` when multiple calls are made at the same
428 428 time. The cache dictionary is updated in place. The caller is responsible
429 429 for its live spawn. Code that makes multiple calls to `successorssets`
430 430 *must* use this cache mechanism or suffer terrible performances."""
431 431
432 432 succmarkers = repo.obsstore.successors
433 433
434 434 # Stack of nodes we search successors sets for
435 435 toproceed = [initialnode]
436 436 # set version of above list for fast loop detection
437 437 # elements added to "toproceed" must be added here too
438 438 stackedset = set(toproceed)
439 439 if cache is None:
440 440 cache = {}
441 441
442 442 # This while loop is the flattened version of a recursive search for
443 443 # successors sets
444 444 #
445 445 # def successorssets(x):
446 446 # successors = directsuccessors(x)
447 447 # ss = [[]]
448 448 # for succ in successors:
449 449 # # product as in itertools cartesian product
450 450 # ss = product(ss, successorssets(succ))
451 451 # return ss
452 452 #
453 453 # But we can not use plain recursive calls here:
454 454 # - that would blow the python call stack
455 455 # - obsolescence markers may have cycles, we need to handle them.
456 456 #
457 457 # The `toproceed` list acts as our call stack. Every node we search
458 458 # successors sets for is stacked there.
459 459 #
460 460 # The `stackedset` is a set version of this stack, used to check if a node
461 461 # is already stacked. This check is used to detect cycles and prevent
462 462 # infinite loops.
463 463 #
464 464 # Successors sets of all nodes are stored in the `cache` dictionary.
465 465 #
466 466 # After this while loop ends we use the cache to return the successors sets
467 467 # for the node requested by the caller.
468 468 while toproceed:
469 469 # Every iteration tries to compute the successors sets of the topmost
470 470 # node of the stack: CURRENT.
471 471 #
472 472 # There are four possible outcomes:
473 473 #
474 474 # 1) We already know the successors sets of CURRENT:
475 475 # -> mission accomplished, pop it from the stack.
476 476 # 2) Node is not obsolete:
477 477 # -> the node is its own successors set. Add it to the cache.
478 478 # 3) We do not know the successors sets of direct successors of CURRENT:
479 479 # -> We add those successors to the stack.
480 480 # 4) We know successors sets of all direct successors of CURRENT:
481 481 # -> We can compute CURRENT successors set and add it to the
482 482 # cache.
483 483 #
484 484 current = toproceed[-1]
485 485 if current in cache:
486 486 # case (1): We already know the successors sets
487 487 stackedset.remove(toproceed.pop())
488 488 elif current not in succmarkers:
489 489 # case (2): The node is not obsolete.
490 490 if current in repo:
491 491 # We have a valid final successor.
492 492 cache[current] = [(current,)]
493 493 else:
494 494 # Final obsolete version is unknown locally.
495 495 # Do not count that as a valid successor
496 496 cache[current] = []
497 497 else:
498 498 # cases (3) and (4)
499 499 #
500 500 # We proceed in two phases. Phase 1 aims to distinguish case (3)
501 501 # from case (4):
502 502 #
503 503 # For each direct successor of CURRENT, we check whether its
504 504 # successors sets are known. If they are not, we stack the
505 505 # unknown node and proceed to the next iteration of the while
506 506 # loop. (case 3)
507 507 #
508 508 # During this step, we may detect obsolescence cycles: a node
509 509 # with unknown successors sets but already in the call stack.
510 510 # In such a situation, we arbitrarily set the successors sets of
511 511 # the node to nothing (node pruned) to break the cycle.
512 512 #
513 513 # If no break was encountered we proceed to phase 2.
514 514 #
515 515 # Phase 2 computes successors sets of CURRENT (case 4); see details
516 516 # in phase 2 itself.
517 517 #
518 518 # Note the two levels of iteration in each phase.
519 519 # - The first one handles obsolescence markers using CURRENT as
520 520 # precursor (successors markers of CURRENT).
521 521 #
522 522 # Having multiple entries here means divergence.
523 523 #
524 524 # - The second one handles successors defined in each marker.
525 525 #
526 526 # Having none means a pruned node; multiple successors mean a split;
527 527 # a single successor is a standard replacement.
528 528 #
529 529 for mark in succmarkers[current]:
530 530 for suc in mark[1]:
531 531 if suc not in cache:
532 532 if suc in stackedset:
533 533 # cycle breaking
534 534 cache[suc] = []
535 535 else:
536 536 # case (3) If we have not computed successors sets
537 537 # of one of those successors we add it to the
538 538 # `toproceed` stack and stop all work for this
539 539 # iteration.
540 540 toproceed.append(suc)
541 541 stackedset.add(suc)
542 542 break
543 543 else:
544 544 continue
545 545 break
546 546 else:
547 547 # case (4): we know all successors sets of all direct
548 548 # successors
549 549 #
550 550 # The successors set contributed by each marker depends on the
551 551 # successors sets of all its "successors" nodes.
552 552 #
553 553 # Each different marker is a divergence in the obsolescence
554 554 # history. It contributes successors sets distinct from other
555 555 # markers.
556 556 #
557 557 # Within a marker, a successor may have divergent successors
558 558 # sets. In such a case, the marker will contribute multiple
559 559 # divergent successors sets. If multiple successors have
560 560 # divergent successors sets, a cartesian product is used.
561 561 #
562 562 # At the end we post-process successors sets to remove
563 563 # duplicated entries and successors sets that are strict subsets of
564 564 # another one.
565 565 succssets = []
566 566 for mark in succmarkers[current]:
567 567 # successors sets contributed by this marker
568 568 markss = [[]]
569 569 for suc in mark[1]:
570 570 # cartesian product with previous successors sets
571 571 productresult = []
572 572 for prefix in markss:
573 573 for suffix in cache[suc]:
574 574 newss = list(prefix)
575 575 for part in suffix:
576 576 # do not duplicate entries in a successors set;
577 577 # the first entry wins.
578 578 if part not in newss:
579 579 newss.append(part)
580 580 productresult.append(newss)
581 581 markss = productresult
582 582 succssets.extend(markss)
583 583 # remove duplicates and subsets
584 584 seen = []
585 585 final = []
586 586 candidate = sorted(((set(s), s) for s in succssets if s),
587 587 key=lambda x: len(x[1]), reverse=True)
588 588 for setversion, listversion in candidate:
589 589 for seenset in seen:
590 590 if setversion.issubset(seenset):
591 591 break
592 592 else:
593 593 final.append(listversion)
594 594 seen.append(setversion)
595 595 final.reverse() # put smaller successors sets first
596 596 cache[current] = final
597 597 return cache[initialnode]
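Callers that need successors sets for several nodes are expected to share a single cache dictionary across calls, much as `_computedivergentset` below does. A sketch:

    cache = {}
    for ctx in repo.set('draft()'):
        for ss in successorssets(repo, ctx.node(), cache):
            pass  # each `ss` is a tuple of successor nodes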
598 598
599 599 def _knownrevs(repo, nodes):
600 600 """yield revision numbers of known nodes passed in parameters
601 601
602 602 Unknown revisions are silently ignored."""
603 603 torev = repo.changelog.nodemap.get
604 604 for n in nodes:
605 605 rev = torev(n)
606 606 if rev is not None:
607 607 yield rev
608 608
609 609 # mapping of 'set-name' -> <function to compute this set>
610 610 cachefuncs = {}
611 611 def cachefor(name):
612 612 """Decorator to register a function as computing the cache for a set"""
613 613 def decorator(func):
614 614 assert name not in cachefuncs
615 615 cachefuncs[name] = func
616 616 return func
617 617 return decorator
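A hypothetical registration, mirroring the real ones further down (the 'pruned' set is an editor's example, not a cache that exists in this file):

    @cachefor('pruned')
    def _computeprunedset(repo):
        """obsolete revisions whose markers all have empty successors"""
        getrev = repo.changelog.nodemap.get
        pruned = set()
        for node, marks in repo.obsstore.successors.iteritems():
            if not any(m[1] for m in marks):
                rev = getrev(node)
                if rev is not None:
                    pruned.add(rev)
        return pruned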
618 618
619 619 def getrevs(repo, name):
620 620 """Return the set of revision that belong to the <name> set
621 621
622 622 Such access may compute the set and cache it for future use"""
623 623 repo = repo.unfiltered()
624 624 if not repo.obsstore:
625 625 return ()
626 626 if name not in repo.obsstore.caches:
627 627 repo.obsstore.caches[name] = cachefuncs[name](repo)
628 628 return repo.obsstore.caches[name]
629 629
630 630 # To keep things simple we need to invalidate obsolescence caches when:
631 631 #
632 632 # - a new changeset is added
633 633 # - a public phase is changed
634 634 # - obsolescence markers are added
635 635 # - strip is used on a repo
636 636 def clearobscaches(repo):
637 637 """Remove all obsolescence related cache from a repo
638 638
639 639 This remove all cache in obsstore is the obsstore already exist on the
640 640 repo.
641 641
642 642 (We could be smarter here given the exact event that trigger the cache
643 643 clearing)"""
644 644 # only clear cache is there is obsstore data in this repo
645 645 if 'obsstore' in repo._filecache:
646 646 repo.obsstore.caches.clear()
647 647
648 648 @cachefor('obsolete')
649 649 def _computeobsoleteset(repo):
650 650 """the set of obsolete revisions"""
651 651 obs = set()
652 652 getrev = repo.changelog.nodemap.get
653 653 getphase = repo._phasecache.phase
654 654 for node in repo.obsstore.successors:
655 655 rev = getrev(node)
656 656 if rev is not None and getphase(repo, rev):
657 657 obs.add(rev)
658 658 return obs
659 659
660 660 @cachefor('unstable')
661 661 def _computeunstableset(repo):
662 662 """the set of non obsolete revisions with obsolete parents"""
663 663 # revset is not efficient enough here
664 664 # we do (obsolete()::) - obsolete() by hand
665 665 obs = getrevs(repo, 'obsolete')
666 666 if not obs:
667 667 return set()
668 668 cl = repo.changelog
669 669 return set(r for r in cl.descendants(obs) if r not in obs)
670 670
671 671 @cachefor('suspended')
672 672 def _computesuspendedset(repo):
673 673 """the set of obsolete parents with non obsolete descendants"""
674 return set(repo.revs('obsolete() and obsolete()::unstable()'))
674 suspended = repo.changelog.ancestors(getrevs(repo, 'unstable'))
675 return set(r for r in getrevs(repo, 'obsolete') if r in suspended)
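This rewrite is the point of the changeset: instead of evaluating the revset 'obsolete() and obsolete()::unstable()' for every request, it asks the changelog once for the ancestors of the already-cached unstable set and keeps the obsolete revisions found among them.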
675 676
676 677 @cachefor('extinct')
677 678 def _computeextinctset(repo):
678 679 """the set of obsolete parents without non obsolete descendants"""
679 680 return set(repo.revs('obsolete() - obsolete()::unstable()'))
680 681
681 682
682 683 @cachefor('bumped')
683 684 def _computebumpedset(repo):
684 685 """the set of revs trying to obsolete public revisions"""
685 686 # get all possible bumped changesets
686 687 tonode = repo.changelog.node
687 688 publicnodes = (tonode(r) for r in repo.revs('public()'))
688 689 successors = allsuccessors(repo.obsstore, publicnodes,
689 690 ignoreflags=bumpedfix)
690 691 # revisions that are public or already obsolete don't count as bumped
691 692 query = '%ld - obsolete() - public()'
692 693 return set(repo.revs(query, _knownrevs(repo, successors)))
693 694
694 695 @cachefor('divergent')
695 696 def _computedivergentset(repo):
696 697 """the set of rev that compete to be the final successors of some revision.
697 698 """
698 699 divergent = set()
699 700 obsstore = repo.obsstore
700 701 newermap = {}
701 702 for ctx in repo.set('(not public()) - obsolete()'):
702 703 mark = obsstore.precursors.get(ctx.node(), ())
703 704 toprocess = set(mark)
704 705 while toprocess:
705 706 prec = toprocess.pop()[0]
706 707 if prec not in newermap:
707 708 successorssets(repo, prec, newermap)
708 709 newer = [n for n in newermap[prec] if n]
709 710 if len(newer) > 1:
710 711 divergent.add(ctx.rev())
711 712 break
712 713 toprocess.update(obsstore.precursors.get(prec, ()))
713 714 return divergent
714 715
715 716
716 717 def createmarkers(repo, relations, flag=0, metadata=None):
717 718 """Add obsolete markers between changesets in a repo
718 719
719 720 <relations> must be an iterable of (<old>, (<new>, ...)) tuples.
720 721 `old` and `news` are changectx objects.
721 722
722 723 Trying to obsolete a public changeset will raise an exception.
723 724
724 725 The current user and date are used unless specified otherwise in the
725 726 metadata argument.
726 727
727 728 This function operates within a transaction of its own, but does
728 729 not take any lock on the repo.
729 730 """
730 731 # prepare metadata
731 732 if metadata is None:
732 733 metadata = {}
733 734 if 'date' not in metadata:
734 735 metadata['date'] = '%i %i' % util.makedate()
735 736 if 'user' not in metadata:
736 737 metadata['user'] = repo.ui.username()
737 738 tr = repo.transaction('add-obsolescence-marker')
738 739 try:
739 740 for prec, sucs in relations:
740 741 if not prec.mutable():
741 742 raise util.Abort("cannot obsolete immutable changeset: %s"
742 743 % prec)
743 744 nprec = prec.node()
744 745 nsucs = tuple(s.node() for s in sucs)
745 746 if nprec in nsucs:
746 747 raise util.Abort("changeset %s cannot obsolete itself" % prec)
747 748 repo.obsstore.create(tr, nprec, nsucs, flag, metadata)
748 749 repo.filteredrevcache.clear()
749 750 tr.close()
750 751 finally:
751 752 tr.release()
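Typical use from a history-rewriting command, as a sketch (`old`, `new` and `dropped` stand for changectx objects obtained elsewhere):

    # record that `old` was rewritten into `new`
    createmarkers(repo, [(old, (new,))])
    # record that `dropped` was removed without a replacement
    createmarkers(repo, [(dropped, ())])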