##// END OF EJS Templates
obsolete: allow passing a revision to successorssets()
Dan Villiom Podlaski Christiansen -
r19615:77d43476 default
parent child Browse files
Show More
@@ -1,819 +1,822 b''
1 1 # obsolete.py - obsolete markers handling
2 2 #
3 3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
4 4 # Logilab SA <contact@logilab.fr>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 """Obsolete markers handling
10 10
11 11 An obsolete marker maps an old changeset to a list of new
12 12 changesets. If the list of new changesets is empty, the old changeset
13 13 is said to be "killed". Otherwise, the old changeset is being
14 14 "replaced" by the new changesets.
15 15
16 16 Obsolete markers can be used to record and distribute changeset graph
17 17 transformations performed by history rewriting operations, and help
18 18 building new tools to reconciliate conflicting rewriting actions. To
19 19 facilitate conflicts resolution, markers include various annotations
20 20 besides old and news changeset identifiers, such as creation date or
21 21 author name.
22 22
23 23 The old obsoleted changeset is called "precursor" and possible replacements are
24 24 called "successors". Markers that used changeset X as a precursors are called
25 25 "successor markers of X" because they hold information about the successors of
26 26 X. Markers that use changeset Y as a successors are call "precursor markers of
27 27 Y" because they hold information about the precursors of Y.
28 28
29 29 Examples:
30 30
31 31 - When changeset A is replaced by a changeset A', one marker is stored:
32 32
33 33 (A, (A'))
34 34
35 35 - When changesets A and B are folded into a new changeset C two markers are
36 36 stored:
37 37
38 38 (A, (C,)) and (B, (C,))
39 39
40 40 - When changeset A is simply "pruned" from the graph, a marker is created:
41 41
42 42 (A, ())
43 43
44 44 - When changeset A is split into B and C, a single marker are used:
45 45
46 46     (A, (B, C))
47 47
48 48 We use a single marker to distinguish the "split" case from the "divergence"
49 49 case. If two independent operations rewrite the same changeset A into A' and
50 50 A'' we have an error case: divergent rewriting. We can detect it because
51 51 two markers will be created independently:
52 52
53 53 (A, (B,)) and (A, (C,))
54 54
55 55 Format
56 56 ------
57 57
58 58 Markers are stored in an append-only file stored in
59 59 '.hg/store/obsstore'.
60 60
61 61 The file starts with a version header:
62 62
63 63 - 1 unsigned byte: version number, starting at zero.
64 64
65 65
66 66 The header is followed by the markers. Each marker is made of:
67 67
68 68 - 1 unsigned byte: number of new changesets "R", could be zero.
69 69
70 70 - 1 unsigned 32-bits integer: metadata size "M" in bytes.
71 71
72 72 - 1 byte: a bit field. It is reserved for flags used in obsolete
73 73 markers common operations, to avoid repeated decoding of metadata
74 74 entries.
75 75
76 76 - 20 bytes: obsoleted changeset identifier.
77 77
78 78 - N*20 bytes: new changesets identifiers.
79 79
80 80 - M bytes: metadata as a sequence of nul-terminated strings. Each
81 81   string contains a key and a value, separated by a colon ':', without
82 82 additional encoding. Keys cannot contain '\0' or ':' and values
83 83 cannot contain '\0'.
84 84 """
85 85 import struct
86 86 import util, base85, node
87 87 from i18n import _
88 88
# short aliases for the struct helpers used throughout this module
_pack = struct.pack
_unpack = struct.unpack

_SEEK_END = 2 # os.SEEK_END was introduced in Python 2.5

# the obsolete feature is not mature enough to be enabled by default.
# you have to rely on third party extensions to enable this.
_enabled = False

# data used for parsing and writing
_fmversion = 0  # on-disk obsstore format version
_fmfixed = '>BIB20s'  # nb successors, metadata size, flags, precursor node
_fmnode = '20s'  # one successor node identifier
_fmfsize = struct.calcsize(_fmfixed)
_fnodesize = struct.calcsize(_fmnode)
104 104
### obsolescence marker flag

## bumpedfix flag
#
# When a changeset A' succeeds a changeset A which became public, we call A'
# "bumped" because it's a successor of a public changeset
#
#     o    A' (bumped)
#     |`:
#     | o  A
#     |/
#     o    Z
#
# The way to solve this situation is to create a new changeset Ad as children
# of A. This changeset has the same content as A'. So the diff from A to A'
# is the same as the diff from A to Ad. Ad is marked as a successor of A'
#
#     o   Ad
#     |`:
#     | x A'
#     |'|
#     o | A
#     |/
#     o Z
#
# But by transitivity Ad is also a successor of A. To avoid having Ad marked
# as bumped too, we add the `bumpedfix` flag to the marker. <A', (Ad,)>.
# This flag means that the successors express the changes between the public and
# bumped version and fix the situation, breaking the transitivity of
# "bumped" here.
bumpedfix = 1
136 136
def _readmarkers(data):
    """Read and enumerate markers from raw data"""
    offset = 0
    diskversion = _unpack('>B', data[offset:offset + 1])[0]
    offset += 1
    if diskversion != _fmversion:
        raise util.Abort(_('parsing obsolete marker: unknown version %r')
                         % diskversion)

    # walk the buffer, decoding one marker per iteration
    total = len(data)
    while offset + _fmfsize <= total:
        # fixed-size part of the marker
        chunk = data[offset:offset + _fmfsize]
        offset += _fmfsize
        numsuc, mdsize, flags, pre = _unpack(_fmfixed, chunk)
        # variable-size list of successor nodes
        sucs = ()
        if numsuc:
            size = _fnodesize * numsuc
            chunk = data[offset:offset + size]
            sucs = _unpack(_fmnode * numsuc, chunk)
            offset += size
        # trailing metadata blob (decoded lazily by consumers)
        metadata = data[offset:offset + mdsize]
        if len(metadata) != mdsize:
            raise util.Abort(_('parsing obsolete marker: metadata is too '
                               'short, %d bytes expected, got %d')
                             % (mdsize, len(metadata)))
        offset += mdsize
        yield (pre, sucs, flags, metadata)
def encodemeta(meta):
    """Return encoded metadata string to string mapping.

    Assume no ':' in key and no '\0' in both key and value.

    Raises ValueError when a key contains ':' or '\0', or when a value
    contains '\0'.  Keys are emitted in sorted order so the encoding is
    deterministic."""
    for key, value in meta.items():
        if ':' in key or '\0' in key:
            raise ValueError("':' and '\0' are forbidden in metadata key")
        if '\0' in value:
            # note: ':' *is* allowed in values; only NUL is forbidden because
            # it is the pair separator
            raise ValueError("'\0' is forbidden in metadata value")
    return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])
180 180
def decodemeta(data):
    """Return string to string dictionary from encoded version."""
    # pairs are NUL-separated; empty chunks (e.g. empty input) are skipped
    pairs = (chunk.split(':') for chunk in data.split('\0') if chunk)
    return dict((key, value) for key, value in pairs)
189 189
class marker(object):
    """Wrap obsolete marker raw data"""

    def __init__(self, repo, data):
        # the repo argument will be used to create changectx in later version
        self._repo = repo
        self._data = data
        self._decodedmeta = None

    def precnode(self):
        """Precursor changeset node identifier"""
        return self._data[0]

    def succnodes(self):
        """List of successor changesets node identifiers"""
        return self._data[1]

    def metadata(self):
        """Decoded metadata dictionary"""
        # decode lazily and memoize; raw metadata lives in self._data[3]
        if self._decodedmeta is None:
            self._decodedmeta = decodemeta(self._data[3])
        return self._decodedmeta

    def date(self):
        """Creation date as (unixtime, offset)"""
        timestamp, tzoffset = self.metadata()['date'].split(' ')
        return (float(timestamp), int(tzoffset))
class obsstore(object):
    """Store obsolete markers

    Markers can be accessed with two mappings:
    - precursors[x] -> set(markers on precursors edges of x)
    - successors[x] -> set(markers on successors edges of x)
    """

    def __init__(self, sopener):
        # caches for various obsolescence related cache
        self.caches = {}
        # every known marker, in load order
        self._all = []
        # new markers to serialize
        self.precursors = {}
        self.successors = {}
        self.sopener = sopener
        # eagerly load any existing on-disk markers
        data = sopener.tryread('obsstore')
        if data:
            self._load(_readmarkers(data))

    def __iter__(self):
        """Iterate over every known marker tuple."""
        return iter(self._all)

    def __nonzero__(self):
        """True when at least one marker is stored."""
        return bool(self._all)

    def create(self, transaction, prec, succs=(), flag=0, metadata=None):
        """obsolete: add a new obsolete marker

        * ensuring it is hashable
        * check mandatory metadata
        * encode metadata
        """
        if metadata is None:
            metadata = {}
        if 'date' not in metadata:
            metadata['date'] = "%d %d" % util.makedate()
        # nodes must be full 20-byte binary identifiers
        if len(prec) != 20:
            raise ValueError(prec)
        for succ in succs:
            if len(succ) != 20:
                raise ValueError(succ)
        marker = (str(prec), tuple(succs), int(flag), encodemeta(metadata))
        self.add(transaction, [marker])

    def add(self, transaction, markers):
        """Add new markers to the store

        Take care of filtering duplicate.
        Return the number of new marker."""
        if not _enabled:
            raise util.Abort('obsolete feature is not enabled on this repo')
        new = [m for m in markers if m not in self._all]
        if new:
            f = self.sopener('obsstore', 'ab')
            try:
                # Whether the file's current position is at the begin or at
                # the end after opening a file for appending is implementation
                # defined. So we must seek to the end before calling tell(),
                # or we may get a zero offset for non-zero sized files on
                # some platforms (issue3543).
                f.seek(0, _SEEK_END)
                offset = f.tell()
                transaction.add('obsstore', offset)
                # offset == 0: new file - add the version header
                for bytes in _encodemarkers(new, offset == 0):
                    f.write(bytes)
            finally:
                # XXX: f.close() == filecache invalidation == obsstore rebuilt.
                # call 'filecacheentry.refresh()' here
                f.close()
            self._load(new)
            # new marker *may* have changed several set. invalidate the cache.
            self.caches.clear()
        return len(new)

    def mergemarkers(self, transaction, data):
        """Decode markers from raw `data` and add them to the store."""
        markers = _readmarkers(data)
        self.add(transaction, markers)

    def _load(self, markers):
        # register each marker under both its precursor and successor nodes
        for mark in markers:
            self._all.append(mark)
            pre, sucs = mark[:2]
            self.successors.setdefault(pre, set()).add(mark)
            for suc in sucs:
                self.precursors.setdefault(suc, set()).add(mark)
        # nullid must never appear as a successor: such a marker is corrupt
        if node.nullid in self.precursors:
            raise util.Abort(_('bad obsolescence marker detected: '
                               'invalid successors nullid'))
308 308
def _encodemarkers(markers, addheader=False):
    """Yield the binary encoding of `markers`, optionally version-prefixed."""
    # Kept separate from flushmarkers(), it will be reused for
    # markers exchange.
    if addheader:
        yield _pack('>B', _fmversion)
    # note: loop variable renamed so it does not shadow the `marker` class
    for m in markers:
        yield _encodeonemarker(m)
316 316
317 317
def _encodeonemarker(marker):
    """Return the serialized form of a single marker tuple."""
    pre, sucs, flags, metadata = marker
    numsuc = len(sucs)
    # fixed header followed by one node slot per successor
    fmt = _fmfixed + (_fmnode * numsuc)
    fields = [numsuc, len(metadata), flags, pre]
    fields.extend(sucs)
    return _pack(fmt, *fields) + metadata
325 325
# arbitrarily picked to fit into the 8K limit from HTTP server
# you have to take into account:
# - the version header
# - the base85 encoding
_maxpayload = 5300
331 331
def listmarkers(repo):
    """List markers over pushkey"""
    store = repo.obsstore
    if not store:
        return {}
    keys = {}
    chunks = []
    # start above the payload limit so the first marker opens a fresh chunk
    chunklen = _maxpayload * 2
    for rawmarker in store:
        encoded = _encodeonemarker(rawmarker)
        if len(encoded) + chunklen > _maxpayload:
            chunk = []
            chunks.append(chunk)
            chunklen = 0
        chunk.append(encoded)
        chunklen += len(encoded)
    # number the chunks in reverse so 'dump0' names the last chunk built
    for idx, chunk in enumerate(reversed(chunks)):
        data = ''.join([_pack('>B', _fmversion)] + chunk)
        keys['dump%i' % idx] = base85.b85encode(data)
    return keys
351 351
def pushmarker(repo, key, old, new):
    """Push markers over pushkey

    `key` must look like 'dumpN' and `old` must be empty; `new` is a
    base85-encoded chunk of markers.  Returns 1 on success, 0 on a
    rejected key or unexpected old value."""
    if not key.startswith('dump'):
        repo.ui.warn(_('unknown key: %r') % key)
        return 0
    if old:
        # BUGFIX: the message previously had no %s placeholder, so the
        # '% key' formatting raised TypeError instead of warning.
        repo.ui.warn(_('unexpected old value for %s') % key)
        return 0
    data = base85.b85decode(new)
    lock = repo.lock()
    try:
        tr = repo.transaction('pushkey: obsolete markers')
        try:
            repo.obsstore.mergemarkers(tr, data)
            tr.close()
            return 1
        finally:
            tr.release()
    finally:
        lock.release()
372 372
def syncpush(repo, remote):
    """utility function to push obsolete markers to a remote

    Exists mostly to allow overriding for experimentation purposes"""
    if not (_enabled and repo.obsstore and
            'obsolete' in remote.listkeys('namespaces')):
        return
    results = []
    localdata = repo.listkeys('obsolete')
    # reverse sort to ensure we end with dump0
    for key in sorted(localdata, reverse=True):
        results.append(remote.pushkey('obsolete', key, '', localdata[key]))
    if not all(results):
        msg = _('failed to push some obsolete markers!\n')
        repo.ui.warn(msg)
388 388
def syncpull(repo, remote, gettransaction):
    """utility function to pull obsolete markers from a remote

    The `gettransaction` is function that return the pull transaction, creating
    one if necessary. We return the transaction to inform the calling code that
    a new transaction have been created (when applicable).

    Exists mostly to allow overridding for experimentation purpose"""
    tr = None
    if not _enabled:
        return tr
    repo.ui.debug('fetching remote obsolete markers\n')
    remoteobs = remote.listkeys('obsolete')
    if 'dump0' in remoteobs:
        tr = gettransaction()
        for key in sorted(remoteobs, reverse=True):
            if key.startswith('dump'):
                data = base85.b85decode(remoteobs[key])
                repo.obsstore.mergemarkers(tr, data)
        repo.invalidatevolatilesets()
    return tr
409 409
def allmarkers(repo):
    """all obsolete markers known in a repository"""
    for rawdata in repo.obsstore:
        yield marker(repo, rawdata)
414 414
def precursormarkers(ctx):
    """obsolete marker marking this changeset as a successors"""
    repo = ctx._repo
    for rawdata in repo.obsstore.precursors.get(ctx.node(), ()):
        yield marker(repo, rawdata)
419 419
def successormarkers(ctx):
    """obsolete marker making this changeset obsolete"""
    repo = ctx._repo
    for rawdata in repo.obsstore.successors.get(ctx.node(), ()):
        yield marker(repo, rawdata)
424 424
def allsuccessors(obsstore, nodes, ignoreflags=0):
    """Yield node for every successor of <nodes>.

    Some successors may be unknown locally.

    This is a linear yield unsuited to detecting split changesets."""
    pending = set(nodes)
    seen = set(pending)
    while pending:
        cur = pending.pop()
        yield cur
        for mark in obsstore.successors.get(cur, ()):
            # skip markers carrying any of the ignored flags
            if mark[2] & ignoreflags:
                continue
            for suc in mark[1]:
                if suc not in seen:
                    seen.add(suc)
                    pending.add(suc)
444 444
def foreground(repo, nodes):
    """return all nodes in the "foreground" of other node

    The foreground of a revision is anything reachable using parent -> children
    or precursor -> successor relation. It is very similar to "descendant" but
    augmented with obsolescence information.

    Beware that possible obsolescence cycle may result if complexe situation.
    """
    repo = repo.unfiltered()
    foreground = set(repo.set('%ln::', nodes))
    if not repo.obsstore:
        # no obsolescence data: the foreground is just the descendants
        return set(c.node() for c in foreground)
    # With obsolescence data, iterate until the union of descendants and
    # successors stops growing.
    # XXX will probably deserve an optimised revset.
    nodemap = repo.changelog.nodemap
    lastsize = -1
    while len(foreground) != lastsize:
        lastsize = len(foreground)
        succs = set(c.node() for c in foreground)
        mutable = [c.node() for c in foreground if c.mutable()]
        succs.update(allsuccessors(repo.obsstore, mutable))
        known = (n for n in succs if n in nodemap)
        foreground = set(repo.set('%ln::', known))
    return set(c.node() for c in foreground)
470 470
471 471
def successorssets(repo, initialnode, cache=None):
    """Return all set of successors of initial nodes

    Successors set of changeset A are a group of revision that succeed A. It
    succeed A as a consistent whole, each revision being only partial
    replacement. Successors set contains non-obsolete changeset only.

    In most cases a changeset A have zero (changeset pruned) or a single
    successors set that contains a single successor (changeset A replaced by
    A')

    When changeset is split, it results successors set containing more than
    a single element. Divergent rewriting will result in multiple successors
    sets.

    They are returned as a list of tuples containing all valid successors sets.

    Final successors unknown locally are considered plain prune (obsoleted
    without successors).

    The optional `cache` parameter is a dictionary that may contains
    precomputed successors sets. It is meant to reuse the computation of
    previous call to `successorssets` when multiple calls are made at the same
    time. The cache dictionary is updated in place. The caller is responsible
    for its live spawn. Code that makes multiple calls to `successorssets`
    *must* use this cache mechanism or suffer terrible performances."""

    # `initialnode` may also be given as a revision number; normalize it to
    # a binary node via the unfiltered changelog so hidden revisions resolve.
    if isinstance(initialnode, int):
        initialnode = repo.unfiltered().changelog.node(initialnode)

    succmarkers = repo.obsstore.successors

    # Stack of nodes we search successors sets for
    toproceed = [initialnode]
    # set version of above list for fast loop detection
    # element added to "toproceed" must be added here
    stackedset = set(toproceed)
    if cache is None:
        cache = {}

    # This while loop is the flattened version of a recursive search for
    # successors sets
    #
    # def successorssets(x):
    #    successors = directsuccessors(x)
    #    ss = [[]]
    #    for succ in directsuccessors(x):
    #        # product as in itertools cartesian product
    #        ss = product(ss, successorssets(succ))
    #    return ss
    #
    # But we can not use plain recursive calls here:
    # - that would blow the python call stack
    # - obsolescence markers may have cycles, we need to handle them.
    #
    # The `toproceed` list act as our call stack. Every node we search
    # successors set for are stacked there.
    #
    # The `stackedset` is set version of this stack used to check if a node is
    # already stacked. This check is used to detect cycles and prevent infinite
    # loop.
    #
    # successors set of all nodes are stored in the `cache` dictionary.
    #
    # After this while loop ends we use the cache to return the successors sets
    # for the node requested by the caller.
    while toproceed:
        # Every iteration tries to compute the successors sets of the topmost
        # node of the stack: CURRENT.
        #
        # There are four possible outcomes:
        #
        # 1) We already know the successors sets of CURRENT:
        #    -> mission accomplished, pop it from the stack.
        # 2) Node is not obsolete:
        #    -> the node is its own successors sets. Add it to the cache.
        # 3) We do not know successors set of direct successors of CURRENT:
        #    -> We add those successors to the stack.
        # 4) We know successors sets of all direct successors of CURRENT:
        #    -> We can compute CURRENT successors set and add it to the
        #       cache.
        #
        current = toproceed[-1]
        if current in cache:
            # case (1): We already know the successors sets
            stackedset.remove(toproceed.pop())
        elif current not in succmarkers:
            # case (2): The node is not obsolete.
            if current in repo:
                # We have a valid last successors.
                cache[current] = [(current,)]
            else:
                # Final obsolete version is unknown locally.
                # Do not count that as a valid successors
                cache[current] = []
        else:
            # cases (3) and (4)
            #
            # We proceed in two phases. Phase 1 aims to distinguish case (3)
            # from case (4):
            #
            #     For each direct successors of CURRENT, we check whether its
            #     successors sets are known. If they are not, we stack the
            #     unknown node and proceed to the next iteration of the while
            #     loop. (case 3)
            #
            #     During this step, we may detect obsolescence cycles: a node
            #     with unknown successors sets but already in the call stack.
            #     In such a situation, we arbitrary set the successors sets of
            #     the node to nothing (node pruned) to break the cycle.
            #
            #     If no break was encountered we proceed to phase 2.
            #
            # Phase 2 computes successors sets of CURRENT (case 4); see details
            # in phase 2 itself.
            #
            # Note the two levels of iteration in each phase.
            # - The first one handles obsolescence markers using CURRENT as
            #   precursor (successors markers of CURRENT).
            #
            #   Having multiple entry here means divergence.
            #
            # - The second one handles successors defined in each marker.
            #
            #   Having none means pruned node, multiple successors means split,
            #   single successors are standard replacement.
            #
            for mark in sorted(succmarkers[current]):
                for suc in mark[1]:
                    if suc not in cache:
                        if suc in stackedset:
                            # cycle breaking
                            cache[suc] = []
                        else:
                            # case (3) If we have not computed successors sets
                            # of one of those successors we add it to the
                            # `toproceed` stack and stop all work for this
                            # iteration.
                            toproceed.append(suc)
                            stackedset.add(suc)
                            break
                else:
                    continue
                break
            else:
                # case (4): we know all successors sets of all direct
                # successors
                #
                # Successors set contributed by each marker depends on the
                # successors sets of all its "successors" node.
                #
                # Each different marker is a divergence in the obsolescence
                # history. It contributes successors sets distinct from other
                # markers.
                #
                # Within a marker, a successor may have divergent successors
                # sets. In such a case, the marker will contribute multiple
                # divergent successors sets. If multiple successors have
                # divergent successors sets, a cartesian product is used.
                #
                # At the end we post-process successors sets to remove
                # duplicated entry and successors set that are strict subset of
                # another one.
                succssets = []
                for mark in sorted(succmarkers[current]):
                    # successors sets contributed by this marker
                    markss = [[]]
                    for suc in mark[1]:
                        # cardinal product with previous successors
                        productresult = []
                        for prefix in markss:
                            for suffix in cache[suc]:
                                newss = list(prefix)
                                for part in suffix:
                                    # do not duplicated entry in successors set
                                    # first entry wins.
                                    if part not in newss:
                                        newss.append(part)
                                productresult.append(newss)
                        markss = productresult
                    succssets.extend(markss)
                # remove duplicated and subset
                seen = []
                final = []
                candidate = sorted(((set(s), s) for s in succssets if s),
                                   key=lambda x: len(x[1]), reverse=True)
                for setversion, listversion in candidate:
                    for seenset in seen:
                        if setversion.issubset(seenset):
                            break
                    else:
                        final.append(listversion)
                        seen.append(setversion)
                final.reverse() # put small successors set first
                cache[current] = final
    return cache[initialnode]
665 668
666 669 def _knownrevs(repo, nodes):
667 670 """yield revision numbers of known nodes passed in parameters
668 671
669 672 Unknown revisions are silently ignored."""
670 673 torev = repo.changelog.nodemap.get
671 674 for n in nodes:
672 675 rev = torev(n)
673 676 if rev is not None:
674 677 yield rev
675 678
# mapping of 'set-name' -> <function to compute this set>
cachefuncs = {}

def cachefor(name):
    """Decorator to register a function as computing the cache for a set"""
    def register(computer):
        # each set name may only have a single computer
        assert name not in cachefuncs
        cachefuncs[name] = computer
        return computer
    return register
685 688
def getrevs(repo, name):
    """Return the set of revision that belong to the <name> set

    Such access may compute the set and cache it for future use"""
    repo = repo.unfiltered()
    if not repo.obsstore:
        # no obsolescence data at all: every such set is empty
        return ()
    caches = repo.obsstore.caches
    if name not in caches:
        caches[name] = cachefuncs[name](repo)
    return caches[name]
696 699
# To be simple we need to invalidate obsolescence cache when:
#
# - new changeset is added:
# - public phase is changed
# - obsolescence marker are added
# - strip is used a repo
def clearobscaches(repo):
    """Remove all obsolescence related cache from a repo

    This removes all caches in obsstore if the obsstore already exists on the
    repo.

    (We could be smarter here given the exact event that trigger the cache
    clearing)"""
    # nothing to do unless this repo actually has obsstore data loaded
    if 'obsstore' not in repo._filecache:
        return
    repo.obsstore.caches.clear()
714 717
@cachefor('obsolete')
def _computeobsoleteset(repo):
    """the set of obsolete revisions"""
    getrev = repo.changelog.nodemap.get
    getphase = repo._phasecache.phase
    obs = set()
    for prec in repo.obsstore.successors:
        rev = getrev(prec)
        # skip nodes unknown locally (rev is None) and public ones (phase 0)
        if rev is not None and getphase(repo, rev):
            obs.add(rev)
    return obs
726 729
@cachefor('unstable')
def _computeunstableset(repo):
    """the set of non obsolete revisions with obsolete parents"""
    # revset is not efficient enough here
    # we do (obsolete()::) - obsolete() by hand
    obs = getrevs(repo, 'obsolete')
    if not obs:
        return set()
    descendants = repo.changelog.descendants(obs)
    return set(r for r in descendants if r not in obs)
737 740
@cachefor('suspended')
def _computesuspendedset(repo):
    """the set of obsolete parents with non obsolete descendants"""
    unstable = getrevs(repo, 'unstable')
    suspended = repo.changelog.ancestors(unstable)
    return set(r for r in getrevs(repo, 'obsolete') if r in suspended)
743 746
@cachefor('extinct')
def _computeextinctset(repo):
    """the set of obsolete parents without non obsolete descendants"""
    obsolete = getrevs(repo, 'obsolete')
    suspended = getrevs(repo, 'suspended')
    return obsolete - suspended
748 751
749 752
@cachefor('bumped')
def _computebumpedset(repo):
    """the set of revs trying to obsolete public revisions"""
    tonode = repo.changelog.node
    # every successor of a public changeset is a bumped candidate,
    # except along markers carrying the bumpedfix flag
    candidates = allsuccessors(repo.obsstore,
                               (tonode(r) for r in repo.revs('public()')),
                               ignoreflags=bumpedfix)
    # revision public or already obsolete don't count as bumped
    query = '%ld - obsolete() - public()'
    return set(repo.revs(query, _knownrevs(repo, candidates)))
761 764
@cachefor('divergent')
def _computedivergentset(repo):
    """the set of rev that compete to be the final successors of some revision.
    """
    divergent = set()
    obsstore = repo.obsstore
    newermap = {}
    for ctx in repo.set('(not public()) - obsolete()'):
        # walk precursors transitively, looking for one with more than a
        # single non-empty successors set: that is divergence
        pending = set(obsstore.precursors.get(ctx.node(), ()))
        while pending:
            prec = pending.pop()[0]
            if prec not in newermap:
                successorssets(repo, prec, newermap)
            newer = [n for n in newermap[prec] if n]
            if len(newer) > 1:
                divergent.add(ctx.rev())
                break
            pending.update(obsstore.precursors.get(prec, ()))
    return divergent
782 785
783 786
def createmarkers(repo, relations, flag=0, metadata=None):
    """Add obsolete markers between changesets in a repo

    <relations> must be an iterable of (<old>, (<new>, ...)) tuple.
    `old` and `news` are changectx.

    Trying to obsolete a public changeset will raise an exception.

    Current user and date are used except if specified otherwise in the
    metadata attribute.

    This function operates within a transaction of its own, but does
    not take any lock on the repo.
    """
    # prepare metadata
    if metadata is None:
        metadata = {}
    if 'date' not in metadata:
        metadata['date'] = '%i %i' % util.makedate()
    if 'user' not in metadata:
        metadata['user'] = repo.ui.username()
    tr = repo.transaction('add-obsolescence-marker')
    try:
        for old, news in relations:
            if not old.mutable():
                raise util.Abort("cannot obsolete immutable changeset: %s"
                                 % old)
            nprec = old.node()
            nsucs = tuple(s.node() for s in news)
            if nprec in nsucs:
                raise util.Abort("changeset %s cannot obsolete itself" % old)
            repo.obsstore.create(tr, nprec, nsucs, flag, metadata)
            repo.filteredrevcache.clear()
        tr.close()
    finally:
        tr.release()
General Comments 0
You need to be logged in to leave comments. Login now