obsolete: let N denote the number of new changesets in a marker...
Martin Geisler
r21165:fde46c53 stable
@@ -1,869 +1,869 @@
# obsolete.py - obsolete markers handling
#
# Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
#                Logilab SA        <contact@logilab.fr>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

9 9 """Obsolete marker handling
10 10
11 11 An obsolete marker maps an old changeset to a list of new
12 12 changesets. If the list of new changesets is empty, the old changeset
13 13 is said to be "killed". Otherwise, the old changeset is being
14 14 "replaced" by the new changesets.
15 15
16 16 Obsolete markers can be used to record and distribute changeset graph
17 17 transformations performed by history rewrite operations, and help
18 18 building new tools to reconcile conflicting rewrite actions. To
19 19 facilitate conflict resolution, markers include various annotations
20 20 besides old and news changeset identifiers, such as creation date or
21 21 author name.
22 22
23 23 The old obsoleted changeset is called a "precursor" and possible
24 24 replacements are called "successors". Markers that used changeset X as
25 25 a precursor are called "successor markers of X" because they hold
26 26 information about the successors of X. Markers that use changeset Y as
27 27 a successors are call "precursor markers of Y" because they hold
28 28 information about the precursors of Y.
29 29
Examples:

- When changeset A is replaced by changeset A', one marker is stored:

    (A, (A',))

- When changesets A and B are folded into a new changeset C, two markers are
  stored:

    (A, (C,)) and (B, (C,))

- When changeset A is simply "pruned" from the graph, a marker is created:

    (A, ())

- When changeset A is split into B and C, a single marker is used:

    (A, (B, C))

  We use a single marker to distinguish the "split" case from the "divergence"
  case. If two independent operations rewrite the same changeset A into A' and
  A'', we have an error case: divergent rewriting. We can detect it because
  two markers will be created independently:

    (A, (A',)) and (A, (A'',))

Format
------

Markers are stored in an append-only file stored in
'.hg/store/obsstore'.

The file starts with a version header:

- 1 unsigned byte: version number, starting at zero.

The header is followed by the markers. Each marker is made of:

- - 1 unsigned byte: number of new changesets "R", can be zero.
+ - 1 unsigned byte: number of new changesets "N", can be zero.

- 1 unsigned 32-bit integer: metadata size "M" in bytes.

- 1 byte: a bit field. It is reserved for flags used in common
  obsolete marker operations, to avoid repeated decoding of metadata
  entries.

- 20 bytes: obsoleted changeset identifier.

- N*20 bytes: new changeset identifiers.

- M bytes: metadata as a sequence of nul-terminated strings. Each
  string contains a key and a value, separated by a colon ':', without
  additional encoding. Keys cannot contain '\0' or ':' and values
  cannot contain '\0'.

"""
import struct
import util, base85, node
import phases
from i18n import _

_pack = struct.pack
_unpack = struct.unpack

_SEEK_END = 2  # os.SEEK_END was introduced in Python 2.5

# the obsolete feature is not mature enough to be enabled by default.
# you have to rely on a third party extension to enable this.
_enabled = False

# data used for parsing and writing
_fmversion = 0
_fmfixed = '>BIB20s'
_fmnode = '20s'
_fmfsize = struct.calcsize(_fmfixed)
_fnodesize = struct.calcsize(_fmnode)

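# Illustrative sketch (added for exposition, not part of the original
# module; the _demoformat helper is hypothetical): hand-pack one marker
# exactly as the "Format" section above describes, then take it apart
# again with the struct formats just defined. Node ids are fake 20-byte
# strings chosen for the example.
def _demoformat():
    prec = '\x11' * 20                  # fake obsoleted changeset id
    succ = '\x22' * 20                  # fake new changeset id
    meta = 'user:alice'                 # one "key:value" metadata string
    raw = _pack(_fmfixed + _fmnode, 1, len(meta), 0, prec, succ) + meta
    nbsuc, mdsize, flags, pre = _unpack(_fmfixed, raw[:_fmfsize])
    assert (nbsuc, mdsize, flags, pre) == (1, len(meta), 0, prec)
    sucs = _unpack(_fmnode * nbsuc, raw[_fmfsize:_fmfsize + _fnodesize])
    assert sucs == (succ,) and raw[_fmfsize + _fnodesize:] == meta
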
### obsolescence marker flag

## bumpedfix flag
#
# When a changeset A' succeeds a changeset A which became public, we call A'
# "bumped" because it is a successor of a public changeset
#
#     o     A' (bumped)
#     |`:
#     | o   A
#     |/
#     o     Z
#
# The way to solve this situation is to create a new changeset Ad as a child
# of A. This changeset has the same content as A'. So the diff from A to A'
# is the same as the diff from A to Ad. Ad is marked as a successor of A'
#
#     o     Ad
#     |`:
#     | x   A'
#     |'|
#     o |   A
#     |/
#     o     Z
#
# But by transitivity Ad is also a successor of A. To avoid having Ad marked
# as bumped too, we add the `bumpedfix` flag to the marker: <A', (Ad,)>.
# This flag means that the successor expresses the changes between the public
# and bumped versions and fixes the situation, breaking the transitivity of
# "bumped" here.
bumpedfix = 1

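# Illustrative sketch (added for exposition, not part of the original
# module; _hasbumpedfix is hypothetical): the flag field is the third
# element of a raw marker tuple and is tested bitwise, e.g. by
# allsuccessors(..., ignoreflags=bumpedfix) further down.
def _hasbumpedfix(mark):
    return bool(mark[2] & bumpedfix)
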
def _readmarkers(data):
    """Read and enumerate markers from raw data"""
    off = 0
    diskversion = _unpack('>B', data[off:off + 1])[0]
    off += 1
    if diskversion != _fmversion:
        raise util.Abort(_('parsing obsolete marker: unknown version %r')
                         % diskversion)

    # Loop on markers
    l = len(data)
    while off + _fmfsize <= l:
        # read fixed part
        cur = data[off:off + _fmfsize]
        off += _fmfsize
        nbsuc, mdsize, flags, pre = _unpack(_fmfixed, cur)
        # read replacement
        sucs = ()
        if nbsuc:
            s = _fnodesize * nbsuc
            cur = data[off:off + s]
            sucs = _unpack(_fmnode * nbsuc, cur)
            off += s
        # read metadata
        # (metadata will be decoded on demand)
        metadata = data[off:off + mdsize]
        if len(metadata) != mdsize:
            raise util.Abort(_('parsing obsolete marker: metadata is too '
                               'short, %d bytes expected, got %d')
                             % (mdsize, len(metadata)))
        off += mdsize
        yield (pre, sucs, flags, metadata)

def encodemeta(meta):
    """Return an encoded version of a string-to-string metadata mapping.

    Assumes no ':' in keys and no '\0' in either keys or values."""
    for key, value in meta.iteritems():
        if ':' in key or '\0' in key:
            raise ValueError("':' and '\0' are forbidden in metadata keys")
        if '\0' in value:
            raise ValueError("'\0' is forbidden in metadata values")
    return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])

def decodemeta(data):
    """Return a string-to-string dictionary from the encoded version."""
    d = {}
    for l in data.split('\0'):
        if l:
            # values may contain ':', so split on the first colon only
            key, value = l.split(':', 1)
            d[key] = value
    return d

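# Illustrative sketch (added for exposition, not part of the original
# module; _demometa is hypothetical): metadata round-trips through the
# '\0'-separated "key:value" encoding above.
def _demometa():
    meta = {'user': 'alice <alice@example.org>', 'date': '0 0'}
    assert decodemeta(encodemeta(meta)) == meta
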
class marker(object):
    """Wrap obsolete marker raw data"""

    def __init__(self, repo, data):
        # the repo argument will be used to create changectx in a later
        # version
        self._repo = repo
        self._data = data
        self._decodedmeta = None

    def __hash__(self):
        return hash(self._data)

    def __eq__(self, other):
        if type(other) != type(self):
            return False
        return self._data == other._data

    def precnode(self):
        """Precursor changeset node identifier"""
        return self._data[0]

    def succnodes(self):
        """List of successor changesets' node identifiers"""
        return self._data[1]

    def metadata(self):
        """Decoded metadata dictionary"""
        if self._decodedmeta is None:
            self._decodedmeta = decodemeta(self._data[3])
        return self._decodedmeta

    def date(self):
        """Creation date as (unixtime, offset)"""
        parts = self.metadata()['date'].split(' ')
        return (float(parts[0]), int(parts[1]))

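# Illustrative sketch (added for exposition, not part of the original
# module; _demomarker is hypothetical): wrap a raw marker tuple. The repo
# argument is only stored by __init__ above, so None is enough here.
def _demomarker():
    raw = ('\x11' * 20, ('\x22' * 20,), 0, encodemeta({'date': '0 0'}))
    m = marker(None, raw)
    assert m.precnode() == '\x11' * 20
    assert m.succnodes() == ('\x22' * 20,)
    assert m.date() == (0.0, 0)
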
class obsstore(object):
    """Store obsolete markers

    Markers can be accessed with two mappings:
    - precursors[x] -> set(markers on precursors edges of x)
    - successors[x] -> set(markers on successors edges of x)
    """

    def __init__(self, sopener):
        # caches for various obsolescence-related sets
        self.caches = {}
        self._all = []
        # new markers to serialize
        self.precursors = {}
        self.successors = {}
        self.sopener = sopener
        data = sopener.tryread('obsstore')
        if data:
            self._load(_readmarkers(data))

    def __iter__(self):
        return iter(self._all)

    def __len__(self):
        return len(self._all)

    def __nonzero__(self):
        return bool(self._all)

    def create(self, transaction, prec, succs=(), flag=0, metadata=None):
        """Add a new obsolete marker

        * ensure it is hashable
        * check mandatory metadata
        * encode metadata

        If you are a human writing code that creates markers, you want to
        use the `createmarkers` function in this module instead.

        Return True if a new marker has been added, False if the marker
        already existed (no-op).
        """
        if metadata is None:
            metadata = {}
        if 'date' not in metadata:
            metadata['date'] = "%d %d" % util.makedate()
        if len(prec) != 20:
            raise ValueError(prec)
        for succ in succs:
            if len(succ) != 20:
                raise ValueError(succ)
        marker = (str(prec), tuple(succs), int(flag), encodemeta(metadata))
        return bool(self.add(transaction, [marker]))

    def add(self, transaction, markers):
        """Add new markers to the store

        Takes care of filtering out duplicates.
        Returns the number of new markers."""
        if not _enabled:
            raise util.Abort('obsolete feature is not enabled on this repo')
        known = set(self._all)
        new = []
        for m in markers:
            if m not in known:
                known.add(m)
                new.append(m)
        if new:
            f = self.sopener('obsstore', 'ab')
            try:
                # Whether the file's current position is at the beginning
                # or at the end after opening a file for appending is
                # implementation defined. So we must seek to the end before
                # calling tell(), or we may get a zero offset for non-zero
                # sized files on some platforms (issue3543).
                f.seek(0, _SEEK_END)
                offset = f.tell()
                transaction.add('obsstore', offset)
                # offset == 0: new file - add the version header
                for bytes in _encodemarkers(new, offset == 0):
                    f.write(bytes)
            finally:
                # XXX: f.close() == filecache invalidation == obsstore
                # rebuilt. call 'filecacheentry.refresh()' here
                f.close()
            self._load(new)
            # new markers *may* have changed several sets. invalidate the
            # cache.
            self.caches.clear()
        return len(new)

    def mergemarkers(self, transaction, data):
        markers = _readmarkers(data)
        self.add(transaction, markers)

    def _load(self, markers):
        for mark in markers:
            self._all.append(mark)
            pre, sucs = mark[:2]
            self.successors.setdefault(pre, set()).add(mark)
            for suc in sucs:
                self.precursors.setdefault(suc, set()).add(mark)
        if node.nullid in self.precursors:
            raise util.Abort(_('bad obsolescence marker detected: '
                               'invalid successors nullid'))

def _encodemarkers(markers, addheader=False):
    # Kept separate from flushmarkers(), it will be reused for
    # markers exchange.
    if addheader:
        yield _pack('>B', _fmversion)
    for marker in markers:
        yield _encodeonemarker(marker)


def _encodeonemarker(marker):
    pre, sucs, flags, metadata = marker
    nbsuc = len(sucs)
    format = _fmfixed + (_fmnode * nbsuc)
    data = [nbsuc, len(metadata), flags, pre]
    data.extend(sucs)
    return _pack(format, *data) + metadata

# arbitrarily picked to fit into the 8K limit from the HTTP server
# you have to take into account:
# - the version header
# - the base85 encoding
_maxpayload = 5300

def _pushkeyescape(markers):
    """encode markers into a dict suitable for pushkey exchange

    - binary data is base85 encoded
    - split into chunks smaller than 5300 bytes"""
    keys = {}
    parts = []
    currentlen = _maxpayload * 2  # ensure we create a new part
    for marker in markers:
        nextdata = _encodeonemarker(marker)
        if len(nextdata) + currentlen > _maxpayload:
            currentpart = []
            currentlen = 0
            parts.append(currentpart)
        currentpart.append(nextdata)
        currentlen += len(nextdata)
    for idx, part in enumerate(reversed(parts)):
        data = ''.join([_pack('>B', _fmversion)] + part)
        keys['dump%i' % idx] = base85.b85encode(data)
    return keys

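# Illustrative sketch (added for exposition, not part of the original
# module; _demopushkey is hypothetical): a single pruning marker becomes
# one 'dump0' pushkey entry whose payload starts with the version header
# once base85-decoded.
def _demopushkey():
    mark = ('\x11' * 20, (), 0, '')     # fake pruned changeset
    keys = _pushkeyescape([mark])
    assert keys.keys() == ['dump0']
    assert base85.b85decode(keys['dump0']).startswith(_pack('>B', _fmversion))
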
def listmarkers(repo):
    """List markers over pushkey"""
    if not repo.obsstore:
        return {}
    return _pushkeyescape(repo.obsstore)

def pushmarker(repo, key, old, new):
    """Push markers over pushkey"""
    if not key.startswith('dump'):
        repo.ui.warn(_('unknown key: %r') % key)
        return 0
    if old:
        repo.ui.warn(_('unexpected old value for %r') % key)
        return 0
    data = base85.b85decode(new)
    lock = repo.lock()
    try:
        tr = repo.transaction('pushkey: obsolete markers')
        try:
            repo.obsstore.mergemarkers(tr, data)
            tr.close()
            return 1
        finally:
            tr.release()
    finally:
        lock.release()

def allmarkers(repo):
    """all obsolete markers known in a repository"""
    for markerdata in repo.obsstore:
        yield marker(repo, markerdata)

def precursormarkers(ctx):
    """obsolete markers marking this changeset as a successor"""
    for data in ctx._repo.obsstore.precursors.get(ctx.node(), ()):
        yield marker(ctx._repo, data)

def successormarkers(ctx):
    """obsolete markers making this changeset obsolete"""
    for data in ctx._repo.obsstore.successors.get(ctx.node(), ()):
        yield marker(ctx._repo, data)

def allsuccessors(obsstore, nodes, ignoreflags=0):
    """Yield every successor of <nodes>.

    Some successors may be unknown locally.

    This is a linear yield unsuited to detecting split changesets. It
    includes the initial nodes too."""
    remaining = set(nodes)
    seen = set(remaining)
    while remaining:
        current = remaining.pop()
        yield current
        for mark in obsstore.successors.get(current, ()):
            # ignore markers flagged with the specified flag
            if mark[2] & ignoreflags:
                continue
            for suc in mark[1]:
                if suc not in seen:
                    seen.add(suc)
                    remaining.add(suc)

def allprecursors(obsstore, nodes, ignoreflags=0):
    """Yield every precursor of <nodes>.

    Some precursors may be unknown locally.

    This is a linear yield unsuited to detecting folded changesets. It
    includes the initial nodes too."""
    remaining = set(nodes)
    seen = set(remaining)
    while remaining:
        current = remaining.pop()
        yield current
        for mark in obsstore.precursors.get(current, ()):
            # ignore markers flagged with the specified flag
            if mark[2] & ignoreflags:
                continue
            prec = mark[0]
            if prec not in seen:
                seen.add(prec)
                remaining.add(prec)

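# Illustrative sketch (added for exposition, not part of the original
# module; _demotraversal and the stub class are hypothetical): walk
# markers through a stub obsstore with short fake node ids. A split
# marker A -> (B, C) yields A, B and C in arbitrary order.
def _demotraversal():
    class stubstore(object):
        successors = {'A': [('A', ('B', 'C'), 0, '')]}
        precursors = {'B': [('A', ('B', 'C'), 0, '')],
                      'C': [('A', ('B', 'C'), 0, '')]}
    assert set(allsuccessors(stubstore(), ['A'])) == set(['A', 'B', 'C'])
    assert set(allprecursors(stubstore(), ['B'])) == set(['A', 'B'])
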
def foreground(repo, nodes):
    """return all nodes in the "foreground" of the given nodes

    The foreground of a revision is anything reachable using the
    parent -> children or precursor -> successor relations. It is very
    similar to "descendants" but augmented with obsolescence information.

    Beware that obsolescence cycles may produce unexpected results in
    complex situations.
    """
    repo = repo.unfiltered()
    foreground = set(repo.set('%ln::', nodes))
    if repo.obsstore:
        # We only need this complicated logic if there is obsolescence
        # XXX will probably deserve an optimised revset.
        nm = repo.changelog.nodemap
        plen = -1
        # compute the whole set of successors or descendants
        while len(foreground) != plen:
            plen = len(foreground)
            succs = set(c.node() for c in foreground)
            mutable = [c.node() for c in foreground if c.mutable()]
            succs.update(allsuccessors(repo.obsstore, mutable))
            known = (n for n in succs if n in nm)
            foreground = set(repo.set('%ln::', known))
    return set(c.node() for c in foreground)


def successorssets(repo, initialnode, cache=None):
    """Return all successors sets of the initial node

    The successors set of a changeset A is a group of revisions that succeed
    A as a consistent whole, each revision being only a partial replacement.
    The successors set contains non-obsolete changesets only.

    This function returns the full list of successors sets, which is why it
    returns a list of tuples and not just a single tuple. Each tuple is a
    valid successors set. Note that (A,) may be a valid successors set for
    changeset A (see below).

    In most cases, a changeset A will have a single element (e.g. the
    changeset A is replaced by A') in its successors set. Though, it is also
    common for a changeset A to have no elements in its successors set (e.g.
    the changeset has been pruned). Therefore, the returned list of
    successors sets will be [(A',)] or [], respectively.

    When a changeset A is split into A' and B', however, it will result in a
    successors set containing more than a single element, i.e. [(A', B')].
    Divergent changesets will result in multiple successors sets, i.e.
    [(A',), (A'',)].

    If a changeset A is not obsolete, then it will conceptually have no
    successors set. To distinguish this from a pruned changeset, the
    successors set will only contain itself, i.e. [(A,)].

    Finally, successors unknown locally are considered to be pruned
    (obsoleted without any successors).

    The optional `cache` parameter is a dictionary that may contain
    precomputed successors sets. It is meant to reuse the computation of a
    previous call to `successorssets` when multiple calls are made at the
    same time. The cache dictionary is updated in place. The caller is
    responsible for its life span. Code that makes multiple calls to
    `successorssets` *must* use this cache mechanism or suffer terrible
    performance.
    """

    succmarkers = repo.obsstore.successors

    # Stack of nodes we search successors sets for
    toproceed = [initialnode]
    # set version of above list for fast loop detection
    # element added to "toproceed" must be added here
    stackedset = set(toproceed)
    if cache is None:
        cache = {}

    # This while loop is the flattened version of a recursive search for
    # successors sets
    #
    # def successorssets(x):
    #     successors = directsuccessors(x)
    #     ss = [[]]
    #     for succ in directsuccessors(x):
    #         # product as in itertools cartesian product
    #         ss = product(ss, successorssets(succ))
    #     return ss
    #
    # But we cannot use plain recursive calls here:
    # - that would blow the python call stack
    # - obsolescence markers may have cycles, we need to handle them.
    #
    # The `toproceed` list acts as our call stack. Every node whose
    # successors sets we search for is stacked there.
    #
    # The `stackedset` is the set version of this stack, used to check if a
    # node is already stacked. This check is used to detect cycles and
    # prevent infinite loops.
    #
    # The successors sets of all nodes are stored in the `cache` dictionary.
    #
    # After this while loop ends we use the cache to return the successors
    # sets for the node requested by the caller.
    while toproceed:
        # Every iteration tries to compute the successors sets of the topmost
        # node of the stack: CURRENT.
        #
        # There are four possible outcomes:
        #
        # 1) We already know the successors sets of CURRENT:
        #    -> mission accomplished, pop it from the stack.
        # 2) Node is not obsolete:
        #    -> the node is its own successors sets. Add it to the cache.
        # 3) We do not know the successors sets of direct successors of
        #    CURRENT:
        #    -> We add those successors to the stack.
        # 4) We know the successors sets of all direct successors of CURRENT:
        #    -> We can compute CURRENT's successors sets and add them to the
        #       cache.
        #
        current = toproceed[-1]
        if current in cache:
            # case (1): We already know the successors sets
            stackedset.remove(toproceed.pop())
        elif current not in succmarkers:
            # case (2): The node is not obsolete.
            if current in repo:
                # We have a valid final successor.
                cache[current] = [(current,)]
            else:
                # Final obsolete version is unknown locally.
                # Do not count that as a valid successor.
                cache[current] = []
        else:
            # cases (3) and (4)
            #
            # We proceed in two phases. Phase 1 aims to distinguish case (3)
            # from case (4):
            #
            #     For each direct successor of CURRENT, we check whether its
            #     successors sets are known. If they are not, we stack the
            #     unknown node and proceed to the next iteration of the while
            #     loop. (case 3)
            #
            #     During this step, we may detect obsolescence cycles: a node
            #     with unknown successors sets but already in the call stack.
            #     In such a situation, we arbitrarily set the successors sets
            #     of the node to nothing (node pruned) to break the cycle.
            #
            #     If no break was encountered we proceed to phase 2.
            #
            # Phase 2 computes the successors sets of CURRENT (case 4); see
            # details in phase 2 itself.
            #
            # Note the two levels of iteration in each phase.
            # - The first one handles obsolescence markers using CURRENT as
            #   precursor (successors markers of CURRENT).
            #
            #   Having multiple entries here means divergence.
            #
            # - The second one handles successors defined in each marker.
            #
            #   Having none means a pruned node; multiple successors mean a
            #   split; a single successor is a standard replacement.
            #
            for mark in sorted(succmarkers[current]):
                for suc in mark[1]:
                    if suc not in cache:
                        if suc in stackedset:
                            # cycle breaking
                            cache[suc] = []
                        else:
                            # case (3) If we have not computed successors
                            # sets of one of those successors we add it to
                            # the `toproceed` stack and stop all work for
                            # this iteration.
                            toproceed.append(suc)
                            stackedset.add(suc)
                            break
                else:
                    continue
                break
            else:
                # case (4): we know all successors sets of all direct
                # successors
                #
                # The successors set contributed by each marker depends on
                # the successors sets of all its "successors" nodes.
                #
                # Each different marker is a divergence in the obsolescence
                # history. It contributes successors sets distinct from other
                # markers.
                #
                # Within a marker, a successor may have divergent successors
                # sets. In such a case, the marker will contribute multiple
                # divergent successors sets. If multiple successors have
                # divergent successors sets, a cartesian product is used.
                #
                # At the end we post-process successors sets to remove
                # duplicated entries and successors sets that are strict
                # subsets of another one.
                succssets = []
                for mark in sorted(succmarkers[current]):
                    # successors sets contributed by this marker
                    markss = [[]]
                    for suc in mark[1]:
                        # cartesian product with previous successors
                        productresult = []
                        for prefix in markss:
                            for suffix in cache[suc]:
                                newss = list(prefix)
                                for part in suffix:
                                    # do not duplicate entries in a
                                    # successors set; first entry wins.
                                    if part not in newss:
                                        newss.append(part)
                                productresult.append(newss)
                        markss = productresult
                    succssets.extend(markss)
                # remove duplicates and subsets
                seen = []
                final = []
                candidate = sorted(((set(s), s) for s in succssets if s),
                                   key=lambda x: len(x[1]), reverse=True)
                for setversion, listversion in candidate:
                    for seenset in seen:
                        if setversion.issubset(seenset):
                            break
                    else:
                        final.append(listversion)
                        seen.append(setversion)
                final.reverse()  # put small successors sets first
                cache[current] = final
    return cache[initialnode]

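# Illustrative sketch (added for exposition, not part of the original
# module; _demosuccessorssets and the stub classes are hypothetical):
# drive successorssets() with minimal stubs. 'A' was rewritten into 'B';
# 'B' is not obsolete and known locally, so the only successors set of
# 'A' contains just 'B'.
def _demosuccessorssets():
    class stubstore(object):
        successors = {'A': [('A', ('B',), 0, '')]}
    class stubrepo(object):
        obsstore = stubstore()
        def __contains__(self, node):
            return True                 # pretend every node is known
    res = successorssets(stubrepo(), 'A')
    assert [tuple(s) for s in res] == [('B',)]
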
def _knownrevs(repo, nodes):
    """yield the revision numbers of the given nodes that are known locally

    Unknown nodes are silently ignored."""
    torev = repo.changelog.nodemap.get
    for n in nodes:
        rev = torev(n)
        if rev is not None:
            yield rev

# mapping of 'set-name' -> <function to compute this set>
cachefuncs = {}
def cachefor(name):
    """Decorator to register a function as computing the cache for a set"""
    def decorator(func):
        assert name not in cachefuncs
        cachefuncs[name] = func
        return func
    return decorator

def getrevs(repo, name):
    """Return the set of revisions that belong to the <name> set

    Such access may compute the set and cache it for future use"""
    repo = repo.unfiltered()
    if not repo.obsstore:
        return ()
    if name not in repo.obsstore.caches:
        repo.obsstore.caches[name] = cachefuncs[name](repo)
    return repo.obsstore.caches[name]

# To keep things simple, we need to invalidate obsolescence caches when:
#
# - a new changeset is added
# - the phase of a changeset changes
# - obsolescence markers are added
# - a repo is stripped
def clearobscaches(repo):
    """Remove all obsolescence-related caches from a repo

    This removes all caches in the obsstore if the obsstore already exists
    on the repo.

    (We could be smarter here given the exact event that triggered the cache
    clearing)"""
    # only clear caches if there is obsstore data in this repo
    if 'obsstore' in repo._filecache:
        repo.obsstore.caches.clear()

@cachefor('obsolete')
def _computeobsoleteset(repo):
    """the set of obsolete revisions"""
    obs = set()
    getrev = repo.changelog.nodemap.get
    getphase = repo._phasecache.phase
    for node in repo.obsstore.successors:
        rev = getrev(node)
        if rev is not None and getphase(repo, rev):
            obs.add(rev)
    return obs

@cachefor('unstable')
def _computeunstableset(repo):
    """the set of non-obsolete revisions with obsolete parents"""
    # revset is not efficient enough here
    # we do (obsolete()::) - obsolete() by hand
    obs = getrevs(repo, 'obsolete')
    if not obs:
        return set()
    cl = repo.changelog
    return set(r for r in cl.descendants(obs) if r not in obs)

@cachefor('suspended')
def _computesuspendedset(repo):
    """the set of obsolete revisions with non-obsolete descendants"""
    suspended = repo.changelog.ancestors(getrevs(repo, 'unstable'))
    return set(r for r in getrevs(repo, 'obsolete') if r in suspended)

@cachefor('extinct')
def _computeextinctset(repo):
    """the set of obsolete revisions without non-obsolete descendants"""
    return getrevs(repo, 'obsolete') - getrevs(repo, 'suspended')


@cachefor('bumped')
def _computebumpedset(repo):
    """the set of revs trying to obsolete public revisions"""
    bumped = set()
    # utility aliases (avoid attribute lookup in the loop)
    phase = repo._phasecache.phase  # would be faster to grab the full list
    public = phases.public
    cl = repo.changelog
    torev = cl.nodemap.get
    obs = getrevs(repo, 'obsolete')
    for rev in repo:
        # We only evaluate mutable, non-obsolete revisions
        if (public < phase(repo, rev)) and (rev not in obs):
            node = cl.node(rev)
            # (future) a cache of precursors may be worth it if splits are
            # very common
            for pnode in allprecursors(repo.obsstore, [node],
                                       ignoreflags=bumpedfix):
                prev = torev(pnode)  # unfiltered! but so is phasecache
                if (prev is not None) and (phase(repo, prev) <= public):
                    # we have a public precursor
                    bumped.add(rev)
                    break  # Next draft!
    return bumped

@cachefor('divergent')
def _computedivergentset(repo):
    """the set of revs that compete to be the final successors of some
    revision."""
    divergent = set()
    obsstore = repo.obsstore
    newermap = {}
    for ctx in repo.set('(not public()) - obsolete()'):
        mark = obsstore.precursors.get(ctx.node(), ())
        toprocess = set(mark)
        while toprocess:
            prec = toprocess.pop()[0]
            if prec not in newermap:
                successorssets(repo, prec, newermap)
            newer = [n for n in newermap[prec] if n]
            if len(newer) > 1:
                divergent.add(ctx.rev())
                break
            toprocess.update(obsstore.precursors.get(prec, ()))
    return divergent


def createmarkers(repo, relations, flag=0, metadata=None):
    """Add obsolete markers between changesets in a repo

    <relations> must be an iterable of (<old>, (<new>, ...)[,{metadata}])
    tuples. `old` and `new` are changectx objects. metadata is an optional
    dictionary containing metadata for this marker only. It is merged with
    the global metadata specified through the `metadata` argument of this
    function.

    Trying to obsolete a public changeset will raise an exception.

    The current user and date are used except if specified otherwise in the
    metadata attribute.

    This function operates within a transaction of its own, but does
    not take any lock on the repo.
    """
    # prepare metadata
    if metadata is None:
        metadata = {}
    if 'date' not in metadata:
        metadata['date'] = '%i %i' % util.makedate()
    if 'user' not in metadata:
        metadata['user'] = repo.ui.username()
    tr = repo.transaction('add-obsolescence-marker')
    try:
        for rel in relations:
            prec = rel[0]
            sucs = rel[1]
            localmetadata = metadata.copy()
            if 2 < len(rel):
                localmetadata.update(rel[2])

            if not prec.mutable():
                raise util.Abort("cannot obsolete immutable changeset: %s"
                                 % prec)
            nprec = prec.node()
            nsucs = tuple(s.node() for s in sucs)
            if nprec in nsucs:
                raise util.Abort("changeset %s cannot obsolete itself" % prec)
            repo.obsstore.create(tr, nprec, nsucs, flag, localmetadata)
            repo.filteredrevcache.clear()
        tr.close()
    finally:
        tr.release()
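
# Illustrative sketch (added for exposition, not part of the original
# module): a typical call, assuming `old` and `new` are changectx objects
# for a rewritten changeset and its replacement, and that the caller holds
# the repo lock. The transaction is handled by createmarkers itself:
#
#     createmarkers(repo, [(old, (new,))])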