obsstore: update create docstring to point to the coder friendly function...
Pierre-Yves David
r20516:3af218cf default
@@ -1,843 +1,846
# obsolete.py - obsolete markers handling
#
# Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
#                Logilab SA        <contact@logilab.fr>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Obsolete markers handling

An obsolete marker maps an old changeset to a list of new
changesets. If the list of new changesets is empty, the old changeset
is said to be "killed". Otherwise, the old changeset is being
"replaced" by the new changesets.

Obsolete markers can be used to record and distribute changeset graph
transformations performed by history rewriting operations, and help
build new tools to reconcile conflicting rewriting actions. To
facilitate conflict resolution, markers include various annotations
besides old and new changeset identifiers, such as creation date or
author name.

The old obsoleted changeset is called a "precursor" and possible
replacements are called "successors". Markers that use changeset X as a
precursor are called "successor markers of X" because they hold
information about the successors of X. Markers that use changeset Y as a
successor are called "precursor markers of Y" because they hold
information about the precursors of Y.

Examples:

- When changeset A is replaced by a changeset A', one marker is stored:

    (A, (A',))

- When changesets A and B are folded into a new changeset C, two markers are
  stored:

    (A, (C,)) and (B, (C,))

- When changeset A is simply "pruned" from the graph, a marker is created:

    (A, ())

- When changeset A is split into B and C, a single marker is used:

    (A, (B, C))

  We use a single marker to distinguish the "split" case from the
  "divergence" case. If two independent operations rewrite the same
  changeset A into A' and A'', we have an error case: divergent rewriting.
  We can detect it because two markers will be created independently:

  (A, (B,)) and (A, (C,))

Format
------

Markers are stored in an append-only file stored in
'.hg/store/obsstore'.

The file starts with a version header:

- 1 unsigned byte: version number, starting at zero.


The header is followed by the markers. Each marker is made of:

- 1 unsigned byte: number of new changesets "N", can be zero.

- 1 unsigned 32-bit integer: metadata size "M" in bytes.

- 1 byte: a bit field. It is reserved for flags used in common
  obsolete marker operations, to avoid repeated decoding of metadata
  entries.

- 20 bytes: obsoleted changeset identifier.

- N*20 bytes: new changesets identifiers.

- M bytes: metadata as a sequence of nul-terminated strings. Each
  string contains a key and a value, separated by a colon ':', without
  additional encoding. Keys cannot contain '\0' or ':' and values
  cannot contain '\0'.
"""
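# Illustration (not part of the original module): a minimal sketch of how one
# marker is laid out on disk per the format above, using the same struct code
# defined below (_fmfixed = '>BIB20s'). All values here are made up.
#
#   >>> import struct
#   >>> meta = 'user:alice\0date:0 0'           # '\0'-separated key:value pairs
#   >>> prec, succ = 'p' * 20, 's' * 20         # fake 20-byte node identifiers
#   >>> raw = struct.pack('>BIB20s', 1, len(meta), 0, prec) + succ + meta
#   >>> struct.unpack('>BIB20s', raw[:struct.calcsize('>BIB20s')])
#   (1, 19, 0, 'pppppppppppppppppppp')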
import struct
import util, base85, node
import phases
from i18n import _

_pack = struct.pack
_unpack = struct.unpack

_SEEK_END = 2 # os.SEEK_END was introduced in Python 2.5

# the obsolete feature is not mature enough to be enabled by default.
# you have to rely on a third party extension to enable this.
_enabled = False

# data used for parsing and writing
_fmversion = 0
_fmfixed = '>BIB20s'
_fmnode = '20s'
_fmfsize = struct.calcsize(_fmfixed)
_fnodesize = struct.calcsize(_fmnode)

### obsolescence marker flag

## bumpedfix flag
#
# When a changeset A' succeeds a changeset A which became public, we call A'
# "bumped" because it's a successor of a public changeset.
#
# o A' (bumped)
# |`:
# | o A
# |/
# o Z
#
# The way to solve this situation is to create a new changeset Ad as a child
# of A. This changeset has the same content as A'. So the diff from A to A'
# is the same as the diff from A to Ad. Ad is marked as a successor of A'.
#
# o Ad
# |`:
# | x A'
# |'|
# o | A
# |/
# o Z
#
# But by transitivity Ad is also a successor of A. To avoid having Ad marked
# as bumped too, we add the `bumpedfix` flag to the marker <A', (Ad,)>.
# This flag means that the successor expresses the changes between the public
# and bumped versions and fixes the situation, breaking the transitivity of
# "bumped" here.
bumpedfix = 1
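# Illustration (not part of the original module): a hedged sketch of how the
# flag would be attached when recording the fix for a bumped changeset, using
# hypothetical 20-byte node values anode and adnode for A' and Ad, inside an
# open transaction `tr`:
#
#   # repo.obsstore.create(tr, anode, (adnode,), flag=bumpedfix)
#
# The marker <A', (Ad,)> then carries bumpedfix in its bit field, so Ad is
# not itself reported as bumped.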
def _readmarkers(data):
    """Read and enumerate markers from raw data"""
    off = 0
    diskversion = _unpack('>B', data[off:off + 1])[0]
    off += 1
    if diskversion != _fmversion:
        raise util.Abort(_('parsing obsolete marker: unknown version %r')
                         % diskversion)

    # Loop on markers
    l = len(data)
    while off + _fmfsize <= l:
        # read fixed part
        cur = data[off:off + _fmfsize]
        off += _fmfsize
        nbsuc, mdsize, flags, pre = _unpack(_fmfixed, cur)
        # read replacement
        sucs = ()
        if nbsuc:
            s = (_fnodesize * nbsuc)
            cur = data[off:off + s]
            sucs = _unpack(_fmnode * nbsuc, cur)
            off += s
        # read metadata
        # (metadata will be decoded on demand)
        metadata = data[off:off + mdsize]
        if len(metadata) != mdsize:
            raise util.Abort(_('parsing obsolete marker: metadata is too '
                               'short, %d bytes expected, got %d')
                             % (mdsize, len(metadata)))
        off += mdsize
        yield (pre, sucs, flags, metadata)
def encodemeta(meta):
    """Return encoded metadata string to string mapping.

    Assume no ':' in key and no '\0' in both key and value."""
    for key, value in meta.iteritems():
        if ':' in key or '\0' in key:
            raise ValueError("':' and '\0' are forbidden in metadata keys")
        if '\0' in value:
            raise ValueError("'\0' is forbidden in metadata values")
    return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])

def decodemeta(data):
    """Return string to string dictionary from encoded version."""
    d = {}
    for l in data.split('\0'):
        if l:
            key, value = l.split(':')
            d[key] = value
    return d
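# Illustration (not part of the original module): encodemeta and decodemeta
# are inverses for well-formed mappings. A minimal round trip; keys come out
# sorted in the encoded form:
#
#   >>> data = encodemeta({'user': 'alice', 'date': '0 0'})
#   >>> data
#   'date:0 0\x00user:alice'
#   >>> decodemeta(data) == {'user': 'alice', 'date': '0 0'}
#   True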

class marker(object):
    """Wrap obsolete marker raw data"""

    def __init__(self, repo, data):
        # the repo argument will be used to create changectx in later versions
        self._repo = repo
        self._data = data
        self._decodedmeta = None

    def __hash__(self):
        return hash(self._data)

    def __eq__(self, other):
        if type(other) != type(self):
            return False
        return self._data == other._data

    def precnode(self):
        """Precursor changeset node identifier"""
        return self._data[0]

    def succnodes(self):
        """List of successor changesets node identifiers"""
        return self._data[1]

    def metadata(self):
        """Decoded metadata dictionary"""
        if self._decodedmeta is None:
            self._decodedmeta = decodemeta(self._data[3])
        return self._decodedmeta

    def date(self):
        """Creation date as (unixtime, offset)"""
        parts = self.metadata()['date'].split(' ')
        return (float(parts[0]), int(parts[1]))

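# Illustration (not part of the original module): a hedged sketch of wrapping
# one raw marker tuple; the nodes are made-up 20-byte strings and the repo
# argument is unused by the accessors exercised here.
#
#   >>> raw = ('p' * 20, ('s' * 20,), 0, encodemeta({'date': '0 0'}))
#   >>> m = marker(None, raw)
#   >>> m.precnode() == 'p' * 20
#   True
#   >>> m.date()
#   (0.0, 0)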
class obsstore(object):
    """Store obsolete markers

    Markers can be accessed with two mappings:
    - precursors[x] -> set(markers on precursors edges of x)
    - successors[x] -> set(markers on successors edges of x)
    """

    def __init__(self, sopener):
        # caches for various obsolescence related sets
        self.caches = {}
        self._all = []
        # new markers to serialize
        self.precursors = {}
        self.successors = {}
        self.sopener = sopener
        data = sopener.tryread('obsstore')
        if data:
            self._load(_readmarkers(data))

    def __iter__(self):
        return iter(self._all)

    def __nonzero__(self):
        return bool(self._all)

    def create(self, transaction, prec, succs=(), flag=0, metadata=None):
        """obsolete: add a new obsolete marker

        * ensure it is hashable
        * check mandatory metadata
        * encode metadata

        If you are a human writing code that creates markers, you want to
        use the `createmarkers` function in this module instead.
        """
        if metadata is None:
            metadata = {}
        if 'date' not in metadata:
            metadata['date'] = "%d %d" % util.makedate()
        if len(prec) != 20:
            raise ValueError(prec)
        for succ in succs:
            if len(succ) != 20:
                raise ValueError(succ)
        marker = (str(prec), tuple(succs), int(flag), encodemeta(metadata))
        self.add(transaction, [marker])

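    # Illustration (not part of the original module): `create` works on raw
    # 20-byte nodes inside a transaction the caller already opened, while
    # `createmarkers` (defined at the end of this module) is the
    # coder-friendly entry point taking changectx objects and managing its
    # own transaction. Hypothetical usage:
    #
    #   # low level, inside an open transaction `tr`:
    #   # repo.obsstore.create(tr, oldnode, (newnode,))
    #
    #   # high level, no transaction needed from the caller:
    #   # createmarkers(repo, [(oldctx, (newctx,))])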
    def add(self, transaction, markers):
        """Add new markers to the store

        Take care of filtering out duplicates.
        Return the number of new markers."""
        if not _enabled:
            raise util.Abort('obsolete feature is not enabled on this repo')
        known = set(self._all)
        new = []
        for m in markers:
            if m not in known:
                known.add(m)
                new.append(m)
        if new:
            f = self.sopener('obsstore', 'ab')
            try:
                # Whether the file's current position is at the beginning or
                # at the end after opening a file for appending is
                # implementation defined. So we must seek to the end before
                # calling tell(), or we may get a zero offset for non-zero
                # sized files on some platforms (issue3543).
                f.seek(0, _SEEK_END)
                offset = f.tell()
                transaction.add('obsstore', offset)
                # offset == 0: new file - add the version header
                for bytes in _encodemarkers(new, offset == 0):
                    f.write(bytes)
            finally:
                # XXX: f.close() == filecache invalidation == obsstore rebuilt.
                # call 'filecacheentry.refresh()' here
                f.close()
            self._load(new)
            # new markers *may* have changed several sets. invalidate the
            # caches.
            self.caches.clear()
        return len(new)

    def mergemarkers(self, transaction, data):
        markers = _readmarkers(data)
        self.add(transaction, markers)

    def _load(self, markers):
        for mark in markers:
            self._all.append(mark)
            pre, sucs = mark[:2]
            self.successors.setdefault(pre, set()).add(mark)
            for suc in sucs:
                self.precursors.setdefault(suc, set()).add(mark)
        if node.nullid in self.precursors:
            raise util.Abort(_('bad obsolescence marker detected: '
                               'invalid successors nullid'))

def _encodemarkers(markers, addheader=False):
    # Kept separate from flushmarkers(), it will be reused for
    # markers exchange.
    if addheader:
        yield _pack('>B', _fmversion)
    for marker in markers:
        yield _encodeonemarker(marker)


def _encodeonemarker(marker):
    pre, sucs, flags, metadata = marker
    nbsuc = len(sucs)
    format = _fmfixed + (_fmnode * nbsuc)
    data = [nbsuc, len(metadata), flags, pre]
    data.extend(sucs)
    return _pack(format, *data) + metadata

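# Illustration (not part of the original module): a marker survives a round
# trip through _encodemarkers/_readmarkers. Made-up nodes; the version header
# is included by addheader=True.
#
#   >>> m = ('p' * 20, ('s' * 20,), 0, encodemeta({'date': '0 0'}))
#   >>> data = ''.join(_encodemarkers([m], addheader=True))
#   >>> list(_readmarkers(data)) == [m]
#   True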
# arbitrarily picked to fit into the 8K limit from HTTP servers
# you have to take into account:
# - the version header
# - the base85 encoding
_maxpayload = 5300

def listmarkers(repo):
    """List markers over pushkey"""
    if not repo.obsstore:
        return {}
    keys = {}
    parts = []
    currentlen = _maxpayload * 2 # ensure we create a new part
    for marker in repo.obsstore:
        nextdata = _encodeonemarker(marker)
        if (len(nextdata) + currentlen > _maxpayload):
            currentpart = []
            currentlen = 0
            parts.append(currentpart)
        currentpart.append(nextdata)
        currentlen += len(nextdata)
    for idx, part in enumerate(reversed(parts)):
        data = ''.join([_pack('>B', _fmversion)] + part)
        keys['dump%i' % idx] = base85.b85encode(data)
    return keys

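# Illustration (not part of the original module): the pushkey namespace maps
# 'dumpN' keys to base85-encoded chunks, each carrying the version header
# plus whole markers while staying under _maxpayload bytes. A repo with only
# a few markers would typically yield a single entry, roughly:
#
#   # listmarkers(repo) -> {'dump0': '<base85 data>'}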
def pushmarker(repo, key, old, new):
    """Push markers over pushkey"""
    if not key.startswith('dump'):
        repo.ui.warn(_('unknown key: %r') % key)
        return 0
    if old:
        repo.ui.warn(_('unexpected old value for %r') % key)
        return 0
    data = base85.b85decode(new)
    lock = repo.lock()
    try:
        tr = repo.transaction('pushkey: obsolete markers')
        try:
            repo.obsstore.mergemarkers(tr, data)
            tr.close()
            return 1
        finally:
            tr.release()
    finally:
        lock.release()

def allmarkers(repo):
    """all obsolete markers known in a repository"""
    for markerdata in repo.obsstore:
        yield marker(repo, markerdata)

def precursormarkers(ctx):
    """obsolete markers marking this changeset as a successor"""
    for data in ctx._repo.obsstore.precursors.get(ctx.node(), ()):
        yield marker(ctx._repo, data)

def successormarkers(ctx):
    """obsolete markers making this changeset obsolete"""
    for data in ctx._repo.obsstore.successors.get(ctx.node(), ()):
        yield marker(ctx._repo, data)

def allsuccessors(obsstore, nodes, ignoreflags=0):
    """Yield every successor of <nodes>.

    Some successors may be unknown locally.

    This is a linear yield unsuited to detecting split changesets. It includes
    initial nodes too."""
    remaining = set(nodes)
    seen = set(remaining)
    while remaining:
        current = remaining.pop()
        yield current
        for mark in obsstore.successors.get(current, ()):
            # ignore markers flagged with the specified flags
            if mark[2] & ignoreflags:
                continue
            for suc in mark[1]:
                if suc not in seen:
                    seen.add(suc)
                    remaining.add(suc)

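# Illustration (not part of the original module): with markers A -> B and
# B -> C recorded, allsuccessors walks the chain transitively and includes
# the initial node. Hypothetical one-letter stand-ins for 20-byte nodes:
#
#   # markers: (A, (B,)) and (B, (C,))
#   # sorted(allsuccessors(repo.obsstore, [A])) -> [A, B, C]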
def allprecursors(obsstore, nodes, ignoreflags=0):
    """Yield every precursor of <nodes>.

    Some precursors may be unknown locally.

    This is a linear yield unsuited to detecting folded changesets. It includes
    initial nodes too."""

    remaining = set(nodes)
    seen = set(remaining)
    while remaining:
        current = remaining.pop()
        yield current
        for mark in obsstore.precursors.get(current, ()):
            # ignore markers flagged with the specified flags
            if mark[2] & ignoreflags:
                continue
            suc = mark[0]
            if suc not in seen:
                seen.add(suc)
                remaining.add(suc)

def foreground(repo, nodes):
    """return all nodes in the "foreground" of other nodes

    The foreground of a revision is anything reachable using parent -> children
    or precursor -> successor relations. It is very similar to "descendant" but
    augmented with obsolescence information.

    Beware that obsolescence cycles may result in complex situations.
    """
    repo = repo.unfiltered()
    foreground = set(repo.set('%ln::', nodes))
    if repo.obsstore:
        # We only need this complicated logic if there is obsolescence
        # XXX will probably deserve an optimised revset.
        nm = repo.changelog.nodemap
        plen = -1
        # compute the whole set of successors or descendants
        while len(foreground) != plen:
            plen = len(foreground)
            succs = set(c.node() for c in foreground)
            mutable = [c.node() for c in foreground if c.mutable()]
            succs.update(allsuccessors(repo.obsstore, mutable))
            known = (n for n in succs if n in nm)
            foreground = set(repo.set('%ln::', known))
    return set(c.node() for c in foreground)


def successorssets(repo, initialnode, cache=None):
    """Return all sets of successors of the initial node

    The successors set of a changeset A is a group of revisions that succeed
    A. It succeeds A as a consistent whole, each revision being only a partial
    replacement. The successors set contains non-obsolete changesets only.

    This function returns the full list of successor sets which is why it
    returns a list of tuples and not just a single tuple. Each tuple is a valid
    successors set. Note that (A,) may be a valid successors set for changeset
    A (see below).

    In most cases, a changeset A will have a single element (e.g. the changeset
    A is replaced by A') in its successors set. Though, it is also common for a
    changeset A to have no elements in its successor set (e.g. the changeset
    has been pruned). Therefore, the returned list of successors sets will be
    [(A',)] or [], respectively.

    When a changeset A is split into A' and B', however, it will result in a
    successors set containing more than a single element, i.e. [(A',B')].
    Divergent changesets will result in multiple successors sets, i.e. [(A',),
    (A'')].

    If a changeset A is not obsolete, then it will conceptually have no
    successors set. To distinguish this from a pruned changeset, the successor
    set will only contain itself, i.e. [(A,)].

    Finally, successors unknown locally are considered to be pruned (obsoleted
    without any successors).

    The optional `cache` parameter is a dictionary that may contain precomputed
    successors sets. It is meant to reuse the computation of a previous call to
    `successorssets` when multiple calls are made at the same time. The cache
    dictionary is updated in place. The caller is responsible for its life
    span. Code that makes multiple calls to `successorssets` *must* use this
    cache mechanism or suffer terrible performance.
    """


    succmarkers = repo.obsstore.successors

    # Stack of nodes we search successors sets for
    toproceed = [initialnode]
    # set version of above list for fast loop detection
    # element added to "toproceed" must be added here
    stackedset = set(toproceed)
    if cache is None:
        cache = {}

    # This while loop is the flattened version of a recursive search for
    # successors sets
    #
    # def successorssets(x):
    #    successors = directsuccessors(x)
    #    ss = [[]]
    #    for succ in directsuccessors(x):
    #        # product as in itertools cartesian product
    #        ss = product(ss, successorssets(succ))
    #    return ss
    #
    # But we can not use plain recursive calls here:
    # - that would blow the python call stack
    # - obsolescence markers may have cycles, we need to handle them.
    #
    # The `toproceed` list acts as our call stack. Every node we search
    # successors sets for is stacked there.
    #
    # The `stackedset` is a set version of this stack used to check if a node
    # is already stacked. This check is used to detect cycles and prevent
    # infinite loops.
    #
    # successors sets of all nodes are stored in the `cache` dictionary.
    #
    # After this while loop ends we use the cache to return the successors sets
    # for the node requested by the caller.
    while toproceed:
        # Every iteration tries to compute the successors sets of the topmost
        # node of the stack: CURRENT.
        #
        # There are four possible outcomes:
        #
        # 1) We already know the successors sets of CURRENT:
        #    -> mission accomplished, pop it from the stack.
        # 2) Node is not obsolete:
        #    -> the node is its own successors sets. Add it to the cache.
        # 3) We do not know successors set of direct successors of CURRENT:
        #    -> We add those successors to the stack.
        # 4) We know successors sets of all direct successors of CURRENT:
        #    -> We can compute CURRENT successors set and add it to the
        #       cache.
        #
        current = toproceed[-1]
        if current in cache:
            # case (1): We already know the successors sets
            stackedset.remove(toproceed.pop())
        elif current not in succmarkers:
            # case (2): The node is not obsolete.
            if current in repo:
                # We have a valid last successors.
                cache[current] = [(current,)]
            else:
                # Final obsolete version is unknown locally.
                # Do not count that as a valid successors
                cache[current] = []
        else:
            # cases (3) and (4)
            #
            # We proceed in two phases. Phase 1 aims to distinguish case (3)
            # from case (4):
            #
            #     For each direct successor of CURRENT, we check whether its
            #     successors sets are known. If they are not, we stack the
            #     unknown node and proceed to the next iteration of the while
            #     loop. (case 3)
            #
            #     During this step, we may detect obsolescence cycles: a node
            #     with unknown successors sets but already in the call stack.
            #     In such a situation, we arbitrarily set the successors sets
            #     of the node to nothing (node pruned) to break the cycle.
            #
            #     If no break was encountered we proceed to phase 2.
            #
            # Phase 2 computes successors sets of CURRENT (case 4); see details
            # in phase 2 itself.
            #
            # Note the two levels of iteration in each phase.
            # - The first one handles obsolescence markers using CURRENT as
            #   precursor (successors markers of CURRENT).
            #
            #   Having multiple entries here means divergence.
            #
            # - The second one handles successors defined in each marker.
            #
            #   Having none means pruned node, multiple successors means split,
            #   a single successor is a standard replacement.
            #
            for mark in sorted(succmarkers[current]):
                for suc in mark[1]:
                    if suc not in cache:
                        if suc in stackedset:
                            # cycle breaking
                            cache[suc] = []
                        else:
                            # case (3) If we have not computed successors sets
                            # of one of those successors we add it to the
                            # `toproceed` stack and stop all work for this
                            # iteration.
                            toproceed.append(suc)
                            stackedset.add(suc)
                            break
                else:
                    continue
                break
            else:
                # case (4): we know all successors sets of all direct
                # successors
                #
                # Successors set contributed by each marker depends on the
                # successors sets of all its "successors" nodes.
                #
                # Each different marker is a divergence in the obsolescence
                # history. It contributes successors sets distinct from other
                # markers.
                #
                # Within a marker, a successor may have divergent successors
                # sets. In such a case, the marker will contribute multiple
                # divergent successors sets. If multiple successors have
                # divergent successors sets, a cartesian product is used.
                #
                # At the end we post-process successors sets to remove
                # duplicated entries and successors sets that are strict
                # subsets of another one.
                succssets = []
                for mark in sorted(succmarkers[current]):
                    # successors sets contributed by this marker
                    markss = [[]]
                    for suc in mark[1]:
                        # cartesian product with previous successors
                        productresult = []
                        for prefix in markss:
                            for suffix in cache[suc]:
                                newss = list(prefix)
                                for part in suffix:
                                    # do not duplicate entries in a successors
                                    # set; first entry wins.
                                    if part not in newss:
                                        newss.append(part)
                                productresult.append(newss)
                        markss = productresult
                    succssets.extend(markss)
                # remove duplicated and subset
                seen = []
                final = []
                candidate = sorted(((set(s), s) for s in succssets if s),
                                   key=lambda x: len(x[1]), reverse=True)
                for setversion, listversion in candidate:
                    for seenset in seen:
                        if setversion.issubset(seenset):
                            break
                    else:
                        final.append(listversion)
                        seen.append(setversion)
                final.reverse() # put small successors set first
                cache[current] = final
    return cache[initialnode]

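# Illustration (not part of the original module): expected shapes of
# successorssets() results for the scenarios in its docstring, with
# hypothetical one-letter stand-ins for nodes:
#
#   # A not obsolete              -> [(A,)]
#   # A rewritten into A'         -> [(A',)]
#   # A pruned                    -> []
#   # A split into B and C        -> [(B, C)]
#   # A diverged into A' and A''  -> [(A',), (A'',)]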
def _knownrevs(repo, nodes):
    """yield revision numbers of known nodes passed in parameters

    Unknown revisions are silently ignored."""
    torev = repo.changelog.nodemap.get
    for n in nodes:
        rev = torev(n)
        if rev is not None:
            yield rev

# mapping of 'set-name' -> <function to compute this set>
cachefuncs = {}
def cachefor(name):
    """Decorator to register a function as computing the cache for a set"""
    def decorator(func):
        assert name not in cachefuncs
        cachefuncs[name] = func
        return func
    return decorator

def getrevs(repo, name):
    """Return the set of revisions that belong to the <name> set

    Such access may compute the set and cache it for future use"""
    repo = repo.unfiltered()
    if not repo.obsstore:
        return ()
    if name not in repo.obsstore.caches:
        repo.obsstore.caches[name] = cachefuncs[name](repo)
    return repo.obsstore.caches[name]

# To keep things simple we need to invalidate obsolescence caches when:
#
# - a new changeset is added
# - the public phase is changed
# - obsolescence markers are added
# - strip is used on a repo
def clearobscaches(repo):
    """Remove all obsolescence related caches from a repo

    This removes all caches in the obsstore if the obsstore already exists on
    the repo.

    (We could be smarter here given the exact event that triggered the cache
    clearing)"""
    # only clear caches if there is obsstore data in this repo
    if 'obsstore' in repo._filecache:
        repo.obsstore.caches.clear()

@cachefor('obsolete')
def _computeobsoleteset(repo):
    """the set of obsolete revisions"""
    obs = set()
    getrev = repo.changelog.nodemap.get
    getphase = repo._phasecache.phase
    for node in repo.obsstore.successors:
        rev = getrev(node)
        if rev is not None and getphase(repo, rev):
            obs.add(rev)
    return obs

@cachefor('unstable')
def _computeunstableset(repo):
    """the set of non obsolete revisions with obsolete parents"""
    # revset is not efficient enough here
    # we do (obsolete()::) - obsolete() by hand
    obs = getrevs(repo, 'obsolete')
    if not obs:
        return set()
    cl = repo.changelog
    return set(r for r in cl.descendants(obs) if r not in obs)

749 752
750 753 @cachefor('suspended')
751 754 def _computesuspendedset(repo):
752 755 """the set of obsolete parents with non obsolete descendants"""
753 756 suspended = repo.changelog.ancestors(getrevs(repo, 'unstable'))
754 757 return set(r for r in getrevs(repo, 'obsolete') if r in suspended)
755 758
756 759 @cachefor('extinct')
757 760 def _computeextinctset(repo):
758 761 """the set of obsolete parents without non obsolete descendants"""
759 762 return getrevs(repo, 'obsolete') - getrevs(repo, 'suspended')
760 763
761 764
@cachefor('bumped')
def _computebumpedset(repo):
    """the set of revs trying to obsolete public revisions"""
    bumped = set()
    # utility functions (avoid attribute lookups in the loop)
    phase = repo._phasecache.phase # would be faster to grab the full list
    public = phases.public
    cl = repo.changelog
    torev = cl.nodemap.get
    obs = getrevs(repo, 'obsolete')
    for rev in repo:
        # We only evaluate mutable, non-obsolete revisions
        if (public < phase(repo, rev)) and (rev not in obs):
            node = cl.node(rev)
            # (future) A cache of precursors may be worth it if splits are
            # very common
            for pnode in allprecursors(repo.obsstore, [node],
                                       ignoreflags=bumpedfix):
                prev = torev(pnode) # unfiltered! but so is phasecache
                if (prev is not None) and (phase(repo, prev) <= public):
                    # we have a public precursor
                    bumped.add(rev)
                    break # Next draft!
    return bumped

785 788
786 789 @cachefor('divergent')
787 790 def _computedivergentset(repo):
788 791 """the set of rev that compete to be the final successors of some revision.
789 792 """
790 793 divergent = set()
791 794 obsstore = repo.obsstore
792 795 newermap = {}
793 796 for ctx in repo.set('(not public()) - obsolete()'):
794 797 mark = obsstore.precursors.get(ctx.node(), ())
795 798 toprocess = set(mark)
796 799 while toprocess:
797 800 prec = toprocess.pop()[0]
798 801 if prec not in newermap:
799 802 successorssets(repo, prec, newermap)
800 803 newer = [n for n in newermap[prec] if n]
801 804 if len(newer) > 1:
802 805 divergent.add(ctx.rev())
803 806 break
804 807 toprocess.update(obsstore.precursors.get(prec, ()))
805 808 return divergent
806 809
807 810
def createmarkers(repo, relations, flag=0, metadata=None):
    """Add obsolete markers between changesets in a repo

    <relations> must be an iterable of (<old>, (<new>, ...)) tuples.
    `old` and `news` are changectx objects.

    Trying to obsolete a public changeset will raise an exception.

    Current user and date are used except if specified otherwise in the
    metadata attribute.

    This function operates within a transaction of its own, but does
    not take any lock on the repo.
    """
    # prepare metadata
    if metadata is None:
        metadata = {}
    if 'date' not in metadata:
        metadata['date'] = '%i %i' % util.makedate()
    if 'user' not in metadata:
        metadata['user'] = repo.ui.username()
    tr = repo.transaction('add-obsolescence-marker')
    try:
        for prec, sucs in relations:
            if not prec.mutable():
                raise util.Abort("cannot obsolete immutable changeset: %s"
                                 % prec)
            nprec = prec.node()
            nsucs = tuple(s.node() for s in sucs)
            if nprec in nsucs:
                raise util.Abort("changeset %s cannot obsolete itself" % prec)
            repo.obsstore.create(tr, nprec, nsucs, flag, metadata)
            repo.filteredrevcache.clear()
        tr.close()
    finally:
        tr.release()
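
# Illustration (not part of the original module): a hedged sketch of the
# typical call from a history-rewriting command, with hypothetical changectx
# objects oldctx and newctx. createmarkers opens its own transaction, but the
# caller is expected to hold the repo lock:
#
#   # rewrite:  createmarkers(repo, [(oldctx, (newctx,))])
#   # prune:    createmarkers(repo, [(oldctx, ())])
#   # fold:     createmarkers(repo, [(ctx1, (newctx,)), (ctx2, (newctx,))])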