obsolete: fix bad comment
Pierre-Yves David, r20203:509768fc (default branch)
@@ -1,832 +1,832
1 1 # obsolete.py - obsolete markers handling
2 2 #
3 3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
4 4 # Logilab SA <contact@logilab.fr>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 """Obsolete markers handling
10 10
11 11 An obsolete marker maps an old changeset to a list of new
12 12 changesets. If the list of new changesets is empty, the old changeset
13 13 is said to be "killed". Otherwise, the old changeset is being
14 14 "replaced" by the new changesets.
15 15
16 16 Obsolete markers can be used to record and distribute changeset graph
17 17 transformations performed by history rewriting operations, and help
18 18 building new tools to reconcile conflicting rewriting actions. To
19 19 facilitate conflict resolution, markers include various annotations
20 20 besides old and new changeset identifiers, such as creation date or
21 21 author name.
22 22
23 23 The old obsoleted changeset is called "precursor" and possible replacements are
24 24 called "successors". Markers that use changeset X as a precursor are called
25 25 "successor markers of X" because they hold information about the successors of
26 26 X. Markers that use changeset Y as a successor are called "precursor markers of
27 27 Y" because they hold information about the precursors of Y.
28 28
29 29 Examples:
30 30
31 31 - When changeset A is replaced by a changeset A', one marker is stored:
32 32
33 33 (A, (A',))
34 34
35 35 - When changesets A and B are folded into a new changeset C, two markers are
36 36 stored:
37 37
38 38 (A, (C,)) and (B, (C,))
39 39
40 40 - When changeset A is simply "pruned" from the graph, a marker is created:
41 41
42 42 (A, ())
43 43
44 44 - When changeset A is split into B and C, a single marker is used:
45 45
46 46 (A, (B, C))
47 47
48 48 We use a single marker to distinguish the "split" case from the "divergence"
49 49 case. If two independent operations rewrite the same changeset A into A' and
50 50 A'', we have an error case: divergent rewriting. We can detect it because
51 51 two markers will be created independently:
52 52
53 53 (A, (B,)) and (A, (C,))
54 54
55 55 Format
56 56 ------
57 57
58 58 Markers are stored in an append-only file stored in
59 59 '.hg/store/obsstore'.
60 60
61 61 The file starts with a version header:
62 62
63 63 - 1 unsigned byte: version number, starting at zero.
64 64
65 65
66 66 The header is followed by the markers. Each marker is made of:
67 67
68 68 - 1 unsigned byte: number of new changesets "N", which can be zero.
69 69
70 70 - 1 unsigned 32-bits integer: metadata size "M" in bytes.
71 71
72 72 - 1 byte: a bit field. It is reserved for flags used in common
73 73 obsolete marker operations, to avoid repeated decoding of metadata
74 74 entries.
75 75
76 76 - 20 bytes: obsoleted changeset identifier.
77 77
78 78 - N*20 bytes: new changeset identifiers.
79 79
80 80 - M bytes: metadata as a sequence of nul-terminated strings. Each
81 81 string contains a key and a value, separated by a colon ':', without
82 82 additional encoding. Keys cannot contain '\0' or ':' and values
83 83 cannot contain '\0'.
84 84 """
85 85 import struct
86 86 import util, base85, node
87 87 from i18n import _
88 88
89 89 _pack = struct.pack
90 90 _unpack = struct.unpack
91 91
92 92 _SEEK_END = 2 # os.SEEK_END was introduced in Python 2.5
93 93
94 94 # the obsolete feature is not mature enough to be enabled by default.
95 95 # you have to rely on a third party extension to enable this.
96 96 _enabled = False
97 97
98 98 # data used for parsing and writing
99 99 _fmversion = 0
100 100 _fmfixed = '>BIB20s'
101 101 _fmnode = '20s'
102 102 _fmfsize = struct.calcsize(_fmfixed)
103 103 _fnodesize = struct.calcsize(_fmnode)
104 104
105 105 ### obsolescence marker flag
106 106
107 107 ## bumpedfix flag
108 108 #
109 109 # When a changeset A' succeeds a changeset A which became public, we call A'
110 110 # "bumped" because it's a successor of a public changeset
111 111 #
112 112 # o A' (bumped)
113 113 # |`:
114 114 # | o A
115 115 # |/
116 116 # o Z
117 117 #
118 118 # The way to solve this situation is to create a new changeset Ad as a child
119 119 # of A. This changeset has the same content as A'. So the diff from A to A'
120 120 # is the same as the diff from A to Ad. Ad is marked as a successor of A'
121 121 #
122 122 # o Ad
123 123 # |`:
124 124 # | x A'
125 125 # |'|
126 126 # o | A
127 127 # |/
128 128 # o Z
129 129 #
130 130 # But by transitivity Ad is also a successor of A. To avoid having Ad marked
131 131 # as bumped too, we add the `bumpedfix` flag to the marker <A', (Ad,)>.
132 132 # This flag means that the successor expresses the changes between the public
133 133 # and bumped versions and fixes the situation, breaking the transitivity of
134 134 # "bumped" here.
135 135 bumpedfix = 1
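For illustration, a small helper one could write on top of this flag (hypothetical, not part of the module); _computebumpedset() further down achieves the same effect by passing ignoreflags=bumpedfix to allsuccessors().

def _nonbumpedfixmarkers(markers):
    """yield only markers whose flags field does not carry bumpedfix"""
    for mark in markers:
        if not mark[2] & bumpedfix:   # mark[2] is the flags byte
            yield mark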
136 136
137 137 def _readmarkers(data):
138 138 """Read and enumerate markers from raw data"""
139 139 off = 0
140 140 diskversion = _unpack('>B', data[off:off + 1])[0]
141 141 off += 1
142 142 if diskversion != _fmversion:
143 143 raise util.Abort(_('parsing obsolete marker: unknown version %r')
144 144 % diskversion)
145 145
146 146 # Loop on markers
147 147 l = len(data)
148 148 while off + _fmfsize <= l:
149 149 # read fixed part
150 150 cur = data[off:off + _fmfsize]
151 151 off += _fmfsize
152 152 nbsuc, mdsize, flags, pre = _unpack(_fmfixed, cur)
153 153 # read replacement
154 154 sucs = ()
155 155 if nbsuc:
156 156 s = (_fnodesize * nbsuc)
157 157 cur = data[off:off + s]
158 158 sucs = _unpack(_fmnode * nbsuc, cur)
159 159 off += s
160 160 # read metadata
161 161 # (metadata will be decoded on demand)
162 162 metadata = data[off:off + mdsize]
163 163 if len(metadata) != mdsize:
164 164 raise util.Abort(_('parsing obsolete marker: metadata is too '
165 165 'short, %d bytes expected, got %d')
166 166 % (mdsize, len(metadata)))
167 167 off += mdsize
168 168 yield (pre, sucs, flags, metadata)
169 169
170 170 def encodemeta(meta):
171 171 """Return encoded metadata string to string mapping.
172 172
173 173 Assume no ':' in key and no '\0' in both key and value."""
174 174 for key, value in meta.iteritems():
175 175 if ':' in key or '\0' in key:
176 176 raise ValueError("':' and '\0' are forbidden in metadata keys")
177 177 if '\0' in value:
178 178 raise ValueError("'\0' is forbidden in metadata values")
179 179 return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])
180 180
181 181 def decodemeta(data):
182 182 """Return string to string dictionary from encoded version."""
183 183 d = {}
184 184 for l in data.split('\0'):
185 185 if l:
186 186 key, value = l.split(':')
187 187 d[key] = value
188 188 return d
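A usage sketch for the two helpers above (values are illustrative). Note the round-trip only holds when values also avoid ':', since decodemeta() splits each entry on ':'.

meta = {'user': 'alice', 'date': '0 0'}
blob = encodemeta(meta)
assert blob == 'date:0 0\x00user:alice'   # keys are emitted sorted
assert decodemeta(blob) == meta           # lossless for well-formed input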
189 189
190 190 class marker(object):
191 191 """Wrap obsolete marker raw data"""
192 192
193 193 def __init__(self, repo, data):
194 194 # the repo argument will be used to create changectx in a later version
195 195 self._repo = repo
196 196 self._data = data
197 197 self._decodedmeta = None
198 198
199 199 def __hash__(self):
200 200 return hash(self._data)
201 201
202 202 def __eq__(self, other):
203 203 if type(other) != type(self):
204 204 return False
205 205 return self._data == other._data
206 206
207 207 def precnode(self):
208 208 """Precursor changeset node identifier"""
209 209 return self._data[0]
210 210
211 211 def succnodes(self):
212 212 """List of successor changesets node identifiers"""
213 213 return self._data[1]
214 214
215 215 def metadata(self):
216 216 """Decoded metadata dictionary"""
217 217 if self._decodedmeta is None:
218 218 self._decodedmeta = decodemeta(self._data[3])
219 219 return self._decodedmeta
220 220
221 221 def date(self):
222 222 """Creation date as (unixtime, offset)"""
223 223 parts = self.metadata()['date'].split(' ')
224 224 return (float(parts[0]), int(parts[1]))
225 225
226 226 class obsstore(object):
227 227 """Store obsolete markers
228 228
229 229 Markers can be accessed with two mappings:
230 230 - precursors[x] -> set(markers on precursor edges of x)
231 231 - successors[x] -> set(markers on successor edges of x)
232 232 """
233 233
234 234 def __init__(self, sopener):
235 235 # caches for various obsolescence related computations
236 236 self.caches = {}
237 237 self._all = []
238 238 # new markers to serialize
239 239 self.precursors = {}
240 240 self.successors = {}
241 241 self.sopener = sopener
242 242 data = sopener.tryread('obsstore')
243 243 if data:
244 244 self._load(_readmarkers(data))
245 245
246 246 def __iter__(self):
247 247 return iter(self._all)
248 248
249 249 def __nonzero__(self):
250 250 return bool(self._all)
251 251
252 252 def create(self, transaction, prec, succs=(), flag=0, metadata=None):
253 253 """obsolete: add a new obsolete marker
254 254
255 255 * ensure it is hashable
256 256 * check mandatory metadata
257 257 * encode metadata
258 258 """
259 259 if metadata is None:
260 260 metadata = {}
261 261 if 'date' not in metadata:
262 262 metadata['date'] = "%d %d" % util.makedate()
263 263 if len(prec) != 20:
264 264 raise ValueError(prec)
265 265 for succ in succs:
266 266 if len(succ) != 20:
267 267 raise ValueError(succ)
268 268 marker = (str(prec), tuple(succs), int(flag), encodemeta(metadata))
269 269 self.add(transaction, [marker])
270 270
271 271 def add(self, transaction, markers):
272 272 """Add new markers to the store
273 273
274 274 Take care of filtering duplicates.
275 275 Return the number of new markers."""
276 276 if not _enabled:
277 277 raise util.Abort('obsolete feature is not enabled on this repo')
278 278 known = set(self._all)
279 279 new = []
280 280 for m in markers:
281 281 if m not in known:
282 282 known.add(m)
283 283 new.append(m)
284 284 if new:
285 285 f = self.sopener('obsstore', 'ab')
286 286 try:
287 287 # Whether the file's current position is at the beginning or at
288 288 # the end after opening a file for appending is implementation
289 289 # defined. So we must seek to the end before calling tell(),
290 290 # or we may get a zero offset for non-zero sized files on
291 291 # some platforms (issue3543).
292 292 f.seek(0, _SEEK_END)
293 293 offset = f.tell()
294 294 transaction.add('obsstore', offset)
295 295 # offset == 0: new file - add the version header
296 296 for bytes in _encodemarkers(new, offset == 0):
297 297 f.write(bytes)
298 298 finally:
299 299 # XXX: f.close() == filecache invalidation == obsstore rebuilt.
300 300 # call 'filecacheentry.refresh()' here
301 301 f.close()
302 302 self._load(new)
303 303 # new markers *may* have changed several sets. invalidate the caches.
304 304 self.caches.clear()
305 305 return len(new)
306 306
307 307 def mergemarkers(self, transaction, data):
308 308 markers = _readmarkers(data)
309 309 self.add(transaction, markers)
310 310
311 311 def _load(self, markers):
312 312 for mark in markers:
313 313 self._all.append(mark)
314 314 pre, sucs = mark[:2]
315 315 self.successors.setdefault(pre, set()).add(mark)
316 316 for suc in sucs:
317 317 self.precursors.setdefault(suc, set()).add(mark)
318 318 if node.nullid in self.precursors:
319 319 raise util.Abort(_('bad obsolescence marker detected: '
320 320 'invalid successors nullid'))
321 321
322 322 def _encodemarkers(markers, addheader=False):
323 323 # Kept separate from flushmarkers(), it will be reused for
324 324 # marker exchange.
325 325 if addheader:
326 326 yield _pack('>B', _fmversion)
327 327 for marker in markers:
328 328 yield _encodeonemarker(marker)
329 329
330 330
331 331 def _encodeonemarker(marker):
332 332 pre, sucs, flags, metadata = marker
333 333 nbsuc = len(sucs)
334 334 format = _fmfixed + (_fmnode * nbsuc)
335 335 data = [nbsuc, len(metadata), flags, pre]
336 336 data.extend(sucs)
337 337 return _pack(format, *data) + metadata
338 338
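A round-trip sketch pairing the two private helpers above with _readmarkers(); illustrative only, since it relies on this module's internals, and the placeholder nodes stand in for real 20-byte identifiers.

m = ('\x11' * 20, ('\x22' * 20,), 0, encodemeta({'user': 'alice'}))
data = ''.join(_encodemarkers([m], addheader=True))
assert list(_readmarkers(data)) == [m]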
339 339 # arbitrarily picked to fit into the 8K limit from HTTP servers
340 340 # you have to take into account:
341 341 # - the version header
342 342 # - the base85 encoding
343 343 _maxpayload = 5300
344 344
345 345 def listmarkers(repo):
346 346 """List markers over pushkey"""
347 347 if not repo.obsstore:
348 348 return {}
349 349 keys = {}
350 350 parts = []
351 351 currentlen = _maxpayload * 2 # ensure we create a new part
352 352 for marker in repo.obsstore:
353 353 nextdata = _encodeonemarker(marker)
354 354 if (len(nextdata) + currentlen > _maxpayload):
355 355 currentpart = []
356 356 currentlen = 0
357 357 parts.append(currentpart)
358 358 currentpart.append(nextdata)
359 359 currentlen += len(nextdata)
360 360 for idx, part in enumerate(reversed(parts)):
361 361 data = ''.join([_pack('>B', _fmversion)] + part)
362 362 keys['dump%i' % idx] = base85.b85encode(data)
363 363 return keys
364 364
365 365 def pushmarker(repo, key, old, new):
366 366 """Push markers over pushkey"""
367 367 if not key.startswith('dump'):
368 368 repo.ui.warn(_('unknown key: %r') % key)
369 369 return 0
370 370 if old:
371 371 repo.ui.warn(_('unexpected old value for %r') % key)
372 372 return 0
373 373 data = base85.b85decode(new)
374 374 lock = repo.lock()
375 375 try:
376 376 tr = repo.transaction('pushkey: obsolete markers')
377 377 try:
378 378 repo.obsstore.mergemarkers(tr, data)
379 379 tr.close()
380 380 return 1
381 381 finally:
382 382 tr.release()
383 383 finally:
384 384 lock.release()
385 385
386 386 def syncpush(repo, remote):
387 387 """utility function to push obsolete markers to a remote
388 388
389 389 Exists mostly to allow overriding for experimentation purposes"""
390 390 if (_enabled and repo.obsstore and
391 391 'obsolete' in remote.listkeys('namespaces')):
392 392 rslts = []
393 393 remotedata = repo.listkeys('obsolete')
394 394 for key in sorted(remotedata, reverse=True):
395 395 # reverse sort to ensure we end with dump0
396 396 data = remotedata[key]
397 397 rslts.append(remote.pushkey('obsolete', key, '', data))
398 398 if [r for r in rslts if not r]:
399 399 msg = _('failed to push some obsolete markers!\n')
400 400 repo.ui.warn(msg)
401 401
402 402 def syncpull(repo, remote, gettransaction):
403 403 """utility function to pull obsolete markers from a remote
404 404
405 405 `gettransaction` is a function that returns the pull transaction, creating
406 406 one if necessary. We return the transaction to inform the calling code that
407 407 a new transaction has been created (when applicable).
408 408
409 409 Exists mostly to allow overriding for experimentation purposes"""
410 410 tr = None
411 411 if _enabled:
412 412 repo.ui.debug('fetching remote obsolete markers\n')
413 413 remoteobs = remote.listkeys('obsolete')
414 414 if 'dump0' in remoteobs:
415 415 tr = gettransaction()
416 416 for key in sorted(remoteobs, reverse=True):
417 417 if key.startswith('dump'):
418 418 data = base85.b85decode(remoteobs[key])
419 419 repo.obsstore.mergemarkers(tr, data)
420 420 repo.invalidatevolatilesets()
421 421 return tr
422 422
423 423 def allmarkers(repo):
424 424 """all obsolete markers known in a repository"""
425 425 for markerdata in repo.obsstore:
426 426 yield marker(repo, markerdata)
427 427
428 428 def precursormarkers(ctx):
429 429 """obsolete marker marking this changeset as a successors"""
430 430 for data in ctx._repo.obsstore.precursors.get(ctx.node(), ()):
431 431 yield marker(ctx._repo, data)
432 432
433 433 def successormarkers(ctx):
434 434 """obsolete marker making this changeset obsolete"""
435 435 for data in ctx._repo.obsstore.successors.get(ctx.node(), ()):
436 436 yield marker(ctx._repo, data)
437 437
438 438 def allsuccessors(obsstore, nodes, ignoreflags=0):
439 439 """Yield node for every successor of <nodes>.
440 440
441 441 Some successors may be unknown locally.
442 442
443 443 This is a linear yield unsuited to detecting split changesets."""
444 444 remaining = set(nodes)
445 445 seen = set(remaining)
446 446 while remaining:
447 447 current = remaining.pop()
448 448 yield current
449 449 for mark in obsstore.successors.get(current, ()):
450 # ignore marker flagged with with specified flag
450 # ignore marker flagged with specified flag
451 451 if mark[2] & ignoreflags:
452 452 continue
453 453 for suc in mark[1]:
454 454 if suc not in seen:
455 455 seen.add(suc)
456 456 remaining.add(suc)
457 457
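A behaviour sketch: allsuccessors() only touches obsstore.successors, so a stub with that one attribute is enough to watch it walk a chain A -> B -> C. The single-letter names stand in for real 20-byte node identifiers, and note that the starting nodes themselves are yielded as well.

class _stubstore(object):
    """bare stand-in exposing only the `successors` mapping"""
    def __init__(self, successors):
        self.successors = successors

store = _stubstore({'A': [('A', ('B',), 0, '')],
                    'B': [('B', ('C',), 0, '')]})
assert set(allsuccessors(store, ['A'])) == set(['A', 'B', 'C'])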
458 458 def foreground(repo, nodes):
459 459 """return all nodes in the "foreground" of other node
460 460
461 461 The foreground of a revision is anything reachable using parent -> children
462 462 or precursor -> successor relation. It is very similar to "descendant" but
463 463 augmented with obsolescence information.
464 464
465 465 Beware that possible obsolescence cycles may result in complex situations.
466 466 """
467 467 repo = repo.unfiltered()
468 468 foreground = set(repo.set('%ln::', nodes))
469 469 if repo.obsstore:
470 470 # We only need this complicated logic if there is obsolescence
471 471 # XXX will probably deserve an optimised revset.
472 472 nm = repo.changelog.nodemap
473 473 plen = -1
474 474 # compute the whole set of successors or descendants
475 475 while len(foreground) != plen:
476 476 plen = len(foreground)
477 477 succs = set(c.node() for c in foreground)
478 478 mutable = [c.node() for c in foreground if c.mutable()]
479 479 succs.update(allsuccessors(repo.obsstore, mutable))
480 480 known = (n for n in succs if n in nm)
481 481 foreground = set(repo.set('%ln::', known))
482 482 return set(c.node() for c in foreground)
483 483
484 484
485 485 def successorssets(repo, initialnode, cache=None):
486 486 """Return all set of successors of initial nodes
487 487
488 488 Successors set of changeset A are a group of revision that succeed A. It
489 489 succeed A as a consistent whole, each revision being only partial
490 490 replacement. Successors set contains non-obsolete changeset only.
491 491
492 492 In most cases a changeset A have zero (changeset pruned) or a single
493 493 successors set that contains a single successor (changeset A replaced by
494 494 A')
495 495
496 496 When changeset is split, it results successors set containing more than
497 497 a single element. Divergent rewriting will result in multiple successors
498 498 sets.
499 499
500 500 They are returned as a list of tuples containing all valid successors sets.
501 501
502 502 Final successors unknown locally are considered plain prune (obsoleted
503 503 without successors).
504 504
505 505 The optional `cache` parameter is a dictionary that may contains
506 506 precomputed successors sets. It is meant to reuse the computation of
507 507 previous call to `successorssets` when multiple calls are made at the same
508 508 time. The cache dictionary is updated in place. The caller is responsible
509 509 for its live spawn. Code that makes multiple calls to `successorssets`
510 510 *must* use this cache mechanism or suffer terrible performances."""
511 511
512 512 succmarkers = repo.obsstore.successors
513 513
514 514 # Stack of nodes we search successors sets for
515 515 toproceed = [initialnode]
516 516 # set version of above list for fast loop detection
517 517 # element added to "toproceed" must be added here
518 518 stackedset = set(toproceed)
519 519 if cache is None:
520 520 cache = {}
521 521
522 522 # This while loop is the flattened version of a recursive search for
523 523 # successors sets
524 524 #
525 525 # def successorssets(x):
526 526 # successors = directsuccessors(x)
527 527 # ss = [[]]
528 528 # for succ in directsuccessors(x):
529 529 # # product as in itertools cartesian product
530 530 # ss = product(ss, successorssets(succ))
531 531 # return ss
532 532 #
533 533 # But we can not use plain recursive calls here:
534 534 # - that would blow the python call stack
535 535 # - obsolescence markers may have cycles, we need to handle them.
536 536 #
537 537 # The `toproceed` list acts as our call stack. Every node we search
538 538 # successors sets for is stacked there.
539 539 #
540 540 # The `stackedset` is a set version of this stack used to check if a node
541 541 # is already stacked. This check is used to detect cycles and prevent
542 542 # infinite loops.
543 543 #
544 544 # The successors sets of all nodes are stored in the `cache` dictionary.
545 545 #
546 546 # After this while loop ends we use the cache to return the successors sets
547 547 # for the node requested by the caller.
548 548 while toproceed:
549 549 # Every iteration tries to compute the successors sets of the topmost
550 550 # node of the stack: CURRENT.
551 551 #
552 552 # There are four possible outcomes:
553 553 #
554 554 # 1) We already know the successors sets of CURRENT:
555 555 # -> mission accomplished, pop it from the stack.
556 556 # 2) Node is not obsolete:
557 557 # -> the node is its own successors set. Add it to the cache.
558 558 # 3) We do not know successors set of direct successors of CURRENT:
559 559 # -> We add those successors to the stack.
560 560 # 4) We know successors sets of all direct successors of CURRENT:
561 561 # -> We can compute CURRENT successors set and add it to the
562 562 # cache.
563 563 #
564 564 current = toproceed[-1]
565 565 if current in cache:
566 566 # case (1): We already know the successors sets
567 567 stackedset.remove(toproceed.pop())
568 568 elif current not in succmarkers:
569 569 # case (2): The node is not obsolete.
570 570 if current in repo:
571 571 # We have a valid last successor.
572 572 cache[current] = [(current,)]
573 573 else:
574 574 # Final obsolete version is unknown locally.
575 575 # Do not count that as a valid successor
576 576 cache[current] = []
577 577 else:
578 578 # cases (3) and (4)
579 579 #
580 580 # We proceed in two phases. Phase 1 aims to distinguish case (3)
581 581 # from case (4):
582 582 #
583 583 # For each direct successors of CURRENT, we check whether its
584 584 # successors sets are known. If they are not, we stack the
585 585 # unknown node and proceed to the next iteration of the while
586 586 # loop. (case 3)
587 587 #
588 588 # During this step, we may detect obsolescence cycles: a node
589 589 # with unknown successors sets but already in the call stack.
590 590 # In such a situation, we arbitrarily set the successors sets of
591 591 # the node to nothing (node pruned) to break the cycle.
592 592 #
593 593 # If no break was encountered we proceed to phase 2.
594 594 #
595 595 # Phase 2 computes successors sets of CURRENT (case 4); see details
596 596 # in phase 2 itself.
597 597 #
598 598 # Note the two levels of iteration in each phase.
599 599 # - The first one handles obsolescence markers using CURRENT as
600 600 # precursor (successors markers of CURRENT).
601 601 #
602 602 # Having multiple entries here means divergence.
603 603 #
604 604 # - The second one handles successors defined in each marker.
605 605 #
606 606 # Having none means a pruned node; multiple successors mean a split;
607 607 # a single successor is a standard replacement.
608 608 #
609 609 for mark in sorted(succmarkers[current]):
610 610 for suc in mark[1]:
611 611 if suc not in cache:
612 612 if suc in stackedset:
613 613 # cycle breaking
614 614 cache[suc] = []
615 615 else:
616 616 # case (3): if we have not computed the successors sets
617 617 # of one of these successors, we add it to the
618 618 # `toproceed` stack and stop all work for this
619 619 # iteration.
620 620 toproceed.append(suc)
621 621 stackedset.add(suc)
622 622 break
623 623 else:
624 624 continue
625 625 break
626 626 else:
627 627 # case (4): we know all successors sets of all direct
628 628 # successors
629 629 #
630 630 # The successors set contributed by each marker depends on the
631 631 # successors sets of all its "successors" nodes.
632 632 #
633 633 # Each different marker is a divergence in the obsolescence
634 634 # history. It contributes successors sets distinct from other
635 635 # markers.
636 636 #
637 637 # Within a marker, a successor may have divergent successors
638 638 # sets. In such a case, the marker will contribute multiple
639 639 # divergent successors sets. If multiple successors have
640 640 # divergent successors sets, a cartesian product is used.
641 641 #
642 642 # At the end we post-process successors sets to remove
643 643 # duplicated entries and successors sets that are strict subsets
644 644 # of another one.
645 645 succssets = []
646 646 for mark in sorted(succmarkers[current]):
647 647 # successors sets contributed by this marker
648 648 markss = [[]]
649 649 for suc in mark[1]:
650 650 # cartesian product with previous successors
651 651 productresult = []
652 652 for prefix in markss:
653 653 for suffix in cache[suc]:
654 654 newss = list(prefix)
655 655 for part in suffix:
656 656 # do not duplicate entries in a successors set;
657 657 # the first entry wins.
658 658 if part not in newss:
659 659 newss.append(part)
660 660 productresult.append(newss)
661 661 markss = productresult
662 662 succssets.extend(markss)
663 663 # remove duplicates and subsets
664 664 seen = []
665 665 final = []
666 666 candidate = sorted(((set(s), s) for s in succssets if s),
667 667 key=lambda x: len(x[1]), reverse=True)
668 668 for setversion, listversion in candidate:
669 669 for seenset in seen:
670 670 if setversion.issubset(seenset):
671 671 break
672 672 else:
673 673 final.append(listversion)
674 674 seen.append(setversion)
675 675 final.reverse() # put small successors set first
676 676 cache[current] = final
677 677 return cache[initialnode]
678 678
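A behaviour sketch with a stub repository, reusing the _stubstore stand-in from the allsuccessors() example above (single letters again stand in for node identifiers): a split marker yields one successors set with both parts, while two independent markers for the same precursor yield divergent sets. The results are normalised to lists before comparing, to stay agnostic about the tuple/list detail.

class _stubrepo(object):
    """bare stand-in exposing only what successorssets() touches"""
    def __init__(self, successors, nodes):
        self.obsstore = _stubstore(successors)
        self._nodes = nodes
    def __contains__(self, node):
        return node in self._nodes

split = _stubrepo({'A': [('A', ('B', 'C'), 0, '')]}, ['B', 'C'])
assert [list(s) for s in successorssets(split, 'A')] == [['B', 'C']]

diverge = _stubrepo({'A': [('A', ('B',), 0, ''), ('A', ('C',), 0, '')]},
                    ['B', 'C'])
assert sorted(list(s) for s in successorssets(diverge, 'A')) == [['B'], ['C']]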
679 679 def _knownrevs(repo, nodes):
680 680 """yield revision numbers of known nodes passed in parameters
681 681
682 682 Unknown revisions are silently ignored."""
683 683 torev = repo.changelog.nodemap.get
684 684 for n in nodes:
685 685 rev = torev(n)
686 686 if rev is not None:
687 687 yield rev
688 688
689 689 # mapping of 'set-name' -> <function to compute this set>
690 690 cachefuncs = {}
691 691 def cachefor(name):
692 692 """Decorator to register a function as computing the cache for a set"""
693 693 def decorator(func):
694 694 assert name not in cachefuncs
695 695 cachefuncs[name] = func
696 696 return func
697 697 return decorator
698 698
699 699 def getrevs(repo, name):
700 700 """Return the set of revision that belong to the <name> set
701 701
702 702 Such access may compute the set and cache it for future use"""
703 703 repo = repo.unfiltered()
704 704 if not repo.obsstore:
705 705 return ()
706 706 if name not in repo.obsstore.caches:
707 707 repo.obsstore.caches[name] = cachefuncs[name](repo)
708 708 return repo.obsstore.caches[name]
709 709
710 710 # To be simple we need to invalidate obsolescence caches when:
711 711 #
712 712 # - a new changeset is added
713 713 # - a public phase is changed
714 714 # - obsolescence markers are added
715 715 # - strip is used on a repo
716 716 def clearobscaches(repo):
717 717 """Remove all obsolescence related cache from a repo
718 718
719 719 This removes all caches in the obsstore if the obsstore already exists
720 720 on the repo.
721 721
722 722 (We could be smarter here given the exact event that trigger the cache
723 723 clearing)"""
724 724 # only clear caches if there is obsstore data in this repo
725 725 if 'obsstore' in repo._filecache:
726 726 repo.obsstore.caches.clear()
727 727
728 728 @cachefor('obsolete')
729 729 def _computeobsoleteset(repo):
730 730 """the set of obsolete revisions"""
731 731 obs = set()
732 732 getrev = repo.changelog.nodemap.get
733 733 getphase = repo._phasecache.phase
734 734 for node in repo.obsstore.successors:
735 735 rev = getrev(node)
736 736 if rev is not None and getphase(repo, rev):
737 737 obs.add(rev)
738 738 return obs
739 739
740 740 @cachefor('unstable')
741 741 def _computeunstableset(repo):
742 742 """the set of non obsolete revisions with obsolete parents"""
743 743 # revset is not efficient enough here
744 744 # we do (obsolete()::) - obsolete() by hand
745 745 obs = getrevs(repo, 'obsolete')
746 746 if not obs:
747 747 return set()
748 748 cl = repo.changelog
749 749 return set(r for r in cl.descendants(obs) if r not in obs)
750 750
751 751 @cachefor('suspended')
752 752 def _computesuspendedset(repo):
753 753 """the set of obsolete parents with non obsolete descendants"""
754 754 suspended = repo.changelog.ancestors(getrevs(repo, 'unstable'))
755 755 return set(r for r in getrevs(repo, 'obsolete') if r in suspended)
756 756
757 757 @cachefor('extinct')
758 758 def _computeextinctset(repo):
759 759 """the set of obsolete parents without non obsolete descendants"""
760 760 return getrevs(repo, 'obsolete') - getrevs(repo, 'suspended')
761 761
762 762
763 763 @cachefor('bumped')
764 764 def _computebumpedset(repo):
765 765 """the set of revs trying to obsolete public revisions"""
766 766 # get all possible bumped changesets
767 767 tonode = repo.changelog.node
768 768 publicnodes = (tonode(r) for r in repo.revs('public()'))
769 769 successors = allsuccessors(repo.obsstore, publicnodes,
770 770 ignoreflags=bumpedfix)
771 771 # revisions public or already obsolete don't count as bumped
772 772 query = '%ld - obsolete() - public()'
773 773 return set(repo.revs(query, _knownrevs(repo, successors)))
774 774
775 775 @cachefor('divergent')
776 776 def _computedivergentset(repo):
777 777 """the set of rev that compete to be the final successors of some revision.
778 778 """
779 779 divergent = set()
780 780 obsstore = repo.obsstore
781 781 newermap = {}
782 782 for ctx in repo.set('(not public()) - obsolete()'):
783 783 mark = obsstore.precursors.get(ctx.node(), ())
784 784 toprocess = set(mark)
785 785 while toprocess:
786 786 prec = toprocess.pop()[0]
787 787 if prec not in newermap:
788 788 successorssets(repo, prec, newermap)
789 789 newer = [n for n in newermap[prec] if n]
790 790 if len(newer) > 1:
791 791 divergent.add(ctx.rev())
792 792 break
793 793 toprocess.update(obsstore.precursors.get(prec, ()))
794 794 return divergent
795 795
796 796
797 797 def createmarkers(repo, relations, flag=0, metadata=None):
798 798 """Add obsolete markers between changesets in a repo
799 799
800 800 <relations> must be an iterable of (<old>, (<new>, ...)) tuples.
801 801 `old` and `news` are changectx objects.
802 802
803 803 Trying to obsolete a public changeset will raise an exception.
804 804
805 805 The current user and date are used unless specified otherwise in the
806 806 metadata attribute.
807 807
808 808 This function operates within a transaction of its own, but does
809 809 not take any lock on the repo.
810 810 """
811 811 # prepare metadata
812 812 if metadata is None:
813 813 metadata = {}
814 814 if 'date' not in metadata:
815 815 metadata['date'] = '%i %i' % util.makedate()
816 816 if 'user' not in metadata:
817 817 metadata['user'] = repo.ui.username()
818 818 tr = repo.transaction('add-obsolescence-marker')
819 819 try:
820 820 for prec, sucs in relations:
821 821 if not prec.mutable():
822 822 raise util.Abort("cannot obsolete immutable changeset: %s"
823 823 % prec)
824 824 nprec = prec.node()
825 825 nsucs = tuple(s.node() for s in sucs)
826 826 if nprec in nsucs:
827 827 raise util.Abort("changeset %s cannot obsolete itself" % prec)
828 828 repo.obsstore.create(tr, nprec, nsucs, flag, metadata)
829 829 repo.filteredrevcache.clear()
830 830 tr.close()
831 831 finally:
832 832 tr.release()
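A usage sketch to close: `repo` is a hypothetical localrepo with the obsolete feature enabled through a third-party extension, and `old`, `new` and `gone` are hypothetical changectx objects. Since createmarkers() takes no lock itself (per its docstring), the caller holds one.

lock = repo.lock()
try:
    # `old` rewritten into `new`; user and date fall back to defaults
    createmarkers(repo, [(old, (new,))])
    # pruning a changeset instead: empty successors tuple
    createmarkers(repo, [(gone, ())])
finally:
    lock.release()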