obsolete: add __eq__ and __hash__ to marker to make set() deduplication work
Augie Fackler
r20031:6c1adf20 default
@@ -1,824 +1,832
1 1 # obsolete.py - obsolete markers handling
2 2 #
3 3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
4 4 # Logilab SA <contact@logilab.fr>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 """Obsolete markers handling
10 10
11 11 An obsolete marker maps an old changeset to a list of new
12 12 changesets. If the list of new changesets is empty, the old changeset
13 13 is said to be "killed". Otherwise, the old changeset is being
14 14 "replaced" by the new changesets.
15 15
16 16 Obsolete markers can be used to record and distribute changeset graph
17 17 transformations performed by history rewriting operations, and help
18 18 build new tools to reconcile conflicting rewriting actions. To
19 19 facilitate conflict resolution, markers include various annotations
20 20 besides old and new changeset identifiers, such as creation date or
21 21 author name.
22 22
23 23 The old obsoleted changeset is called "precursor" and possible replacements are
24 24 called "successors". Markers that use changeset X as a precursor are called
25 25 "successor markers of X" because they hold information about the successors of
26 26 X. Markers that use changeset Y as a successor are called "precursor markers of
27 27 Y" because they hold information about the precursors of Y.
28 28
29 29 Examples:
30 30
31 31 - When changeset A is replaced by a changeset A', one marker is stored:
32 32
33 33 (A, (A',))
34 34
35 35 - When changesets A and B are folded into a new changeset C, two markers are
36 36 stored:
37 37
38 38 (A, (C,)) and (B, (C,))
39 39
40 40 - When changeset A is simply "pruned" from the graph, a marker is created:
41 41
42 42 (A, ())
43 43
44 44 - When changeset A is split into B and C, a single marker is used:
45 45
46 46 (A, (B, C))
47 47
48 48 We use a single marker to distinguish the "split" case from the "divergence"
49 49 case. If two independent operations rewrite the same changeset A into A' and
50 50 A'', we have an error case: divergent rewriting. We can detect it because
51 51 two markers will be created independently:
52 52
53 53 (A, (B,)) and (A, (C,))
54 54
55 55 Format
56 56 ------
57 57
58 58 Markers are stored in an append-only file stored in
59 59 '.hg/store/obsstore'.
60 60
61 61 The file starts with a version header:
62 62
63 63 - 1 unsigned byte: version number, starting at zero.
64 64
65 65
66 66 The header is followed by the markers. Each marker is made of:
67 67
68 68 - 1 unsigned byte: number of new changesets "N", can be zero.
69 69
70 70 - 1 unsigned 32-bits integer: metadata size "M" in bytes.
71 71
72 72 - 1 byte: a bit field. It is reserved for flags used in common
73 73 obsolete marker operations, to avoid repeated decoding of metadata
74 74 entries.
75 75
76 76 - 20 bytes: obsoleted changeset identifier.
77 77
78 78 - N*20 bytes: new changeset identifiers.
79 79
80 80 - M bytes: metadata as a sequence of nul-separated strings. Each
81 81 string contains a key and a value, separated by a colon ':', without
82 82 additional encoding. Keys cannot contain '\0' or ':' and values
83 83 cannot contain '\0'.
84 84 """
85 85 import struct
86 86 import util, base85, node
87 87 from i18n import _
88 88
89 89 _pack = struct.pack
90 90 _unpack = struct.unpack
91 91
92 92 _SEEK_END = 2 # os.SEEK_END was introduced in Python 2.5
93 93
94 94 # the obsolete feature is not mature enough to be enabled by default.
95 95 # you have to rely on a third party extension to enable this.
96 96 _enabled = False
97 97
98 98 # data used for parsing and writing
99 99 _fmversion = 0
100 100 _fmfixed = '>BIB20s'
101 101 _fmnode = '20s'
102 102 _fmfsize = struct.calcsize(_fmfixed)
103 103 _fnodesize = struct.calcsize(_fmnode)
104 104
105 105 ### obsolescence marker flag
106 106
107 107 ## bumpedfix flag
108 108 #
109 109 # When a changeset A' succeeds a changeset A which became public, we call A'
110 110 # "bumped" because it's a successor of a public changeset
111 111 #
112 112 # o A' (bumped)
113 113 # |`:
114 114 # | o A
115 115 # |/
116 116 # o Z
117 117 #
118 118 # The way to solve this situation is to create a new changeset Ad as a child
119 119 # of A. This changeset has the same content as A'. So the diff from A to A'
120 120 # is the same as the diff from A to Ad. Ad is marked as a successor of A'
121 121 #
122 122 # o Ad
123 123 # |`:
124 124 # | x A'
125 125 # |'|
126 126 # o | A
127 127 # |/
128 128 # o Z
129 129 #
130 130 # But by transitivity Ad is also a successor of A. To avoid having Ad marked
131 131 # as bumped too, we add the `bumpedfix` flag to the marker, <A', (Ad,)>.
132 132 # This flag means that the successor expresses the changes between the public
133 133 # and bumped versions and fixes the situation, breaking the transitivity of
134 134 # "bumped" here.
135 135 bumpedfix = 1
136 136
137 137 def _readmarkers(data):
138 138 """Read and enumerate markers from raw data"""
139 139 off = 0
140 140 diskversion = _unpack('>B', data[off:off + 1])[0]
141 141 off += 1
142 142 if diskversion != _fmversion:
143 143 raise util.Abort(_('parsing obsolete marker: unknown version %r')
144 144 % diskversion)
145 145
146 146 # Loop on markers
147 147 l = len(data)
148 148 while off + _fmfsize <= l:
149 149 # read fixed part
150 150 cur = data[off:off + _fmfsize]
151 151 off += _fmfsize
152 152 nbsuc, mdsize, flags, pre = _unpack(_fmfixed, cur)
153 153 # read replacement
154 154 sucs = ()
155 155 if nbsuc:
156 156 s = (_fnodesize * nbsuc)
157 157 cur = data[off:off + s]
158 158 sucs = _unpack(_fmnode * nbsuc, cur)
159 159 off += s
160 160 # read metadata
161 161 # (metadata will be decoded on demand)
162 162 metadata = data[off:off + mdsize]
163 163 if len(metadata) != mdsize:
164 164 raise util.Abort(_('parsing obsolete marker: metadata is too '
165 165 'short, %d bytes expected, got %d')
166 166 % (mdsize, len(metadata)))
167 167 off += mdsize
168 168 yield (pre, sucs, flags, metadata)
169 169
170 170 def encodemeta(meta):
171 171 """Return encoded metadata string to string mapping.
172 172
173 173 Assume no ':' in key and no '\0' in both key and value."""
174 174 for key, value in meta.iteritems():
175 175 if ':' in key or '\0' in key:
176 176 raise ValueError("':' and '\0' are forbidden in metadata keys")
177 177 if '\0' in value:
178 178 raise ValueError("'\0' is forbidden in metadata values")
179 179 return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])
180 180
181 181 def decodemeta(data):
182 182 """Return string to string dictionary from encoded version."""
183 183 d = {}
184 184 for l in data.split('\0'):
185 185 if l:
186 186 key, value = l.split(':')
187 187 d[key] = value
188 188 return d
189 189
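A quick round trip through the two helpers above, as a sketch with arbitrary
values (assuming both helpers are in scope; note encodemeta sorts keys, so
the encoding is deterministic):

    meta = {'user': 'alice', 'date': '0 0'}
    blob = encodemeta(meta)          # 'date:0 0\x00user:alice'
    assert decodemeta(blob) == meta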
190 190 class marker(object):
191 191 """Wrap obsolete marker raw data"""
192 192
193 193 def __init__(self, repo, data):
194 194 # the repo argument will be used to create changectx in a later version
195 195 self._repo = repo
196 196 self._data = data
197 197 self._decodedmeta = None
198 198
199 def __hash__(self):
200 return hash(self._data)
201
202 def __eq__(self, other):
203 if type(other) != type(self):
204 return False
205 return self._data == other._data
206
199 207 def precnode(self):
200 208 """Precursor changeset node identifier"""
201 209 return self._data[0]
202 210
203 211 def succnodes(self):
204 212 """List of successor changesets node identifiers"""
205 213 return self._data[1]
206 214
207 215 def metadata(self):
208 216 """Decoded metadata dictionary"""
209 217 if self._decodedmeta is None:
210 218 self._decodedmeta = decodemeta(self._data[3])
211 219 return self._decodedmeta
212 220
213 221 def date(self):
214 222 """Creation date as (unixtime, offset)"""
215 223 parts = self.metadata()['date'].split(' ')
216 224 return (float(parts[0]), int(parts[1]))
217 225
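The new __eq__ and __hash__ methods delegate to the underlying raw tuple, so
two wrappers around equal data now compare equal and collapse in a set, which
is the point of this change. A sketch with hypothetical raw data (the repo
argument does not participate in equality, so None is passed for brevity):

    raw = ('\x11' * 20, ('\x22' * 20,), 0, '')   # hypothetical marker data
    m1, m2 = marker(None, raw), marker(None, raw)
    assert m1 == m2 and hash(m1) == hash(m2)
    assert len(set([m1, m2])) == 1               # set() deduplication works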
218 226 class obsstore(object):
219 227 """Store obsolete markers
220 228
221 229 Markers can be accessed with two mappings:
222 230 - precursors[x] -> set(markers on precursors edges of x)
223 231 - successors[x] -> set(markers on successors edges of x)
224 232 """
225 233
226 234 def __init__(self, sopener):
227 235 # caches for various obsolescence related sets
228 236 self.caches = {}
229 237 self._all = []
230 238 # new markers to serialize
231 239 self.precursors = {}
232 240 self.successors = {}
233 241 self.sopener = sopener
234 242 data = sopener.tryread('obsstore')
235 243 if data:
236 244 self._load(_readmarkers(data))
237 245
238 246 def __iter__(self):
239 247 return iter(self._all)
240 248
241 249 def __nonzero__(self):
242 250 return bool(self._all)
243 251
244 252 def create(self, transaction, prec, succs=(), flag=0, metadata=None):
245 253 """obsolete: add a new obsolete marker
246 254
247 255 * ensure it is hashable
248 256 * check mandatory metadata
249 257 * encode metadata
250 258 """
251 259 if metadata is None:
252 260 metadata = {}
253 261 if 'date' not in metadata:
254 262 metadata['date'] = "%d %d" % util.makedate()
255 263 if len(prec) != 20:
256 264 raise ValueError(prec)
257 265 for succ in succs:
258 266 if len(succ) != 20:
259 267 raise ValueError(succ)
260 268 marker = (str(prec), tuple(succs), int(flag), encodemeta(metadata))
261 269 self.add(transaction, [marker])
262 270
263 271 def add(self, transaction, markers):
264 272 """Add new markers to the store
265 273
266 274 Take care of filtering out duplicates.
267 275 Return the number of new markers."""
268 276 if not _enabled:
269 277 raise util.Abort('obsolete feature is not enabled on this repo')
270 278 known = set(self._all)
271 279 new = []
272 280 for m in markers:
273 281 if m not in known:
274 282 known.add(m)
275 283 new.append(m)
276 284 if new:
277 285 f = self.sopener('obsstore', 'ab')
278 286 try:
279 287 # Whether the file's current position is at the beginning or at
280 288 # the end after opening a file for appending is implementation
281 289 # defined. So we must seek to the end before calling tell(),
282 290 # or we may get a zero offset for non-zero sized files on
283 291 # some platforms (issue3543).
284 292 f.seek(0, _SEEK_END)
285 293 offset = f.tell()
286 294 transaction.add('obsstore', offset)
287 295 # offset == 0: new file - add the version header
288 296 for bytes in _encodemarkers(new, offset == 0):
289 297 f.write(bytes)
290 298 finally:
291 299 # XXX: f.close() == filecache invalidation == obsstore rebuilt.
292 300 # call 'filecacheentry.refresh()' here
293 301 f.close()
294 302 self._load(new)
295 303 # new markers *may* have changed several sets. invalidate the caches.
296 304 self.caches.clear()
297 305 return len(new)
298 306
299 307 def mergemarkers(self, transaction, data):
300 308 markers = _readmarkers(data)
301 309 self.add(transaction, markers)
302 310
303 311 def _load(self, markers):
304 312 for mark in markers:
305 313 self._all.append(mark)
306 314 pre, sucs = mark[:2]
307 315 self.successors.setdefault(pre, set()).add(mark)
308 316 for suc in sucs:
309 317 self.precursors.setdefault(suc, set()).add(mark)
310 318 if node.nullid in self.precursors:
311 319 raise util.Abort(_('bad obsolescence marker detected: '
312 320 'invalid successors nullid'))
313 321
314 322 def _encodemarkers(markers, addheader=False):
315 323 # Kept separate from flushmarkers(), it will be reused for
316 324 # markers exchange.
317 325 if addheader:
318 326 yield _pack('>B', _fmversion)
319 327 for marker in markers:
320 328 yield _encodeonemarker(marker)
321 329
322 330
323 331 def _encodeonemarker(marker):
324 332 pre, sucs, flags, metadata = marker
325 333 nbsuc = len(sucs)
326 334 format = _fmfixed + (_fmnode * nbsuc)
327 335 data = [nbsuc, len(metadata), flags, pre]
328 336 data.extend(sucs)
329 337 return _pack(format, *data) + metadata
330 338
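These two encoders are the inverse of _readmarkers above, so a marker
survives a round trip; a sketch with made-up nodes (addheader=True prepends
the version byte _readmarkers expects):

    m = ('\x11' * 20, ('\x22' * 20, '\x33' * 20), 0, 'k:v')
    data = ''.join(_encodemarkers([m], addheader=True))
    assert list(_readmarkers(data)) == [m]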
331 339 # arbitrarily picked to fit into the 8K limit from HTTP servers
332 340 # you have to take into account:
333 341 # - the version header
334 342 # - the base85 encoding
335 343 _maxpayload = 5300
336 344
337 345 def listmarkers(repo):
338 346 """List markers over pushkey"""
339 347 if not repo.obsstore:
340 348 return {}
341 349 keys = {}
342 350 parts = []
343 351 currentlen = _maxpayload * 2 # ensure we create a new part
344 352 for marker in repo.obsstore:
345 353 nextdata = _encodeonemarker(marker)
346 354 if (len(nextdata) + currentlen > _maxpayload):
347 355 currentpart = []
348 356 currentlen = 0
349 357 parts.append(currentpart)
350 358 currentpart.append(nextdata)
351 359 currentlen += len(nextdata)
352 360 for idx, part in enumerate(reversed(parts)):
353 361 data = ''.join([_pack('>B', _fmversion)] + part)
354 362 keys['dump%i' % idx] = base85.b85encode(data)
355 363 return keys
356 364
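The chunking rule in listmarkers can be exercised standalone: a new part is
started whenever appending the next encoded marker would push the current
part over the limit. A sketch with toy payloads, assuming _maxpayload above:

    parts, currentlen = [], _maxpayload * 2      # force a new first part
    for nextdata in ['a' * 3000, 'b' * 3000, 'c' * 3000]:
        if len(nextdata) + currentlen > _maxpayload:
            currentpart, currentlen = [], 0
            parts.append(currentpart)
        currentpart.append(nextdata)
        currentlen += len(nextdata)
    assert [len(p) for p in parts] == [1, 1, 1]  # one datum per part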
357 365 def pushmarker(repo, key, old, new):
358 366 """Push markers over pushkey"""
359 367 if not key.startswith('dump'):
360 368 repo.ui.warn(_('unknown key: %r') % key)
361 369 return 0
362 370 if old:
363 371 repo.ui.warn(_('unexpected old value for %r') % key)
364 372 return 0
365 373 data = base85.b85decode(new)
366 374 lock = repo.lock()
367 375 try:
368 376 tr = repo.transaction('pushkey: obsolete markers')
369 377 try:
370 378 repo.obsstore.mergemarkers(tr, data)
371 379 tr.close()
372 380 return 1
373 381 finally:
374 382 tr.release()
375 383 finally:
376 384 lock.release()
377 385
378 386 def syncpush(repo, remote):
379 387 """utility function to push obsolete markers to a remote
380 388
381 389 Exists mostly to allow overriding for experimentation purposes"""
382 390 if (_enabled and repo.obsstore and
383 391 'obsolete' in remote.listkeys('namespaces')):
384 392 rslts = []
385 393 remotedata = repo.listkeys('obsolete')
386 394 for key in sorted(remotedata, reverse=True):
387 395 # reverse sort to ensure we end with dump0
388 396 data = remotedata[key]
389 397 rslts.append(remote.pushkey('obsolete', key, '', data))
390 398 if [r for r in rslts if not r]:
391 399 msg = _('failed to push some obsolete markers!\n')
392 400 repo.ui.warn(msg)
393 401
394 402 def syncpull(repo, remote, gettransaction):
395 403 """utility function to pull obsolete markers from a remote
396 404
397 405 The `gettransaction` parameter is a function that returns the pull
398 406 transaction, creating one if necessary. We return the transaction to inform
399 407 the calling code that a new transaction has been created (when applicable).
400 408
401 409 Exists mostly to allow overriding for experimentation purposes"""
402 410 tr = None
403 411 if _enabled:
404 412 repo.ui.debug('fetching remote obsolete markers\n')
405 413 remoteobs = remote.listkeys('obsolete')
406 414 if 'dump0' in remoteobs:
407 415 tr = gettransaction()
408 416 for key in sorted(remoteobs, reverse=True):
409 417 if key.startswith('dump'):
410 418 data = base85.b85decode(remoteobs[key])
411 419 repo.obsstore.mergemarkers(tr, data)
412 420 repo.invalidatevolatilesets()
413 421 return tr
414 422
415 423 def allmarkers(repo):
416 424 """all obsolete markers known in a repository"""
417 425 for markerdata in repo.obsstore:
418 426 yield marker(repo, markerdata)
419 427
420 428 def precursormarkers(ctx):
421 429 """obsolete marker marking this changeset as a successors"""
422 430 for data in ctx._repo.obsstore.precursors.get(ctx.node(), ()):
423 431 yield marker(ctx._repo, data)
424 432
425 433 def successormarkers(ctx):
426 434 """obsolete marker making this changeset obsolete"""
427 435 for data in ctx._repo.obsstore.successors.get(ctx.node(), ()):
428 436 yield marker(ctx._repo, data)
429 437
430 438 def allsuccessors(obsstore, nodes, ignoreflags=0):
431 439 """Yield node for every successor of <nodes>.
432 440
433 441 Some successors may be unknown locally.
434 442
435 443 This is a linear yield unsuited to detecting split changesets."""
436 444 remaining = set(nodes)
437 445 seen = set(remaining)
438 446 while remaining:
439 447 current = remaining.pop()
440 448 yield current
441 449 for mark in obsstore.successors.get(current, ()):
442 450 # ignore markers flagged with the specified flag
443 451 if mark[2] & ignoreflags:
444 452 continue
445 453 for suc in mark[1]:
446 454 if suc not in seen:
447 455 seen.add(suc)
448 456 remaining.add(suc)
449 457
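A standalone sketch of the traversal with a toy stand-in for the obsstore;
the nodes are short strings here instead of 20-byte ids, and the stand-in
class is hypothetical:

    class fakestore(object):     # hypothetical obsstore stand-in
        successors = {'A': set([('A', ('B', 'C'), 0, '')]),
                      'B': set([('B', ('D',), 0, '')])}
    assert set(allsuccessors(fakestore(), ['A'])) == set('ABCD')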
450 458 def foreground(repo, nodes):
451 459 """return all nodes in the "foreground" of other node
452 460
453 461 The foreground of a revision is anything reachable using parent -> children
454 462 or precursor -> successor relation. It is very similar to "descendant" but
455 463 augmented with obsolescence information.
456 464
457 465 Beware that obsolescence cycles may arise in complex situations.
458 466 """
459 467 repo = repo.unfiltered()
460 468 foreground = set(repo.set('%ln::', nodes))
461 469 if repo.obsstore:
462 470 # We only need this complicated logic if there is obsolescence
463 471 # XXX will probably deserve an optimised revset.
464 472 nm = repo.changelog.nodemap
465 473 plen = -1
466 474 # compute the whole set of successors or descendants
467 475 while len(foreground) != plen:
468 476 plen = len(foreground)
469 477 succs = set(c.node() for c in foreground)
470 478 mutable = [c.node() for c in foreground if c.mutable()]
471 479 succs.update(allsuccessors(repo.obsstore, mutable))
472 480 known = (n for n in succs if n in nm)
473 481 foreground = set(repo.set('%ln::', known))
474 482 return set(c.node() for c in foreground)
475 483
476 484
477 485 def successorssets(repo, initialnode, cache=None):
478 486 """Return all set of successors of initial nodes
479 487
480 488 A successors set of changeset A is a group of revisions that succeed A. It
481 489 succeeds A as a consistent whole, each revision being only a partial
482 490 replacement. Successors sets contain non-obsolete changesets only.
483 491
484 492 In most cases a changeset A has zero (changeset pruned) or a single
485 493 successors set that contains a single successor (changeset A replaced by
486 494 A').
487 495
488 496 When a changeset is split, the result is a successors set containing more
489 497 than a single element. Divergent rewriting will result in multiple
490 498 successors sets.
491 499
492 500 They are returned as a list of tuples containing all valid successors sets.
493 501
494 502 Final successors unknown locally are considered a plain prune (obsoleted
495 503 without successors).
496 504
497 505 The optional `cache` parameter is a dictionary that may contain
498 506 precomputed successors sets. It is meant to reuse the computation of a
499 507 previous call to `successorssets` when multiple calls are made at the same
500 508 time. The cache dictionary is updated in place. The caller is responsible
501 509 for its life span. Code that makes multiple calls to `successorssets`
502 510 *must* use this cache mechanism or suffer terrible performance."""
503 511
504 512 succmarkers = repo.obsstore.successors
505 513
506 514 # Stack of nodes we search successors sets for
507 515 toproceed = [initialnode]
508 516 # set version of above list for fast loop detection
509 517 # every element added to "toproceed" must be added here too
510 518 stackedset = set(toproceed)
511 519 if cache is None:
512 520 cache = {}
513 521
514 522 # This while loop is the flattened version of a recursive search for
515 523 # successors sets
516 524 #
517 525 # def successorssets(x):
518 526 # successors = directsuccessors(x)
519 527 # ss = [[]]
520 528 # for succ in directsuccessors(x):
521 529 # # product as in itertools cartesian product
522 530 # ss = product(ss, successorssets(succ))
523 531 # return ss
524 532 #
525 533 # But we cannot use plain recursive calls here:
526 534 # - that would blow the python call stack
527 535 # - obsolescence markers may have cycles, we need to handle them.
528 536 #
529 537 # The `toproceed` list acts as our call stack. Every node we search
530 538 # successors sets for is stacked there.
531 539 #
532 540 # The `stackedset` is a set version of this stack used to check if a node
533 541 # is already stacked. This check is used to detect cycles and prevent
534 542 # infinite loops.
535 543 #
536 544 # successors sets of all nodes are stored in the `cache` dictionary.
537 545 #
538 546 # After this while loop ends we use the cache to return the successors sets
539 547 # for the node requested by the caller.
540 548 while toproceed:
541 549 # Every iteration tries to compute the successors sets of the topmost
542 550 # node of the stack: CURRENT.
543 551 #
544 552 # There are four possible outcomes:
545 553 #
546 554 # 1) We already know the successors sets of CURRENT:
547 555 # -> mission accomplished, pop it from the stack.
548 556 # 2) Node is not obsolete:
549 557 # -> the node is its own successors set. Add it to the cache.
550 558 # 3) We do not know successors set of direct successors of CURRENT:
551 559 # -> We add those successors to the stack.
552 560 # 4) We know successors sets of all direct successors of CURRENT:
553 561 # -> We can compute CURRENT successors set and add it to the
554 562 # cache.
555 563 #
556 564 current = toproceed[-1]
557 565 if current in cache:
558 566 # case (1): We already know the successors sets
559 567 stackedset.remove(toproceed.pop())
560 568 elif current not in succmarkers:
561 569 # case (2): The node is not obsolete.
562 570 if current in repo:
563 571 # We have a valid final successor.
564 572 cache[current] = [(current,)]
565 573 else:
566 574 # Final obsolete version is unknown locally.
567 575 # Do not count that as a valid successor.
568 576 cache[current] = []
569 577 else:
570 578 # cases (3) and (4)
571 579 #
572 580 # We proceed in two phases. Phase 1 aims to distinguish case (3)
573 581 # from case (4):
574 582 #
575 583 # For each direct successor of CURRENT, we check whether its
576 584 # successors sets are known. If they are not, we stack the
577 585 # unknown node and proceed to the next iteration of the while
578 586 # loop. (case 3)
579 587 #
580 588 # During this step, we may detect obsolescence cycles: a node
581 589 # with unknown successors sets but already in the call stack.
582 590 # In such a situation, we arbitrarily set the successors sets of
583 591 # the node to nothing (node pruned) to break the cycle.
584 592 #
585 593 # If no break was encountered we proceed to phase 2.
586 594 #
587 595 # Phase 2 computes successors sets of CURRENT (case 4); see details
588 596 # in phase 2 itself.
589 597 #
590 598 # Note the two levels of iteration in each phase.
591 599 # - The first one handles obsolescence markers using CURRENT as
592 600 # precursor (successors markers of CURRENT).
593 601 #
594 602 # Having multiple entries here means divergence.
595 603 #
596 604 # - The second one handles successors defined in each marker.
597 605 #
598 606 # Having none means a pruned node, multiple successors mean a split, and
599 607 # a single successor is a standard replacement.
600 608 #
601 609 for mark in sorted(succmarkers[current]):
602 610 for suc in mark[1]:
603 611 if suc not in cache:
604 612 if suc in stackedset:
605 613 # cycle breaking
606 614 cache[suc] = []
607 615 else:
608 616 # case (3) If we have not computed successors sets
609 617 # of one of those successors we add it to the
610 618 # `toproceed` stack and stop all work for this
611 619 # iteration.
612 620 toproceed.append(suc)
613 621 stackedset.add(suc)
614 622 break
615 623 else:
616 624 continue
617 625 break
618 626 else:
619 627 # case (4): we know all successors sets of all direct
620 628 # successors
621 629 #
622 630 # Successors set contributed by each marker depends on the
623 631 # successors sets of all its "successors" nodes.
624 632 #
625 633 # Each different marker is a divergence in the obsolescence
626 634 # history. It contributes successors sets distinct from other
627 635 # markers.
628 636 #
629 637 # Within a marker, a successor may have divergent successors
630 638 # sets. In such a case, the marker will contribute multiple
631 639 # divergent successors sets. If multiple successors have
632 640 # divergent successors sets, a cartesian product is used.
633 641 #
634 642 # At the end we post-process successors sets to remove
635 643 # duplicated entries and successors sets that are strict subsets of
636 644 # another one.
637 645 succssets = []
638 646 for mark in sorted(succmarkers[current]):
639 647 # successors sets contributed by this marker
640 648 markss = [[]]
641 649 for suc in mark[1]:
642 650 # cartesian product with previous successors
643 651 productresult = []
644 652 for prefix in markss:
645 653 for suffix in cache[suc]:
646 654 newss = list(prefix)
647 655 for part in suffix:
648 656 # do not duplicate entries in the successors set;
649 657 # the first entry wins.
650 658 if part not in newss:
651 659 newss.append(part)
652 660 productresult.append(newss)
653 661 markss = productresult
654 662 succssets.extend(markss)
655 663 # remove duplicated and subset successors sets
656 664 seen = []
657 665 final = []
658 666 candidate = sorted(((set(s), s) for s in succssets if s),
659 667 key=lambda x: len(x[1]), reverse=True)
660 668 for setversion, listversion in candidate:
661 669 for seenset in seen:
662 670 if setversion.issubset(seenset):
663 671 break
664 672 else:
665 673 final.append(listversion)
666 674 seen.append(setversion)
667 675 final.reverse() # put small successors sets first
668 676 cache[current] = final
669 677 return cache[initialnode]
670 678
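The subset-removal post-processing of case (4) can be exercised on its own; a
sketch with toy string nodes showing that duplicates and strict subsets are
dropped, and small successors sets end up first:

    succssets = [['B', 'C'], ['B'], ['B', 'C'], ['D']]
    seen, final = [], []
    candidate = sorted(((set(s), s) for s in succssets if s),
                       key=lambda x: len(x[1]), reverse=True)
    for setversion, listversion in candidate:
        for seenset in seen:
            if setversion.issubset(seenset):
                break
        else:
            final.append(listversion)
            seen.append(setversion)
    final.reverse()
    assert final == [['D'], ['B', 'C']]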
671 679 def _knownrevs(repo, nodes):
672 680 """yield revision numbers of known nodes passed in parameters
673 681
674 682 Unknown revisions are silently ignored."""
675 683 torev = repo.changelog.nodemap.get
676 684 for n in nodes:
677 685 rev = torev(n)
678 686 if rev is not None:
679 687 yield rev
680 688
681 689 # mapping of 'set-name' -> <function to compute this set>
682 690 cachefuncs = {}
683 691 def cachefor(name):
684 692 """Decorator to register a function as computing the cache for a set"""
685 693 def decorator(func):
686 694 assert name not in cachefuncs
687 695 cachefuncs[name] = func
688 696 return func
689 697 return decorator
690 698
691 699 def getrevs(repo, name):
692 700 """Return the set of revision that belong to the <name> set
693 701
694 702 Such access may compute the set and cache it for future use"""
695 703 repo = repo.unfiltered()
696 704 if not repo.obsstore:
697 705 return ()
698 706 if name not in repo.obsstore.caches:
699 707 repo.obsstore.caches[name] = cachefuncs[name](repo)
700 708 return repo.obsstore.caches[name]
701 709
702 710 # To be simple we need to invalidate obsolescence caches when:
703 711 #
704 712 # - a new changeset is added
705 713 # - the public phase is changed
706 714 # - obsolescence markers are added
707 715 # - strip is used on a repo
708 716 def clearobscaches(repo):
709 717 """Remove all obsolescence related cache from a repo
710 718
711 719 This remove all cache in obsstore is the obsstore already exist on the
712 720 repo.
713 721
714 722 (We could be smarter here given the exact event that trigger the cache
715 723 clearing)"""
716 724 # only clear the cache if there is obsstore data in this repo
717 725 if 'obsstore' in repo._filecache:
718 726 repo.obsstore.caches.clear()
719 727
720 728 @cachefor('obsolete')
721 729 def _computeobsoleteset(repo):
722 730 """the set of obsolete revisions"""
723 731 obs = set()
724 732 getrev = repo.changelog.nodemap.get
725 733 getphase = repo._phasecache.phase
726 734 for node in repo.obsstore.successors:
727 735 rev = getrev(node)
728 736 if rev is not None and getphase(repo, rev):
729 737 obs.add(rev)
730 738 return obs
731 739
732 740 @cachefor('unstable')
733 741 def _computeunstableset(repo):
734 742 """the set of non obsolete revisions with obsolete parents"""
735 743 # revset is not efficient enough here
736 744 # we do (obsolete()::) - obsolete() by hand
737 745 obs = getrevs(repo, 'obsolete')
738 746 if not obs:
739 747 return set()
740 748 cl = repo.changelog
741 749 return set(r for r in cl.descendants(obs) if r not in obs)
742 750
743 751 @cachefor('suspended')
744 752 def _computesuspendedset(repo):
745 753 """the set of obsolete parents with non obsolete descendants"""
746 754 suspended = repo.changelog.ancestors(getrevs(repo, 'unstable'))
747 755 return set(r for r in getrevs(repo, 'obsolete') if r in suspended)
748 756
749 757 @cachefor('extinct')
750 758 def _computeextinctset(repo):
751 759 """the set of obsolete parents without non obsolete descendants"""
752 760 return getrevs(repo, 'obsolete') - getrevs(repo, 'suspended')
753 761
754 762
755 763 @cachefor('bumped')
756 764 def _computebumpedset(repo):
757 765 """the set of revs trying to obsolete public revisions"""
758 766 # get all possible bumped changesets
759 767 tonode = repo.changelog.node
760 768 publicnodes = (tonode(r) for r in repo.revs('public()'))
761 769 successors = allsuccessors(repo.obsstore, publicnodes,
762 770 ignoreflags=bumpedfix)
763 771 # revisions public or already obsolete don't count as bumped
764 772 query = '%ld - obsolete() - public()'
765 773 return set(repo.revs(query, _knownrevs(repo, successors)))
766 774
767 775 @cachefor('divergent')
768 776 def _computedivergentset(repo):
769 777 """the set of rev that compete to be the final successors of some revision.
770 778 """
771 779 divergent = set()
772 780 obsstore = repo.obsstore
773 781 newermap = {}
774 782 for ctx in repo.set('(not public()) - obsolete()'):
775 783 mark = obsstore.precursors.get(ctx.node(), ())
776 784 toprocess = set(mark)
777 785 while toprocess:
778 786 prec = toprocess.pop()[0]
779 787 if prec not in newermap:
780 788 successorssets(repo, prec, newermap)
781 789 newer = [n for n in newermap[prec] if n]
782 790 if len(newer) > 1:
783 791 divergent.add(ctx.rev())
784 792 break
785 793 toprocess.update(obsstore.precursors.get(prec, ()))
786 794 return divergent
787 795
788 796
789 797 def createmarkers(repo, relations, flag=0, metadata=None):
790 798 """Add obsolete markers between changesets in a repo
791 799
792 800 <relations> must be an iterable of (<old>, (<new>, ...)) tuples.
793 801 `old` is a changectx and `news` is a tuple of changectx.
794 802
795 803 Trying to obsolete a public changeset will raise an exception.
796 804
797 805 Current user and date are used unless specified otherwise in the
798 806 metadata attribute.
799 807
800 808 This function operates within a transaction of its own, but does
801 809 not take any lock on the repo.
802 810 """
803 811 # prepare metadata
804 812 if metadata is None:
805 813 metadata = {}
806 814 if 'date' not in metadata:
807 815 metadata['date'] = '%i %i' % util.makedate()
808 816 if 'user' not in metadata:
809 817 metadata['user'] = repo.ui.username()
810 818 tr = repo.transaction('add-obsolescence-marker')
811 819 try:
812 820 for prec, sucs in relations:
813 821 if not prec.mutable():
814 822 raise util.Abort("cannot obsolete immutable changeset: %s"
815 823 % prec)
816 824 nprec = prec.node()
817 825 nsucs = tuple(s.node() for s in sucs)
818 826 if nprec in nsucs:
819 827 raise util.Abort("changeset %s cannot obsolete itself" % prec)
820 828 repo.obsstore.create(tr, nprec, nsucs, flag, metadata)
821 829 repo.filteredrevcache.clear()
822 830 tr.close()
823 831 finally:
824 832 tr.release()