obsolete: add an allprecursors method mirroring allsuccessors one....
Pierre-Yves David
r20206:cdcbe103 default
@@ -1,833 +1,855
1 1 # obsolete.py - obsolete markers handling
2 2 #
3 3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
4 4 # Logilab SA <contact@logilab.fr>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 """Obsolete markers handling
10 10
11 11 An obsolete marker maps an old changeset to a list of new
12 12 changesets. If the list of new changesets is empty, the old changeset
13 13 is said to be "killed". Otherwise, the old changeset is being
14 14 "replaced" by the new changesets.
15 15
16 16 Obsolete markers can be used to record and distribute changeset graph
17 17 transformations performed by history rewriting operations, and help
18 18 build new tools to reconcile conflicting rewriting actions. To
19 19 facilitate conflict resolution, markers include various annotations
20 20 besides old and new changeset identifiers, such as creation date or
21 21 author name.
22 22
23 23 The old obsoleted changeset is called "precursor" and possible replacements are
24 24 called "successors". Markers that use changeset X as a precursor are called
25 25 "successor markers of X" because they hold information about the successors of
26 26 X. Markers that use changeset Y as a successor are called "precursor markers of
27 27 Y" because they hold information about the precursors of Y.
28 28
29 29 Examples:
30 30
31 31 - When changeset A is replaced by a changeset A', one marker is stored:
32 32
33 33 (A, (A',))
34 34
35 35 - When changesets A and B are folded into a new changeset C two markers are
36 36 stored:
37 37
38 38 (A, (C,)) and (B, (C,))
39 39
40 40 - When changeset A is simply "pruned" from the graph, a marker is created:
41 41
42 42 (A, ())
43 43
44 44 - When changeset A is split into B and C, a single marker is used:
45 45
46 46 (A, (B, C))
47 47
48 48 We use a single marker to distinguish the "split" case from the "divergence"
49 49 case. If two independent operations rewrite the same changeset A into A' and
50 50 A'', we have an error case: divergent rewriting. We can detect it because
51 51 two markers will be created independently:
52 52
53 53 (A, (B,)) and (A, (C,))
54 54
55 55 Format
56 56 ------
57 57
58 58 Markers are stored in an append-only file stored in
59 59 '.hg/store/obsstore'.
60 60
61 61 The file starts with a version header:
62 62
63 63 - 1 unsigned byte: version number, starting at zero.
64 64
65 65
66 66 The header is followed by the markers. Each marker is made of:
67 67
68 68 - 1 unsigned byte: number of new changesets "N", can be zero.
69 69
70 70 - 1 unsigned 32-bit integer: metadata size "M" in bytes.
71 71
72 72 - 1 byte: a bit field. It is reserved for flags used in obsolete
73 73 markers common operations, to avoid repeated decoding of metadata
74 74 entries.
75 75
76 76 - 20 bytes: obsoleted changeset identifier.
77 77
78 78 - N*20 bytes: new changeset identifiers.
79 79
80 80 - M bytes: metadata as a sequence of nul-terminated strings. Each
81 81 string contains a key and a value, separated by a colon ':', without
82 82 additional encoding. Keys cannot contain '\0' or ':' and values
83 83 cannot contain '\0'.
84 84 """
85 85 import struct
86 86 import util, base85, node
87 87 from i18n import _
88 88
89 89 _pack = struct.pack
90 90 _unpack = struct.unpack
91 91
92 92 _SEEK_END = 2 # os.SEEK_END was introduced in Python 2.5
93 93
94 94 # the obsolete feature is not mature enough to be enabled by default.
95 95 # you have to rely on a third party extension to enable this.
96 96 _enabled = False
97 97
98 98 # data used for parsing and writing
99 99 _fmversion = 0
100 100 _fmfixed = '>BIB20s'
101 101 _fmnode = '20s'
102 102 _fmfsize = struct.calcsize(_fmfixed)
103 103 _fnodesize = struct.calcsize(_fmnode)
104 104
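# Editor's sketch, not part of the original changeset: a hypothetical
# helper showing how one raw marker maps onto the on-disk layout documented
# in the module docstring. It mirrors the parsing loop of _readmarkers
# below for a single entry.
def _sketchreadone(data, off):
    """Return ((prec, sucs, flags, metadata), newoff) for one raw marker."""
    # fixed part: nb of successors (B), metadata size (I), flags (B),
    # 20-byte precursor node (20s)
    nbsuc, mdsize, flags, pre = _unpack(_fmfixed, data[off:off + _fmfsize])
    off += _fmfsize
    # variable part: nbsuc successor nodes, 20 bytes each
    sucs = _unpack(_fmnode * nbsuc, data[off:off + _fnodesize * nbsuc])
    off += _fnodesize * nbsuc
    # metadata: mdsize bytes of '\0'-separated "key:value" strings
    metadata = data[off:off + mdsize]
    return (pre, sucs, flags, metadata), off + mdsize
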
105 105 ### obsolescence marker flag
106 106
107 107 ## bumpedfix flag
108 108 #
109 109 # When a changeset A' succeeds a changeset A which became public, we call A'
110 110 # "bumped" because it's a successor of a public changeset
111 111 #
112 112 # o A' (bumped)
113 113 # |`:
114 114 # | o A
115 115 # |/
116 116 # o Z
117 117 #
118 118 # The way to solve this situation is to create a new changeset Ad as a child
119 119 # of A. This changeset has the same content as A'. So the diff from A to A'
120 120 # is the same as the diff from A to Ad. Ad is marked as a successor of A'
121 121 #
122 122 # o Ad
123 123 # |`:
124 124 # | x A'
125 125 # |'|
126 126 # o | A
127 127 # |/
128 128 # o Z
129 129 #
130 130 # But by transitivity Ad is also a successor of A. To avoid having Ad marked
131 131 # as bumped too, we add the `bumpedfix` flag to the marker <A', (Ad,)>.
132 132 # This flag means that the successor expresses the changes between the public
133 133 # and bumped version and fixes the situation, breaking the transitivity of
134 134 # "bumped" here.
135 135 bumpedfix = 1
136 136
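# Editor's sketch (hypothetical helper): checking the bit field of a raw
# marker tuple for the bumpedfix flag discussed above; mark[2] holds the
# flags, as in the (prec, sucs, flags, metadata) form used by this module.
def _sketchisbumpedfix(mark):
    return bool(mark[2] & bumpedfix)
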
137 137 def _readmarkers(data):
138 138 """Read and enumerate markers from raw data"""
139 139 off = 0
140 140 diskversion = _unpack('>B', data[off:off + 1])[0]
141 141 off += 1
142 142 if diskversion != _fmversion:
143 143 raise util.Abort(_('parsing obsolete marker: unknown version %r')
144 144 % diskversion)
145 145
146 146 # Loop on markers
147 147 l = len(data)
148 148 while off + _fmfsize <= l:
149 149 # read fixed part
150 150 cur = data[off:off + _fmfsize]
151 151 off += _fmfsize
152 152 nbsuc, mdsize, flags, pre = _unpack(_fmfixed, cur)
153 153 # read replacement
154 154 sucs = ()
155 155 if nbsuc:
156 156 s = (_fnodesize * nbsuc)
157 157 cur = data[off:off + s]
158 158 sucs = _unpack(_fmnode * nbsuc, cur)
159 159 off += s
160 160 # read metadata
161 161 # (metadata will be decoded on demand)
162 162 metadata = data[off:off + mdsize]
163 163 if len(metadata) != mdsize:
164 164 raise util.Abort(_('parsing obsolete marker: metadata is too '
165 165 'short, %d bytes expected, got %d')
166 166 % (mdsize, len(metadata)))
167 167 off += mdsize
168 168 yield (pre, sucs, flags, metadata)
169 169
170 170 def encodemeta(meta):
171 171 """Return encoded metadata string to string mapping.
172 172
173 173 Assume no ':' in keys and no '\0' in either keys or values."""
174 174 for key, value in meta.iteritems():
175 175 if ':' in key or '\0' in key:
176 176 raise ValueError("':' and '\0' are forbidden in metadata keys")
177 177 if '\0' in value:
178 178 raise ValueError("'\0' is forbidden in metadata values")
179 179 return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])
180 180
181 181 def decodemeta(data):
182 182 """Return string to string dictionary from encoded version."""
183 183 d = {}
184 184 for l in data.split('\0'):
185 185 if l:
186 186 key, value = l.split(':')
187 187 d[key] = value
188 188 return d
189 189
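# Editor's sketch: the metadata encoding above round-trips; keys are
# emitted in sorted order, so equal dicts always produce equal strings.
def _sketchmetaroundtrip():
    meta = {'user': 'alice', 'date': '0 0'}
    assert decodemeta(encodemeta(meta)) == meta
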
190 190 class marker(object):
191 191 """Wrap obsolete marker raw data"""
192 192
193 193 def __init__(self, repo, data):
194 194 # the repo argument will be used to create changectx in later version
195 195 self._repo = repo
196 196 self._data = data
197 197 self._decodedmeta = None
198 198
199 199 def __hash__(self):
200 200 return hash(self._data)
201 201
202 202 def __eq__(self, other):
203 203 if type(other) != type(self):
204 204 return False
205 205 return self._data == other._data
206 206
207 207 def precnode(self):
208 208 """Precursor changeset node identifier"""
209 209 return self._data[0]
210 210
211 211 def succnodes(self):
212 212 """List of successor changesets node identifiers"""
213 213 return self._data[1]
214 214
215 215 def metadata(self):
216 216 """Decoded metadata dictionary"""
217 217 if self._decodedmeta is None:
218 218 self._decodedmeta = decodemeta(self._data[3])
219 219 return self._decodedmeta
220 220
221 221 def date(self):
222 222 """Creation date as (unixtime, offset)"""
223 223 parts = self.metadata()['date'].split(' ')
224 224 return (float(parts[0]), int(parts[1]))
225 225
226 226 class obsstore(object):
227 227 """Store obsolete markers
228 228
229 229 Markers can be accessed with two mappings:
230 230 - precursors[x] -> set(markers on precursors edges of x)
231 231 - successors[x] -> set(markers on successors edges of x)
232 232 """
233 233
234 234 def __init__(self, sopener):
235 235 # caches for various obsolescence related cache
236 236 self.caches = {}
237 237 self._all = []
238 238 # new markers to serialize
239 239 self.precursors = {}
240 240 self.successors = {}
241 241 self.sopener = sopener
242 242 data = sopener.tryread('obsstore')
243 243 if data:
244 244 self._load(_readmarkers(data))
245 245
246 246 def __iter__(self):
247 247 return iter(self._all)
248 248
249 249 def __nonzero__(self):
250 250 return bool(self._all)
251 251
252 252 def create(self, transaction, prec, succs=(), flag=0, metadata=None):
253 253 """obsolete: add a new obsolete marker
254 254
255 255 * ensure it is hashable
256 256 * check mandatory metadata
257 257 * encode metadata
258 258 """
259 259 if metadata is None:
260 260 metadata = {}
261 261 if 'date' not in metadata:
262 262 metadata['date'] = "%d %d" % util.makedate()
263 263 if len(prec) != 20:
264 264 raise ValueError(prec)
265 265 for succ in succs:
266 266 if len(succ) != 20:
267 267 raise ValueError(succ)
268 268 marker = (str(prec), tuple(succs), int(flag), encodemeta(metadata))
269 269 self.add(transaction, [marker])
270 270
271 271 def add(self, transaction, markers):
272 272 """Add new markers to the store
273 273
274 274 Take care of filtering duplicates.
275 275 Return the number of new markers."""
276 276 if not _enabled:
277 277 raise util.Abort('obsolete feature is not enabled on this repo')
278 278 known = set(self._all)
279 279 new = []
280 280 for m in markers:
281 281 if m not in known:
282 282 known.add(m)
283 283 new.append(m)
284 284 if new:
285 285 f = self.sopener('obsstore', 'ab')
286 286 try:
287 287 # Whether the file's current position is at the beginning or at
288 288 # the end after opening a file for appending is implementation
289 289 # defined. So we must seek to the end before calling tell(),
290 290 # or we may get a zero offset for non-zero sized files on
291 291 # some platforms (issue3543).
292 292 f.seek(0, _SEEK_END)
293 293 offset = f.tell()
294 294 transaction.add('obsstore', offset)
295 295 # offset == 0: new file - add the version header
296 296 for bytes in _encodemarkers(new, offset == 0):
297 297 f.write(bytes)
298 298 finally:
299 299 # XXX: f.close() == filecache invalidation == obsstore rebuilt.
300 300 # call 'filecacheentry.refresh()' here
301 301 f.close()
302 302 self._load(new)
303 303 # new markers *may* have changed several sets. Invalidate the cache.
304 304 self.caches.clear()
305 305 return len(new)
306 306
307 307 def mergemarkers(self, transaction, data):
308 308 markers = _readmarkers(data)
309 309 self.add(transaction, markers)
310 310
311 311 def _load(self, markers):
312 312 for mark in markers:
313 313 self._all.append(mark)
314 314 pre, sucs = mark[:2]
315 315 self.successors.setdefault(pre, set()).add(mark)
316 316 for suc in sucs:
317 317 self.precursors.setdefault(suc, set()).add(mark)
318 318 if node.nullid in self.precursors:
319 319 raise util.Abort(_('bad obsolescence marker detected: '
320 320 'invalid successors nullid'))
321 321
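# Editor's sketch (hypothetical usage, not part of the original changeset):
# recording that changeset `old` was rewritten into `new`, both 20-byte
# binary nodes. Assumes the caller holds the repo lock and that the obsolete
# feature is enabled (_enabled), since obsstore.add aborts otherwise.
def _sketchrecordreplacement(repo, old, new):
    tr = repo.transaction('sketch-obsmarker')
    try:
        repo.obsstore.create(tr, old, (new,))
        tr.close()
    finally:
        tr.release()
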
322 322 def _encodemarkers(markers, addheader=False):
323 323 # Kept separate from flushmarkers(), it will be reused for
324 324 # markers exchange.
325 325 if addheader:
326 326 yield _pack('>B', _fmversion)
327 327 for marker in markers:
328 328 yield _encodeonemarker(marker)
329 329
330 330
331 331 def _encodeonemarker(marker):
332 332 pre, sucs, flags, metadata = marker
333 333 nbsuc = len(sucs)
334 334 format = _fmfixed + (_fmnode * nbsuc)
335 335 data = [nbsuc, len(metadata), flags, pre]
336 336 data.extend(sucs)
337 337 return _pack(format, *data) + metadata
338 338
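# Editor's sketch: markers serialized by _encodemarkers can be read back
# with _readmarkers; node.nullid merely stands in for a real 20-byte
# changeset identifier here.
def _sketchserializeroundtrip():
    mark = (node.nullid, (), 0, encodemeta({'user': 'alice'}))
    data = ''.join(_encodemarkers([mark], addheader=True))
    assert list(_readmarkers(data)) == [mark]
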
339 339 # arbitrarily picked to fit into the 8K limit from HTTP server
340 340 # you have to take into account:
341 341 # - the version header
342 342 # - the base85 encoding
343 343 _maxpayload = 5300
344 344
345 345 def listmarkers(repo):
346 346 """List markers over pushkey"""
347 347 if not repo.obsstore:
348 348 return {}
349 349 keys = {}
350 350 parts = []
351 351 currentlen = _maxpayload * 2 # ensure we create a new part
352 352 for marker in repo.obsstore:
353 353 nextdata = _encodeonemarker(marker)
354 354 if (len(nextdata) + currentlen > _maxpayload):
355 355 currentpart = []
356 356 currentlen = 0
357 357 parts.append(currentpart)
358 358 currentpart.append(nextdata)
359 359 currentlen += len(nextdata)
360 360 for idx, part in enumerate(reversed(parts)):
361 361 data = ''.join([_pack('>B', _fmversion)] + part)
362 362 keys['dump%i' % idx] = base85.b85encode(data)
363 363 return keys
364 364
365 365 def pushmarker(repo, key, old, new):
366 366 """Push markers over pushkey"""
367 367 if not key.startswith('dump'):
368 368 repo.ui.warn(_('unknown key: %r') % key)
369 369 return 0
370 370 if old:
371 371 repo.ui.warn(_('unexpected old value for %r') % key)
372 372 return 0
373 373 data = base85.b85decode(new)
374 374 lock = repo.lock()
375 375 try:
376 376 tr = repo.transaction('pushkey: obsolete markers')
377 377 try:
378 378 repo.obsstore.mergemarkers(tr, data)
379 379 tr.close()
380 380 return 1
381 381 finally:
382 382 tr.release()
383 383 finally:
384 384 lock.release()
385 385
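# Editor's sketch (hypothetical helper): listmarkers and pushmarker are the
# two ends of the pushkey based exchange; a receiver would apply each
# received "dump<N>" part in turn, ending with dump0 as in syncpush below.
def _sketchapplydumps(repo, keys):
    for key in sorted(keys, reverse=True):
        pushmarker(repo, key, '', keys[key])
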
386 386 def syncpush(repo, remote):
387 387 """utility function to push obsolete markers to a remote
388 388
389 389 Exists mostly to allow overriding for experimentation purposes"""
390 390 if (_enabled and repo.obsstore and
391 391 'obsolete' in remote.listkeys('namespaces')):
392 392 rslts = []
393 393 remotedata = repo.listkeys('obsolete')
394 394 for key in sorted(remotedata, reverse=True):
395 395 # reverse sort to ensure we end with dump0
396 396 data = remotedata[key]
397 397 rslts.append(remote.pushkey('obsolete', key, '', data))
398 398 if [r for r in rslts if not r]:
399 399 msg = _('failed to push some obsolete markers!\n')
400 400 repo.ui.warn(msg)
401 401
402 402 def syncpull(repo, remote, gettransaction):
403 403 """utility function to pull obsolete markers from a remote
404 404
405 405 `gettransaction` is a function that returns the pull transaction, creating
406 406 one if necessary. We return the transaction to inform the calling code that
407 407 a new transaction has been created (when applicable).
408 408
409 409 Exists mostly to allow overriding for experimentation purposes"""
410 410 tr = None
411 411 if _enabled:
412 412 repo.ui.debug('fetching remote obsolete markers\n')
413 413 remoteobs = remote.listkeys('obsolete')
414 414 if 'dump0' in remoteobs:
415 415 tr = gettransaction()
416 416 for key in sorted(remoteobs, reverse=True):
417 417 if key.startswith('dump'):
418 418 data = base85.b85decode(remoteobs[key])
419 419 repo.obsstore.mergemarkers(tr, data)
420 420 repo.invalidatevolatilesets()
421 421 return tr
422 422
423 423 def allmarkers(repo):
424 424 """all obsolete markers known in a repository"""
425 425 for markerdata in repo.obsstore:
426 426 yield marker(repo, markerdata)
427 427
428 428 def precursormarkers(ctx):
429 429 """obsolete marker marking this changeset as a successors"""
430 430 for data in ctx._repo.obsstore.precursors.get(ctx.node(), ()):
431 431 yield marker(ctx._repo, data)
432 432
433 433 def successormarkers(ctx):
434 434 """obsolete marker making this changeset obsolete"""
435 435 for data in ctx._repo.obsstore.successors.get(ctx.node(), ()):
436 436 yield marker(ctx._repo, data)
437 437
438 438 def allsuccessors(obsstore, nodes, ignoreflags=0):
439 439 """Yield node for every successor of <nodes>.
440 440
441 441 Some successors may be unknown locally.
442 442
443 443 This is a linear yield unsuited to detecting split changesets. It includes
444 444 initial nodes too."""
445 445 remaining = set(nodes)
446 446 seen = set(remaining)
447 447 while remaining:
448 448 current = remaining.pop()
449 449 yield current
450 450 for mark in obsstore.successors.get(current, ()):
451 451 # ignore marker flagged with specified flag
452 452 if mark[2] & ignoreflags:
453 453 continue
454 454 for suc in mark[1]:
455 455 if suc not in seen:
456 456 seen.add(suc)
457 457 remaining.add(suc)
458 458
459 def allprecursors(obsstore, nodes, ignoreflags=0):
460 """Yield node for every precursors of <nodes>.
461
462 Some precursors may be unknown locally.
463
464 This is a linear yield unsuited to detecting folded changesets. It includes
465 initial nodes too."""
466
467 remaining = set(nodes)
468 seen = set(remaining)
469 while remaining:
470 current = remaining.pop()
471 yield current
472 for mark in obsstore.precursors.get(current, ()):
473 # ignore marker flagged with specified flag
474 if mark[2] & ignoreflags:
475 continue
476 prec = mark[0]
477 if prec not in seen:
478 seen.add(prec)
479 remaining.add(prec)
480
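# Editor's sketch: with markers A -> B and B -> C in a store, allsuccessors
# walks forward from A to {A, B, C} while allprecursors walks backward from
# C to {C, B, A}; a minimal stand-in object supplies the two mappings.
def _sketchwalkbothways():
    class fakestore(object):
        pass
    a, b, c = 'a' * 20, 'b' * 20, 'c' * 20
    mab = (a, (b,), 0, '')
    mbc = (b, (c,), 0, '')
    store = fakestore()
    store.successors = {a: set([mab]), b: set([mbc])}
    store.precursors = {b: set([mab]), c: set([mbc])}
    assert set(allsuccessors(store, [a])) == set([a, b, c])
    assert set(allprecursors(store, [c])) == set([c, b, a])
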
459 481 def foreground(repo, nodes):
460 482 """return all nodes in the "foreground" of other node
461 483
462 484 The foreground of a revision is anything reachable using the parent -> child
463 485 or precursor -> successor relations. It is very similar to "descendant" but
464 486 augmented with obsolescence information.
465 487
466 488 Beware that obsolescence cycles may result in complex situations.
467 489 """
468 490 repo = repo.unfiltered()
469 491 foreground = set(repo.set('%ln::', nodes))
470 492 if repo.obsstore:
471 493 # We only need this complicated logic if there is obsolescence
472 494 # XXX will probably deserve an optimised revset.
473 495 nm = repo.changelog.nodemap
474 496 plen = -1
475 497 # compute the whole set of successors or descendants
476 498 while len(foreground) != plen:
477 499 plen = len(foreground)
478 500 succs = set(c.node() for c in foreground)
479 501 mutable = [c.node() for c in foreground if c.mutable()]
480 502 succs.update(allsuccessors(repo.obsstore, mutable))
481 503 known = (n for n in succs if n in nm)
482 504 foreground = set(repo.set('%ln::', known))
483 505 return set(c.node() for c in foreground)
484 506
485 507
486 508 def successorssets(repo, initialnode, cache=None):
487 509 """Return all set of successors of initial nodes
488 510
489 511 A successors set of changeset A is a group of revisions that succeed A.
490 512 It succeeds A as a consistent whole, each revision being only a partial
491 513 replacement. Successors sets contain non-obsolete changesets only.
492 514
493 515 In most cases a changeset A has zero (changeset pruned) or a single
494 516 successors set that contains a single successor (changeset A replaced by
495 517 A').
496 518
497 519 When a changeset is split, it results in a successors set containing more than
498 520 a single element. Divergent rewriting will result in multiple successors
499 521 sets.
500 522
501 523 They are returned as a list of tuples containing all valid successors sets.
502 524
503 525 Final successors unknown locally are considered a plain prune (obsoleted
504 526 without successors).
505 527
506 528 The optional `cache` parameter is a dictionary that may contain
507 529 precomputed successors sets. It is meant to reuse the computation of
508 530 previous calls to `successorssets` when multiple calls are made at the same
509 531 time. The cache dictionary is updated in place. The caller is responsible
510 532 for its life span. Code that makes multiple calls to `successorssets`
511 533 *must* use this cache mechanism or suffer terrible performance."""
512 534
513 535 succmarkers = repo.obsstore.successors
514 536
515 537 # Stack of nodes we search successors sets for
516 538 toproceed = [initialnode]
517 539 # set version of above list for fast loop detection
518 540 # element added to "toproceed" must be added here
519 541 stackedset = set(toproceed)
520 542 if cache is None:
521 543 cache = {}
522 544
523 545 # This while loop is the flattened version of a recursive search for
524 546 # successors sets
525 547 #
526 548 # def successorssets(x):
527 549 # successors = directsuccessors(x)
528 550 # ss = [[]]
529 551 # for succ in directsuccessors(x):
530 552 # # product as in itertools cartesian product
531 553 # ss = product(ss, successorssets(succ))
532 554 # return ss
533 555 #
534 556 # But we can not use plain recursive calls here:
535 557 # - that would blow the python call stack
536 558 # - obsolescence markers may have cycles, we need to handle them.
537 559 #
538 560 # The `toproceed` list acts as our call stack. Every node we search
539 561 # successors sets for is stacked there.
540 562 #
541 563 # The `stackedset` is a set version of this stack, used to check if a node
542 564 # is already stacked. This check is used to detect cycles and prevent
543 565 # infinite loops.
544 566 #
545 567 # successors sets of all nodes are stored in the `cache` dictionary.
546 568 #
547 569 # After this while loop ends we use the cache to return the successors sets
548 570 # for the node requested by the caller.
549 571 while toproceed:
550 572 # Every iteration tries to compute the successors sets of the topmost
551 573 # node of the stack: CURRENT.
552 574 #
553 575 # There are four possible outcomes:
554 576 #
555 577 # 1) We already know the successors sets of CURRENT:
556 578 # -> mission accomplished, pop it from the stack.
557 579 # 2) Node is not obsolete:
558 580 # -> the node is its own successors set. Add it to the cache.
559 581 # 3) We do not know successors set of direct successors of CURRENT:
560 582 # -> We add those successors to the stack.
561 583 # 4) We know successors sets of all direct successors of CURRENT:
562 584 # -> We can compute CURRENT successors set and add it to the
563 585 # cache.
564 586 #
565 587 current = toproceed[-1]
566 588 if current in cache:
567 589 # case (1): We already know the successors sets
568 590 stackedset.remove(toproceed.pop())
569 591 elif current not in succmarkers:
570 592 # case (2): The node is not obsolete.
571 593 if current in repo:
572 594 # We have a valid last successor.
573 595 cache[current] = [(current,)]
574 596 else:
575 597 # Final obsolete version is unknown locally.
576 598 # Do not count that as a valid successor.
577 599 cache[current] = []
578 600 else:
579 601 # cases (3) and (4)
580 602 #
581 603 # We proceed in two phases. Phase 1 aims to distinguish case (3)
582 604 # from case (4):
583 605 #
584 606 # For each direct successors of CURRENT, we check whether its
585 607 # successors sets are known. If they are not, we stack the
586 608 # unknown node and proceed to the next iteration of the while
587 609 # loop. (case 3)
588 610 #
589 611 # During this step, we may detect obsolescence cycles: a node
590 612 # with unknown successors sets but already in the call stack.
591 613 # In such a situation, we arbitrarily set the successors sets of
592 614 # the node to nothing (node pruned) to break the cycle.
593 615 #
594 616 # If no break was encountered we proceed to phase 2.
595 617 #
596 618 # Phase 2 computes successors sets of CURRENT (case 4); see details
597 619 # in phase 2 itself.
598 620 #
599 621 # Note the two levels of iteration in each phase.
600 622 # - The first one handles obsolescence markers using CURRENT as
601 623 # precursor (successors markers of CURRENT).
602 624 #
603 625 # Having multiple entries here means divergence.
604 626 #
605 627 # - The second one handles successors defined in each marker.
606 628 #
607 629 # Having none means a pruned node; multiple successors mean a split;
608 630 # a single successor is a standard replacement.
609 631 #
610 632 for mark in sorted(succmarkers[current]):
611 633 for suc in mark[1]:
612 634 if suc not in cache:
613 635 if suc in stackedset:
614 636 # cycle breaking
615 637 cache[suc] = []
616 638 else:
617 639 # case (3) If we have not computed successors sets
618 640 # of one of those successors we add it to the
619 641 # `toproceed` stack and stop all work for this
620 642 # iteration.
621 643 toproceed.append(suc)
622 644 stackedset.add(suc)
623 645 break
624 646 else:
625 647 continue
626 648 break
627 649 else:
628 650 # case (4): we know all successors sets of all direct
629 651 # successors
630 652 #
631 653 # Successors set contributed by each marker depends on the
632 654 # successors sets of all its "successors" nodes.
633 655 #
634 656 # Each different marker is a divergence in the obsolescence
635 657 # history. It contributes successors sets distinct from other
636 658 # markers.
637 659 #
638 660 # Within a marker, a successor may have divergent successors
639 661 # sets. In such a case, the marker will contribute multiple
640 662 # divergent successors sets. If multiple successors have
641 663 # divergent successors sets, a cartesian product is used.
642 664 #
643 665 # At the end we post-process successors sets to remove
644 666 # duplicated entries and successors sets that are strict subsets of
645 667 # another one.
646 668 succssets = []
647 669 for mark in sorted(succmarkers[current]):
648 670 # successors sets contributed by this marker
649 671 markss = [[]]
650 672 for suc in mark[1]:
652 674 # cartesian product with previous successors
652 674 productresult = []
653 675 for prefix in markss:
654 676 for suffix in cache[suc]:
655 677 newss = list(prefix)
656 678 for part in suffix:
658 680 # do not duplicate entries in successors set;
659 681 # the first entry wins.
659 681 if part not in newss:
660 682 newss.append(part)
661 683 productresult.append(newss)
662 684 markss = productresult
663 685 succssets.extend(markss)
665 687 # remove duplicates and subsets
665 687 seen = []
666 688 final = []
667 689 candidate = sorted(((set(s), s) for s in succssets if s),
668 690 key=lambda x: len(x[1]), reverse=True)
669 691 for setversion, listversion in candidate:
670 692 for seenset in seen:
671 693 if setversion.issubset(seenset):
672 694 break
673 695 else:
674 696 final.append(listversion)
675 697 seen.append(setversion)
676 698 final.reverse() # put small successors sets first
677 699 cache[current] = final
678 700 return cache[initialnode]
679 701
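# Editor's sketch (hypothetical usage): sharing one cache dictionary across
# calls, as the successorssets docstring requires for good performance.
def _sketchsuccessorssets(repo, nodes):
    cache = {}
    return dict((n, successorssets(repo, n, cache)) for n in nodes)
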
680 702 def _knownrevs(repo, nodes):
681 703 """yield revision numbers of known nodes passed in parameters
682 704
683 705 Unknown revisions are silently ignored."""
684 706 torev = repo.changelog.nodemap.get
685 707 for n in nodes:
686 708 rev = torev(n)
687 709 if rev is not None:
688 710 yield rev
689 711
690 712 # mapping of 'set-name' -> <function to compute this set>
691 713 cachefuncs = {}
692 714 def cachefor(name):
693 715 """Decorator to register a function as computing the cache for a set"""
694 716 def decorator(func):
695 717 assert name not in cachefuncs
696 718 cachefuncs[name] = func
697 719 return func
698 720 return decorator
699 721
700 722 def getrevs(repo, name):
701 723 """Return the set of revision that belong to the <name> set
702 724
703 725 Such access may compute the set and cache it for future use"""
704 726 repo = repo.unfiltered()
705 727 if not repo.obsstore:
706 728 return ()
707 729 if name not in repo.obsstore.caches:
708 730 repo.obsstore.caches[name] = cachefuncs[name](repo)
709 731 return repo.obsstore.caches[name]
710 732
711 733 # To be simple we need to invalidate obsolescence caches when:
712 734 #
713 735 # - a new changeset is added
714 736 # - the public phase is changed
715 737 # - obsolescence markers are added
716 738 # - strip is used on a repo
717 739 def clearobscaches(repo):
718 740 """Remove all obsolescence related cache from a repo
719 741
720 742 This remove all cache in obsstore is the obsstore already exist on the
721 743 repo.
722 744
723 745 (We could be smarter here given the exact event that trigger the cache
724 746 clearing)"""
725 747 # only clear the cache if there is obsstore data in this repo
726 748 if 'obsstore' in repo._filecache:
727 749 repo.obsstore.caches.clear()
728 750
729 751 @cachefor('obsolete')
730 752 def _computeobsoleteset(repo):
731 753 """the set of obsolete revisions"""
732 754 obs = set()
733 755 getrev = repo.changelog.nodemap.get
734 756 getphase = repo._phasecache.phase
735 757 for node in repo.obsstore.successors:
736 758 rev = getrev(node)
737 759 if rev is not None and getphase(repo, rev):
738 760 obs.add(rev)
739 761 return obs
740 762
741 763 @cachefor('unstable')
742 764 def _computeunstableset(repo):
743 765 """the set of non obsolete revisions with obsolete parents"""
744 766 # revset is not efficient enough here
745 767 # we do (obsolete()::) - obsolete() by hand
746 768 obs = getrevs(repo, 'obsolete')
747 769 if not obs:
748 770 return set()
749 771 cl = repo.changelog
750 772 return set(r for r in cl.descendants(obs) if r not in obs)
751 773
752 774 @cachefor('suspended')
753 775 def _computesuspendedset(repo):
754 776 """the set of obsolete parents with non obsolete descendants"""
755 777 suspended = repo.changelog.ancestors(getrevs(repo, 'unstable'))
756 778 return set(r for r in getrevs(repo, 'obsolete') if r in suspended)
757 779
758 780 @cachefor('extinct')
759 781 def _computeextinctset(repo):
760 782 """the set of obsolete parents without non obsolete descendants"""
761 783 return getrevs(repo, 'obsolete') - getrevs(repo, 'suspended')
762 784
763 785
764 786 @cachefor('bumped')
765 787 def _computebumpedset(repo):
766 788 """the set of revs trying to obsolete public revisions"""
767 789 # get all possible bumped changesets
768 790 tonode = repo.changelog.node
769 791 publicnodes = (tonode(r) for r in repo.revs('public()'))
770 792 successors = allsuccessors(repo.obsstore, publicnodes,
771 793 ignoreflags=bumpedfix)
772 794 # revisions that are public or already obsolete don't count as bumped
773 795 query = '%ld - obsolete() - public()'
774 796 return set(repo.revs(query, _knownrevs(repo, successors)))
775 797
776 798 @cachefor('divergent')
777 799 def _computedivergentset(repo):
778 800 """the set of rev that compete to be the final successors of some revision.
779 801 """
780 802 divergent = set()
781 803 obsstore = repo.obsstore
782 804 newermap = {}
783 805 for ctx in repo.set('(not public()) - obsolete()'):
784 806 mark = obsstore.precursors.get(ctx.node(), ())
785 807 toprocess = set(mark)
786 808 while toprocess:
787 809 prec = toprocess.pop()[0]
788 810 if prec not in newermap:
789 811 successorssets(repo, prec, newermap)
790 812 newer = [n for n in newermap[prec] if n]
791 813 if len(newer) > 1:
792 814 divergent.add(ctx.rev())
793 815 break
794 816 toprocess.update(obsstore.precursors.get(prec, ()))
795 817 return divergent
796 818
797 819
798 820 def createmarkers(repo, relations, flag=0, metadata=None):
799 821 """Add obsolete markers between changesets in a repo
800 822
801 823 <relations> must be an iterable of (<old>, (<new>, ...)) tuples.
802 824 `old` and `new` are changectxs.
803 825
804 826 Trying to obsolete a public changeset will raise an exception.
805 827
806 828 Current user and date are used unless specified otherwise in the
807 829 metadata attribute.
808 830
809 831 This function operates within a transaction of its own, but does
810 832 not take any lock on the repo.
811 833 """
812 834 # prepare metadata
813 835 if metadata is None:
814 836 metadata = {}
815 837 if 'date' not in metadata:
816 838 metadata['date'] = '%i %i' % util.makedate()
817 839 if 'user' not in metadata:
818 840 metadata['user'] = repo.ui.username()
819 841 tr = repo.transaction('add-obsolescence-marker')
820 842 try:
821 843 for prec, sucs in relations:
822 844 if not prec.mutable():
823 845 raise util.Abort("cannot obsolete immutable changeset: %s"
824 846 % prec)
825 847 nprec = prec.node()
826 848 nsucs = tuple(s.node() for s in sucs)
827 849 if nprec in nsucs:
828 850 raise util.Abort("changeset %s cannot obsolete itself" % prec)
829 851 repo.obsstore.create(tr, nprec, nsucs, flag, metadata)
830 852 repo.filteredrevcache.clear()
831 853 tr.close()
832 854 finally:
833 855 tr.release()
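
# Editor's sketch (hypothetical usage): obsoleting one changectx in favor of
# another via createmarkers; oldctx and newctx are placeholders for real
# changectx objects chosen by the caller.
def _sketchobsolete(repo, oldctx, newctx):
    createmarkers(repo, [(oldctx, (newctx,))])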