##// END OF EJS Templates
obsstore: add a __len__ method...
Pierre-Yves David -
r20585:f3c8db3d default
parent child Browse files
Show More
@@ -1,857 +1,860 b''
1 1 # obsolete.py - obsolete markers handling
2 2 #
3 3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
4 4 # Logilab SA <contact@logilab.fr>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 """Obsolete markers handling
10 10
11 11 An obsolete marker maps an old changeset to a list of new
12 12 changesets. If the list of new changesets is empty, the old changeset
13 13 is said to be "killed". Otherwise, the old changeset is being
14 14 "replaced" by the new changesets.
15 15
16 16 Obsolete markers can be used to record and distribute changeset graph
17 17 transformations performed by history rewriting operations, and help
18 18 building new tools to reconciliate conflicting rewriting actions. To
19 19 facilitate conflicts resolution, markers include various annotations
20 20 besides old and news changeset identifiers, such as creation date or
21 21 author name.
22 22
23 23 The old obsoleted changeset is called "precursor" and possible replacements are
24 24 called "successors". Markers that used changeset X as a precursors are called
25 25 "successor markers of X" because they hold information about the successors of
26 26 X. Markers that use changeset Y as a successors are call "precursor markers of
27 27 Y" because they hold information about the precursors of Y.
28 28
29 29 Examples:
30 30
31 31 - When changeset A is replaced by a changeset A', one marker is stored:
32 32
33 33 (A, (A'))
34 34
35 35 - When changesets A and B are folded into a new changeset C two markers are
36 36 stored:
37 37
38 38 (A, (C,)) and (B, (C,))
39 39
40 40 - When changeset A is simply "pruned" from the graph, a marker is created:
41 41
42 42 (A, ())
43 43
44 44 - When changeset A is split into B and C, a single marker is used:
45 45
46 46 (A, (B, C))
47 47
48 48 We use a single marker to distinguish the "split" case from the "divergence"
49 49 case. If two independent operations rewrite the same changeset A into A' and
50 50 A'', we have an error case: divergent rewriting. We can detect it because
51 51 two markers will be created independently:
52 52
53 53 (A, (B,)) and (A, (C,))
54 54
55 55 Format
56 56 ------
57 57
58 58 Markers are stored in an append-only file stored in
59 59 '.hg/store/obsstore'.
60 60
61 61 The file starts with a version header:
62 62
63 63 - 1 unsigned byte: version number, starting at zero.
64 64
65 65
66 66 The header is followed by the markers. Each marker is made of:
67 67
68 68 - 1 unsigned byte: number of new changesets "R", could be zero.
69 69
70 70 - 1 unsigned 32-bits integer: metadata size "M" in bytes.
71 71
72 72 - 1 byte: a bit field. It is reserved for flags used in obsolete
73 73 markers common operations, to avoid repeated decoding of metadata
74 74 entries.
75 75
76 76 - 20 bytes: obsoleted changeset identifier.
77 77
78 78 - N*20 bytes: new changesets identifiers.
79 79
80 80 - M bytes: metadata as a sequence of nul-terminated strings. Each
81 81 string contains a key and a value, separated by a colon ':', without
82 82 additional encoding. Keys cannot contain '\0' or ':' and values
83 83 cannot contain '\0'.
84 84 """
85 85 import struct
86 86 import util, base85, node
87 87 import phases
88 88 from i18n import _
89 89
90 90 _pack = struct.pack
91 91 _unpack = struct.unpack
92 92
93 93 _SEEK_END = 2 # os.SEEK_END was introduced in Python 2.5
94 94
95 95 # the obsolete feature is not mature enough to be enabled by default.
96 96 # you have to rely on third party extension extension to enable this.
97 97 _enabled = False
98 98
99 99 # data used for parsing and writing
100 100 _fmversion = 0
101 101 _fmfixed = '>BIB20s'
102 102 _fmnode = '20s'
103 103 _fmfsize = struct.calcsize(_fmfixed)
104 104 _fnodesize = struct.calcsize(_fmnode)
105 105
106 106 ### obsolescence marker flag
107 107
108 108 ## bumpedfix flag
109 109 #
110 110 # When a changeset A' succeed to a changeset A which became public, we call A'
111 111 # "bumped" because it's a successors of a public changesets
112 112 #
113 113 # o A' (bumped)
114 114 # |`:
115 115 # | o A
116 116 # |/
117 117 # o Z
118 118 #
119 119 # The way to solve this situation is to create a new changeset Ad as children
120 120 # of A. This changeset have the same content than A'. So the diff from A to A'
121 121 # is the same than the diff from A to Ad. Ad is marked as a successors of A'
122 122 #
123 123 # o Ad
124 124 # |`:
125 125 # | x A'
126 126 # |'|
127 127 # o | A
128 128 # |/
129 129 # o Z
130 130 #
131 131 # But by transitivity Ad is also a successors of A. To avoid having Ad marked
132 132 # as bumped too, we add the `bumpedfix` flag to the marker. <A', (Ad,)>.
133 133 # This flag mean that the successors express the changes between the public and
134 134 # bumped version and fix the situation, breaking the transitivity of
135 135 # "bumped" here.
136 136 bumpedfix = 1
137 137
def _readmarkers(data):
    """Parse raw obsstore bytes, yielding (pre, sucs, flags, metadata) tuples.

    The input must start with a one-byte version header; an unknown
    version aborts immediately.
    """
    offset = 0
    diskversion = _unpack('>B', data[offset:offset + 1])[0]
    offset += 1
    if diskversion != _fmversion:
        raise util.Abort(_('parsing obsolete marker: unknown version %r')
                         % diskversion)

    # walk the remaining bytes, decoding one marker per iteration
    total = len(data)
    while offset + _fmfsize <= total:
        # fixed-size part: successor count, metadata size, flags, precursor
        fixed = data[offset:offset + _fmfsize]
        offset += _fmfsize
        nbsuc, mdsize, flags, pre = _unpack(_fmfixed, fixed)
        # variable-size part: the successor node identifiers
        sucs = ()
        if nbsuc:
            nodesbytes = _fnodesize * nbsuc
            sucs = _unpack(_fmnode * nbsuc, data[offset:offset + nodesbytes])
            offset += nodesbytes
        # metadata blob is kept encoded; callers decode it on demand
        metadata = data[offset:offset + mdsize]
        if len(metadata) != mdsize:
            raise util.Abort(_('parsing obsolete marker: metadata is too '
                               'short, %d bytes expected, got %d')
                             % (mdsize, len(metadata)))
        offset += mdsize
        yield (pre, sucs, flags, metadata)
170 170
def encodemeta(meta):
    """Return encoded metadata string to string mapping.

    Pairs are emitted as "key:value" joined by '\0', in sorted key order.
    Keys may not contain ':' or '\0'; values may not contain '\0'.
    Raises ValueError when a forbidden character is present.
    """
    # .items() works on both Python 2 and 3 (iteritems was py2-only)
    for key, value in meta.items():
        if ':' in key or '\0' in key:
            raise ValueError("':' and '\0' are forbidden in metadata key")
        if '\0' in value:
            # the original message wrongly blamed ':' here; the check is
            # for NUL bytes, which would corrupt the '\0'-joined encoding
            raise ValueError("'\0' is forbidden in metadata value")
    return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])
181 181
def decodemeta(data):
    """Turn an encodemeta()-style string back into a {key: value} dict."""
    result = {}
    for entry in data.split('\0'):
        if not entry:
            # skip empty chunks (e.g. the single chunk of an empty input)
            continue
        key, value = entry.split(':')
        result[key] = value
    return result
190 190
class marker(object):
    """Wrap obsolete marker raw data"""

    def __init__(self, repo, data):
        # the repo argument will be used to create changectx in later version
        self._repo = repo
        self._data = data
        # lazily-populated cache for the decoded metadata dict
        self._decodedmeta = None

    def __hash__(self):
        return hash(self._data)

    def __eq__(self, other):
        # only markers of the exact same class may compare equal
        if type(self) != type(other):
            return False
        return self._data == other._data

    def precnode(self):
        """Precursor changeset node identifier"""
        return self._data[0]

    def succnodes(self):
        """List of successor changesets node identifiers"""
        return self._data[1]

    def metadata(self):
        """Decoded metadata dictionary"""
        if self._decodedmeta is None:
            self._decodedmeta = decodemeta(self._data[3])
        return self._decodedmeta

    def date(self):
        """Creation date as (unixtime, offset)"""
        parts = self.metadata()['date'].split(' ')
        return (float(parts[0]), int(parts[1]))
226 226
class obsstore(object):
    """Store obsolete markers

    Markers can be accessed with two mappings:
    - precursors[x] -> set(markers on precursors edges of x)
    - successors[x] -> set(markers on successors edges of x)
    """

    def __init__(self, sopener):
        # caches for various obsolescence related cache
        self.caches = {}
        # flat list of every known marker tuple (pre, sucs, flags, metadata)
        self._all = []
        # new markers to serialize
        self.precursors = {}
        self.successors = {}
        self.sopener = sopener
        # sopener is the store opener; an absent obsstore file simply
        # yields an empty store
        data = sopener.tryread('obsstore')
        if data:
            self._load(_readmarkers(data))

    def __iter__(self):
        # iterate over raw marker tuples, not wrapped `marker` objects
        return iter(self._all)

    def __len__(self):
        # number of markers currently known to this store
        return len(self._all)

    def __nonzero__(self):
        # Python 2 truthiness hook: a store is true when it holds markers
        return bool(self._all)

    def create(self, transaction, prec, succs=(), flag=0, metadata=None):
        """obsolete: add a new obsolete marker

        * ensuring it is hashable
        * check mandatory metadata
        * encode metadata

        If you are a human writing code creating marker you want to use the
        `createmarkers` function in this module instead.

        return True if a new marker have been added, False if the markers
        already existed (no op).
        """
        if metadata is None:
            metadata = {}
        if 'date' not in metadata:
            metadata['date'] = "%d %d" % util.makedate()
        # precursor and every successor must be full 20-byte binary nodes
        if len(prec) != 20:
            raise ValueError(prec)
        for succ in succs:
            if len(succ) != 20:
                raise ValueError(succ)
        marker = (str(prec), tuple(succs), int(flag), encodemeta(metadata))
        return bool(self.add(transaction, [marker]))

    def add(self, transaction, markers):
        """Add new markers to the store

        Take care of filtering duplicate.
        Return the number of new marker."""
        if not _enabled:
            raise util.Abort('obsolete feature is not enabled on this repo')
        known = set(self._all)
        new = []
        for m in markers:
            if m not in known:
                known.add(m)
                new.append(m)
        if new:
            f = self.sopener('obsstore', 'ab')
            try:
                # Whether the file's current position is at the begin or at
                # the end after opening a file for appending is implementation
                # defined. So we must seek to the end before calling tell(),
                # or we may get a zero offset for non-zero sized files on
                # some platforms (issue3543).
                f.seek(0, _SEEK_END)
                offset = f.tell()
                transaction.add('obsstore', offset)
                # offset == 0: new file - add the version header
                for bytes in _encodemarkers(new, offset == 0):
                    f.write(bytes)
            finally:
                # XXX: f.close() == filecache invalidation == obsstore rebuilt.
                # call 'filecacheentry.refresh()' here
                f.close()
            self._load(new)
            # new marker *may* have changed several set. invalidate the cache.
            self.caches.clear()
        return len(new)

    def mergemarkers(self, transaction, data):
        # decode raw exchange data and add the markers (duplicates filtered)
        markers = _readmarkers(data)
        self.add(transaction, markers)

    def _load(self, markers):
        # index markers into the in-memory mappings; no disk access here
        for mark in markers:
            self._all.append(mark)
            pre, sucs = mark[:2]
            self.successors.setdefault(pre, set()).add(mark)
            for suc in sucs:
                self.precursors.setdefault(suc, set()).add(mark)
        # a marker with nullid as successor would corrupt the graph
        if node.nullid in self.precursors:
            raise util.Abort(_('bad obsolescence marker detected: '
                               'invalid successors nullid'))
328 331
def _encodemarkers(markers, addheader=False):
    """Yield the binary encoding of each marker, optionally preceded by
    the version header byte.

    Kept separate from flushmarkers(); it will be reused for markers
    exchange.
    """
    if addheader:
        yield _pack('>B', _fmversion)
    for onemarker in markers:
        yield _encodeonemarker(onemarker)
336 339
337 340
def _encodeonemarker(marker):
    """Return the binary string encoding a single marker tuple."""
    pre, sucs, flags, metadata = marker
    numsucs = len(sucs)
    # fixed header followed by one node field per successor
    fmt = _fmfixed + _fmnode * numsucs
    fields = [numsucs, len(metadata), flags, pre] + list(sucs)
    return _pack(fmt, *fields) + metadata
345 348
346 349 # arbitrary picked to fit into 8K limit from HTTP server
347 350 # you have to take in account:
348 351 # - the version header
349 352 # - the base85 encoding
350 353 _maxpayload = 5300
351 354
def listmarkers(repo):
    """List markers over pushkey"""
    if not repo.obsstore:
        return {}
    keys = {}
    parts = []
    # start above the payload limit so the first marker opens a new part
    currentlen = _maxpayload * 2
    for marker in repo.obsstore:
        encoded = _encodeonemarker(marker)
        if (len(encoded) + currentlen > _maxpayload):
            # current part is full: start accumulating into a fresh one
            currentpart = []
            currentlen = 0
            parts.append(currentpart)
        currentpart.append(encoded)
        currentlen += len(encoded)
    for idx, part in enumerate(reversed(parts)):
        # every dump carries its own version header and is base85-encoded
        data = ''.join([_pack('>B', _fmversion)] + part)
        keys['dump%i' % idx] = base85.b85encode(data)
    return keys
371 374
def pushmarker(repo, key, old, new):
    """Push markers over pushkey

    `key` must look like 'dump*' and `old` must be empty; `new` is a
    base85-encoded block of markers that is merged into the local
    obsstore inside a transaction, under the repo lock.  Returns 1 on
    success, 0 when the request is rejected.
    """
    if not key.startswith('dump'):
        repo.ui.warn(_('unknown key: %r') % key)
        return 0
    if old:
        # the original message had no %r placeholder, so applying
        # `% key` raised TypeError instead of emitting the warning
        repo.ui.warn(_('unexpected old value for %r') % key)
        return 0
    data = base85.b85decode(new)
    lock = repo.lock()
    try:
        tr = repo.transaction('pushkey: obsolete markers')
        try:
            repo.obsstore.mergemarkers(tr, data)
            tr.close()
            return 1
        finally:
            tr.release()
    finally:
        lock.release()
392 395
def allmarkers(repo):
    """all obsolete markers known in a repository

    Yields each raw marker wrapped in a `marker` object."""
    for rawdata in repo.obsstore:
        yield marker(repo, rawdata)
397 400
def precursormarkers(ctx):
    """obsolete marker marking this changeset as a successors"""
    obsoletestore = ctx._repo.obsstore
    for rawdata in obsoletestore.precursors.get(ctx.node(), ()):
        yield marker(ctx._repo, rawdata)
402 405
def successormarkers(ctx):
    """obsolete marker making this changeset obsolete"""
    obsoletestore = ctx._repo.obsstore
    for rawdata in obsoletestore.successors.get(ctx.node(), ()):
        yield marker(ctx._repo, rawdata)
407 410
def allsuccessors(obsstore, nodes, ignoreflags=0):
    """Yield node for every successor of <nodes>.

    Some successors may be unknown locally.

    This is a linear yield unsuited to detecting split changesets. It includes
    initial nodes too."""
    pending = set(nodes)
    visited = set(pending)
    while pending:
        cur = pending.pop()
        yield cur
        for mark in obsstore.successors.get(cur, ()):
            # skip markers carrying any of the ignored flags
            if mark[2] & ignoreflags:
                continue
            for succ in mark[1]:
                if succ not in visited:
                    visited.add(succ)
                    pending.add(succ)
428 431
def allprecursors(obsstore, nodes, ignoreflags=0):
    """Yield node for every precursors of <nodes>.

    Some precursors may be unknown locally.

    This is a linear yield unsuited to detecting folded changesets. It includes
    initial nodes too."""

    pending = set(nodes)
    visited = set(pending)
    while pending:
        cur = pending.pop()
        yield cur
        for mark in obsstore.precursors.get(cur, ()):
            # skip markers carrying any of the ignored flags
            if mark[2] & ignoreflags:
                continue
            # mark[0] is the precursor node of this marker
            prec = mark[0]
            if prec not in visited:
                visited.add(prec)
                pending.add(prec)
450 453
def foreground(repo, nodes):
    """return all nodes in the "foreground" of other node

    The foreground of a revision is anything reachable using parent -> children
    or precursor -> successor relation. It is very similar to "descendant" but
    augmented with obsolescence information.

    Beware that possible obsolescence cycle may result if complex situation.
    """
    repo = repo.unfiltered()
    foreground = set(repo.set('%ln::', nodes))
    if repo.obsstore:
        # We only need this complicated logic if there is obsolescence
        # XXX will probably deserve an optimised revset.
        nm = repo.changelog.nodemap
        plen = -1
        # compute the whole set of successors or descendants
        # (fixed-point iteration: loop until the set stops growing)
        while len(foreground) != plen:
            plen = len(foreground)
            succs = set(c.node() for c in foreground)
            # only mutable changesets can have successors markers
            mutable = [c.node() for c in foreground if c.mutable()]
            succs.update(allsuccessors(repo.obsstore, mutable))
            # successors unknown to the local changelog are dropped
            known = (n for n in succs if n in nm)
            foreground = set(repo.set('%ln::', known))
    return set(c.node() for c in foreground)
476 479
477 480
def successorssets(repo, initialnode, cache=None):
    """Return all set of successors of initial nodes

    The successors set of a changeset A are a group of revisions that succeed
    A. It succeeds A as a consistent whole, each revision being only a partial
    replacement. The successors set contains non-obsolete changesets only.

    This function returns the full list of successor sets which is why it
    returns a list of tuples and not just a single tuple. Each tuple is a valid
    successors set. Not that (A,) may be a valid successors set for changeset A
    (see below).

    In most cases, a changeset A will have a single element (e.g. the changeset
    A is replaced by A') in its successors set. Though, it is also common for a
    changeset A to have no elements in its successor set (e.g. the changeset
    has been pruned). Therefore, the returned list of successors sets will be
    [(A',)] or [], respectively.

    When a changeset A is split into A' and B', however, it will result in a
    successors set containing more than a single element, i.e. [(A',B')].
    Divergent changesets will result in multiple successors sets, i.e. [(A',),
    (A'')].

    If a changeset A is not obsolete, then it will conceptually have no
    successors set. To distinguish this from a pruned changeset, the successor
    set will only contain itself, i.e. [(A,)].

    Finally, successors unknown locally are considered to be pruned (obsoleted
    without any successors).

    The optional `cache` parameter is a dictionary that may contain precomputed
    successors sets. It is meant to reuse the computation of a previous call to
    `successorssets` when multiple calls are made at the same time. The cache
    dictionary is updated in place. The caller is responsible for its live
    spawn. Code that makes multiple calls to `successorssets` *must* use this
    cache mechanism or suffer terrible performances.

    """

    succmarkers = repo.obsstore.successors

    # Stack of nodes we search successors sets for
    toproceed = [initialnode]
    # set version of above list for fast loop detection
    # element added to "toproceed" must be added here
    stackedset = set(toproceed)
    if cache is None:
        cache = {}

    # This while loop is the flattened version of a recursive search for
    # successors sets
    #
    # def successorssets(x):
    #     successors = directsuccessors(x)
    #     ss = [[]]
    #     for succ in directsuccessors(x):
    #         # product as in itertools cartesian product
    #         ss = product(ss, successorssets(succ))
    #     return ss
    #
    # But we can not use plain recursive calls here:
    # - that would blow the python call stack
    # - obsolescence markers may have cycles, we need to handle them.
    #
    # The `toproceed` list act as our call stack. Every node we search
    # successors set for are stacked there.
    #
    # The `stackedset` is set version of this stack used to check if a node is
    # already stacked. This check is used to detect cycles and prevent infinite
    # loop.
    #
    # successors set of all nodes are stored in the `cache` dictionary.
    #
    # After this while loop ends we use the cache to return the successors sets
    # for the node requested by the caller.
    while toproceed:
        # Every iteration tries to compute the successors sets of the topmost
        # node of the stack: CURRENT.
        #
        # There are four possible outcomes:
        #
        # 1) We already know the successors sets of CURRENT:
        #    -> mission accomplished, pop it from the stack.
        # 2) Node is not obsolete:
        #    -> the node is its own successors sets. Add it to the cache.
        # 3) We do not know successors set of direct successors of CURRENT:
        #    -> We add those successors to the stack.
        # 4) We know successors sets of all direct successors of CURRENT:
        #    -> We can compute CURRENT successors set and add it to the
        #       cache.
        #
        current = toproceed[-1]
        if current in cache:
            # case (1): We already know the successors sets
            stackedset.remove(toproceed.pop())
        elif current not in succmarkers:
            # case (2): The node is not obsolete.
            if current in repo:
                # We have a valid last successors.
                cache[current] = [(current,)]
            else:
                # Final obsolete version is unknown locally.
                # Do not count that as a valid successors
                cache[current] = []
        else:
            # cases (3) and (4)
            #
            # We proceed in two phases. Phase 1 aims to distinguish case (3)
            # from case (4):
            #
            #     For each direct successors of CURRENT, we check whether its
            #     successors sets are known. If they are not, we stack the
            #     unknown node and proceed to the next iteration of the while
            #     loop. (case 3)
            #
            #     During this step, we may detect obsolescence cycles: a node
            #     with unknown successors sets but already in the call stack.
            #     In such a situation, we arbitrary set the successors sets of
            #     the node to nothing (node pruned) to break the cycle.
            #
            #     If no break was encountered we proceed to phase 2.
            #
            # Phase 2 computes successors sets of CURRENT (case 4); see details
            # in phase 2 itself.
            #
            # Note the two levels of iteration in each phase.
            # - The first one handles obsolescence markers using CURRENT as
            #   precursor (successors markers of CURRENT).
            #
            #   Having multiple entry here means divergence.
            #
            # - The second one handles successors defined in each marker.
            #
            #   Having none means pruned node, multiple successors means split,
            #   single successors are standard replacement.
            #
            for mark in sorted(succmarkers[current]):
                for suc in mark[1]:
                    if suc not in cache:
                        if suc in stackedset:
                            # cycle breaking
                            cache[suc] = []
                        else:
                            # case (3) If we have not computed successors sets
                            # of one of those successors we add it to the
                            # `toproceed` stack and stop all work for this
                            # iteration.
                            toproceed.append(suc)
                            stackedset.add(suc)
                            break
                else:
                    continue
                # the inner break must propagate out of the marker loop too
                break
            else:
                # case (4): we know all successors sets of all direct
                # successors
                #
                # Successors set contributed by each marker depends on the
                # successors sets of all its "successors" node.
                #
                # Each different marker is a divergence in the obsolescence
                # history. It contributes successors sets distinct from other
                # markers.
                #
                # Within a marker, a successor may have divergent successors
                # sets. In such a case, the marker will contribute multiple
                # divergent successors sets. If multiple successors have
                # divergent successors sets, a cartesian product is used.
                #
                # At the end we post-process successors sets to remove
                # duplicated entry and successors set that are strict subset of
                # another one.
                succssets = []
                for mark in sorted(succmarkers[current]):
                    # successors sets contributed by this marker
                    markss = [[]]
                    for suc in mark[1]:
                        # cardinal product with previous successors
                        productresult = []
                        for prefix in markss:
                            for suffix in cache[suc]:
                                newss = list(prefix)
                                for part in suffix:
                                    # do not duplicated entry in successors set
                                    # first entry wins.
                                    if part not in newss:
                                        newss.append(part)
                                productresult.append(newss)
                        markss = productresult
                    succssets.extend(markss)
                # remove duplicated and subset
                seen = []
                final = []
                candidate = sorted(((set(s), s) for s in succssets if s),
                                   key=lambda x: len(x[1]), reverse=True)
                # largest sets first: a set is kept only if it is not a
                # subset of an already-kept one
                for setversion, listversion in candidate:
                    for seenset in seen:
                        if setversion.issubset(seenset):
                            break
                    else:
                        final.append(listversion)
                        seen.append(setversion)
                final.reverse() # put small successors set first
                cache[current] = final
    return cache[initialnode]
683 686
def _knownrevs(repo, nodes):
    """yield revision numbers of known nodes passed in parameters

    Unknown revisions are silently ignored."""
    nodemapget = repo.changelog.nodemap.get
    for item in nodes:
        revnum = nodemapget(item)
        if revnum is not None:
            yield revnum
693 696
# mapping of 'set-name' -> <function to compute this set>
cachefuncs = {}

def cachefor(name):
    """Decorator to register a function as computing the cache for a set"""
    def register(computefunc):
        # each set name may only have one registered compute function
        assert name not in cachefuncs
        cachefuncs[name] = computefunc
        return computefunc
    return register
703 706
def getrevs(repo, name):
    """Return the set of revision that belong to the <name> set

    Such access may compute the set and cache it for future use"""
    repo = repo.unfiltered()
    if not repo.obsstore:
        # no obsolescence data: every named set is trivially empty
        return ()
    caches = repo.obsstore.caches
    if name not in caches:
        caches[name] = cachefuncs[name](repo)
    return caches[name]
714 717
715 718 # To be simple we need to invalidate obsolescence cache when:
716 719 #
717 720 # - new changeset is added:
718 721 # - public phase is changed
719 722 # - obsolescence marker are added
720 723 # - strip is used a repo
def clearobscaches(repo):
    """Remove all obsolescence related cache from a repo

    This remove all cache in obsstore is the obsstore already exist on the
    repo.

    (We could be smarter here given the exact event that trigger the cache
    clearing)"""
    # nothing to do unless this repo actually has obsstore data cached
    if 'obsstore' not in repo._filecache:
        return
    repo.obsstore.caches.clear()
732 735
@cachefor('obsolete')
def _computeobsoleteset(repo):
    """the set of obsolete revisions"""
    getrev = repo.changelog.nodemap.get
    getphase = repo._phasecache.phase
    # a revision is obsolete when it is known locally, appears as a
    # precursor in some marker, and is not public (phase > 0)
    obs = set()
    for prec in repo.obsstore.successors:
        rev = getrev(prec)
        if rev is not None and getphase(repo, rev):
            obs.add(rev)
    return obs
744 747
@cachefor('unstable')
def _computeunstableset(repo):
    """the set of non obsolete revisions with obsolete parents"""
    # revset is not efficient enough here
    # we do (obsolete()::) - obsolete() by hand
    obsrevs = getrevs(repo, 'obsolete')
    if not obsrevs:
        return set()
    descendants = repo.changelog.descendants(obsrevs)
    return set(rev for rev in descendants if rev not in obsrevs)
755 758
@cachefor('suspended')
def _computesuspendedset(repo):
    """the set of obsolete parents with non obsolete descendants"""
    # ancestors of unstable changesets that are themselves obsolete
    unstableancestors = repo.changelog.ancestors(getrevs(repo, 'unstable'))
    return set(rev for rev in getrevs(repo, 'obsolete')
               if rev in unstableancestors)
761 764
@cachefor('extinct')
def _computeextinctset(repo):
    """the set of obsolete parents without non obsolete descendants"""
    obsolete = getrevs(repo, 'obsolete')
    suspended = getrevs(repo, 'suspended')
    return obsolete - suspended
766 769
767 770
@cachefor('bumped')
def _computebumpedset(repo):
    """the set of revs trying to obsolete public revisions"""
    bumped = set()
    # utils function (avoid attribute lookup in the loop)
    phase = repo._phasecache.phase # would be faster to grab the full list
    public = phases.public
    cl = repo.changelog
    torev = cl.nodemap.get
    obs = getrevs(repo, 'obsolete')
    for rev in repo:
        # We only evaluate mutable, non-obsolete revision
        if (public < phase(repo, rev)) and (rev not in obs):
            node = cl.node(rev)
            # (future) A cache of precursors may worth if split is very common
            # markers flagged bumpedfix are excluded: they record the fix
            # itself and must not make the fix look bumped in turn
            for pnode in allprecursors(repo.obsstore, [node],
                                       ignoreflags=bumpedfix):
                prev = torev(pnode) # unfiltered! but so is phasecache
                if (prev is not None) and (phase(repo, prev) <= public):
                    # we have a public precursors
                    bumped.add(rev)
                    break # Next draft!
    return bumped
791 794
@cachefor('divergent')
def _computedivergentset(repo):
    """the set of rev that compete to be the final successors of some revision.
    """
    divergent = set()
    obsstore = repo.obsstore
    # prec node -> successors sets, shared across iterations for speed
    newermap = {}
    for ctx in repo.set('(not public()) - obsolete()'):
        mark = obsstore.precursors.get(ctx.node(), ())
        toprocess = set(mark)
        while toprocess:
            # walk the precursor chain transitively; mark[0] is the
            # precursor node of the popped marker
            prec = toprocess.pop()[0]
            if prec not in newermap:
                successorssets(repo, prec, newermap)
            # drop pruned (empty) successors sets before counting
            newer = [n for n in newermap[prec] if n]
            if len(newer) > 1:
                # more than one surviving successors set -> divergence
                divergent.add(ctx.rev())
                break
            toprocess.update(obsstore.precursors.get(prec, ()))
    return divergent
812 815
813 816
def createmarkers(repo, relations, flag=0, metadata=None):
    """Add obsolete markers between changesets in a repo

    <relations> must be an iterable of (<old>, (<new>, ...)[,{metadata}])
    tuple. `old` and `news` are changectx. metadata is an optional dictionnary
    containing metadata for this marker only. It is merged with the global
    metadata specified through the `metadata` argument of this function,

    Trying to obsolete a public changeset will raise an exception.

    Current user and date are used except if specified otherwise in the
    metadata attribute.

    This function operates within a transaction of its own, but does
    not take any lock on the repo.
    """
    # prepare metadata
    if metadata is None:
        metadata = {}
    metadata.setdefault('date', '%i %i' % util.makedate())
    metadata.setdefault('user', repo.ui.username())
    tr = repo.transaction('add-obsolescence-marker')
    try:
        for rel in relations:
            prec, sucs = rel[0], rel[1]
            # per-relation metadata overrides the shared defaults
            localmetadata = metadata.copy()
            if len(rel) > 2:
                localmetadata.update(rel[2])

            if not prec.mutable():
                raise util.Abort("cannot obsolete immutable changeset: %s"
                                 % prec)
            nprec = prec.node()
            nsucs = tuple(s.node() for s in sucs)
            if nprec in nsucs:
                raise util.Abort("changeset %s cannot obsolete itself" % prec)
            repo.obsstore.create(tr, nprec, nsucs, flag, localmetadata)
            repo.filteredrevcache.clear()
        tr.close()
    finally:
        tr.release()
General Comments 0
You need to be logged in to leave comments. Login now