obsolete: add a date argument to the `createmarkers` function...
Pierre-Yves David -
r22219:79c68626 default
@@ -1,879 +1,877
1 1 # obsolete.py - obsolete markers handling
2 2 #
3 3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
4 4 # Logilab SA <contact@logilab.fr>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 """Obsolete marker handling
10 10
11 11 An obsolete marker maps an old changeset to a list of new
12 12 changesets. If the list of new changesets is empty, the old changeset
13 13 is said to be "killed". Otherwise, the old changeset is being
14 14 "replaced" by the new changesets.
15 15
16 16 Obsolete markers can be used to record and distribute changeset graph
17 17 transformations performed by history rewrite operations, and help
18 18 build new tools to reconcile conflicting rewrite actions. To
19 19 facilitate conflict resolution, markers include various annotations
20 20 besides old and new changeset identifiers, such as creation date or
21 21 author name.
22 22
23 23 The old obsoleted changeset is called a "precursor" and possible
24 24 replacements are called "successors". Markers that use changeset X as
25 25 a precursor are called "successor markers of X" because they hold
26 26 information about the successors of X. Markers that use changeset Y as
27 27 a successor are called "precursor markers of Y" because they hold
28 28 information about the precursors of Y.
29 29
30 30 Examples:
31 31
32 32 - When changeset A is replaced by changeset A', one marker is stored:
33 33
34 34 (A, (A',))
35 35
36 36 - When changesets A and B are folded into a new changeset C, two markers are
37 37 stored:
38 38
39 39 (A, (C,)) and (B, (C,))
40 40
41 41 - When changeset A is simply "pruned" from the graph, a marker is created:
42 42
43 43 (A, ())
44 44
45 45 - When changeset A is split into B and C, a single marker is used:
46 46
47 47 (A, (B, C))
48 48
49 49 We use a single marker to distinguish the "split" case from the "divergence"
50 50 case. If two independent operations rewrite the same changeset A into A' and
51 51 A'', we have an error case: divergent rewriting. We can detect it because
52 52 two markers will be created independently:
53 53
54 54 (A, (A',)) and (A, (A'',))
55 55
56 56 Format
57 57 ------
58 58
59 59 Markers are stored in an append-only file stored in
60 60 '.hg/store/obsstore'.
61 61
62 62 The file starts with a version header:
63 63
64 64 - 1 unsigned byte: version number, starting at zero.
65 65
66 66
67 67 The header is followed by the markers. Each marker is made of:
68 68
69 69 - 1 unsigned byte: number of new changesets "N", can be zero.
70 70
71 71 - 1 unsigned 32-bit integer: metadata size "M" in bytes.
72 72
73 73 - 1 byte: a bit field. It is reserved for flags used in common
74 74 obsolete marker operations, to avoid repeated decoding of metadata
75 75 entries.
76 76
77 77 - 20 bytes: obsoleted changeset identifier.
78 78
79 79 - N*20 bytes: new changeset identifiers.
80 80
81 81 - M bytes: metadata as a sequence of nul-terminated strings. Each
82 82 string contains a key and a value, separated by a colon ':', without
83 83 additional encoding. Keys cannot contain '\0' or ':' and values
84 84 cannot contain '\0'.
85 85
86 86 """
87 87 import struct
88 88 import util, base85, node
89 89 import phases
90 90 from i18n import _
91 91
92 92 _pack = struct.pack
93 93 _unpack = struct.unpack
94 94
95 95 _SEEK_END = 2 # os.SEEK_END was introduced in Python 2.5
96 96
97 97 # the obsolete feature is not mature enough to be enabled by default.
98 98 # you have to rely on a third party extension to enable this.
99 99 _enabled = False
100 100
101 101 # data used for parsing and writing
102 102 _fmversion = 0
103 103 _fmfixed = '>BIB20s'
104 104 _fmnode = '20s'
105 105 _fmfsize = struct.calcsize(_fmfixed)
106 106 _fnodesize = struct.calcsize(_fmnode)
107 107
108 108 ### obsolescence marker flag
109 109
110 110 ## bumpedfix flag
111 111 #
112 112 # When a changeset A' succeeds a changeset A which became public, we call A'
113 113 # "bumped" because it's a successor of a public changeset
114 114 #
115 115 # o A' (bumped)
116 116 # |`:
117 117 # | o A
118 118 # |/
119 119 # o Z
120 120 #
121 121 # The way to solve this situation is to create a new changeset Ad as a child
122 122 # of A. This changeset has the same content as A'. So the diff from A to A'
123 123 # is the same as the diff from A to Ad. Ad is marked as a successor of A'
124 124 #
125 125 # o Ad
126 126 # |`:
127 127 # | x A'
128 128 # |'|
129 129 # o | A
130 130 # |/
131 131 # o Z
132 132 #
133 133 # But by transitivity Ad is also a successor of A. To avoid having Ad marked
134 134 # as bumped too, we add the `bumpedfix` flag to the marker <A', (Ad,)>.
135 135 # This flag means that the successor expresses the changes between the public
136 136 # and bumped versions and fixes the situation, breaking the transitivity of
137 137 # "bumped" here.
138 138 bumpedfix = 1
139 139
140 140 def _readmarkers(data):
141 141 """Read and enumerate markers from raw data"""
142 142 off = 0
143 143 diskversion = _unpack('>B', data[off:off + 1])[0]
144 144 off += 1
145 145 if diskversion != _fmversion:
146 146 raise util.Abort(_('parsing obsolete marker: unknown version %r')
147 147 % diskversion)
148 148
149 149 # Loop on markers
150 150 l = len(data)
151 151 while off + _fmfsize <= l:
152 152 # read fixed part
153 153 cur = data[off:off + _fmfsize]
154 154 off += _fmfsize
155 155 nbsuc, mdsize, flags, pre = _unpack(_fmfixed, cur)
156 156 # read replacement
157 157 sucs = ()
158 158 if nbsuc:
159 159 s = (_fnodesize * nbsuc)
160 160 cur = data[off:off + s]
161 161 sucs = _unpack(_fmnode * nbsuc, cur)
162 162 off += s
163 163 # read metadata
164 164 # (metadata will be decoded on demand)
165 165 metadata = data[off:off + mdsize]
166 166 if len(metadata) != mdsize:
167 167 raise util.Abort(_('parsing obsolete marker: metadata is too '
168 168 'short, %d bytes expected, got %d')
169 169 % (mdsize, len(metadata)))
170 170 off += mdsize
171 171 yield (pre, sucs, flags, metadata)
172 172
173 173 def encodemeta(meta):
174 174 """Return encoded metadata string to string mapping.
175 175
176 176 Assume no ':' in keys and no '\0' in either keys or values."""
177 177 for key, value in meta.iteritems():
178 178 if ':' in key or '\0' in key:
179 179 raise ValueError("':' and '\0' are forbidden in metadata key'")
180 180 if '\0' in value:
181 181 raise ValueError("':' is forbidden in metadata value'")
182 182 return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])
183 183
184 184 def decodemeta(data):
185 185 """Return string to string dictionary from encoded version."""
186 186 d = {}
187 187 for l in data.split('\0'):
188 188 if l:
189 189 key, value = l.split(':')
190 190 d[key] = value
191 191 return d
192 192
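For instance, the two helpers above round-trip a mapping; `encodemeta` sorts keys, so dict ordering is irrelevant (an illustrative check, not part of the module):

    meta = {'user': 'alice', 'date': '0 0'}
    blob = encodemeta(meta)   # 'date:0 0\x00user:alice'
    assert decodemeta(blob) == meta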
193 193 class marker(object):
194 194 """Wrap obsolete marker raw data"""
195 195
196 196 def __init__(self, repo, data):
197 197 # the repo argument will be used to create changectx in later versions
198 198 self._repo = repo
199 199 self._data = data
200 200 self._decodedmeta = None
201 201
202 202 def __hash__(self):
203 203 return hash(self._data)
204 204
205 205 def __eq__(self, other):
206 206 if type(other) != type(self):
207 207 return False
208 208 return self._data == other._data
209 209
210 210 def precnode(self):
211 211 """Precursor changeset node identifier"""
212 212 return self._data[0]
213 213
214 214 def succnodes(self):
215 215 """List of successor changesets node identifiers"""
216 216 return self._data[1]
217 217
218 218 def metadata(self):
219 219 """Decoded metadata dictionary"""
220 220 if self._decodedmeta is None:
221 221 self._decodedmeta = decodemeta(self._data[3])
222 222 return self._decodedmeta
223 223
224 224 def date(self):
225 225 """Creation date as (unixtime, offset)"""
226 226 parts = self.metadata()['date'].split(' ')
227 227 return (float(parts[0]), int(parts[1]))
228 228
229 229 def flags(self):
230 230 """The flags field of the marker"""
231 231 return self._data[2]
232 232
233 233 class obsstore(object):
234 234 """Store obsolete markers
235 235
236 236 Markers can be accessed with two mappings:
237 237 - precursors[x] -> set(markers on precursors edges of x)
238 238 - successors[x] -> set(markers on successors edges of x)
239 239 """
240 240
241 241 def __init__(self, sopener):
242 242 # caches for various obsolescence related sets
243 243 self.caches = {}
244 244 self._all = []
245 245 # new markers to serialize
246 246 self.precursors = {}
247 247 self.successors = {}
248 248 self.sopener = sopener
249 249 data = sopener.tryread('obsstore')
250 250 if data:
251 251 self._load(_readmarkers(data))
252 252
253 253 def __iter__(self):
254 254 return iter(self._all)
255 255
256 256 def __len__(self):
257 257 return len(self._all)
258 258
259 259 def __nonzero__(self):
260 260 return bool(self._all)
261 261
262 262 def create(self, transaction, prec, succs=(), flag=0, date=None,
263 263 metadata=None):
264 264 """obsolete: add a new obsolete marker
265 265
266 266 * ensure it is hashable
267 267 * check mandatory metadata
268 268 * encode metadata
269 269
270 270 If you are a human writing code that creates markers, you want to use
271 271 the `createmarkers` function in this module instead.
272 272
273 273 Return True if a new marker has been added, False if the marker
274 274 already existed (no-op).
275 275 """
276 276 if metadata is None:
277 277 metadata = {}
278 278 if 'date' not in metadata:
279 279 if date is None:
280 280 date = util.makedate()
281 281 metadata['date'] = "%d %d" % date
282 282 if len(prec) != 20:
283 283 raise ValueError(prec)
284 284 for succ in succs:
285 285 if len(succ) != 20:
286 286 raise ValueError(succ)
287 287 if prec in succs:
288 288 raise ValueError(_('in-marker cycle with %s') % node.hex(prec))
289 289 marker = (str(prec), tuple(succs), int(flag), encodemeta(metadata))
290 290 return bool(self.add(transaction, [marker]))
291 291
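A hypothetical call sketch, assuming `tr` is an open transaction and `n1`/`n2` are 20-byte binary node ids; the `date` pair is formatted into the metadata as "%d %d":

    # sketch only: tr, n1 and n2 are assumed to exist
    if repo.obsstore.create(tr, n1, (n2,), date=(1415000000, 0)):
        repo.ui.status('one new marker stored\n')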
292 292 def add(self, transaction, markers):
293 293 """Add new markers to the store
294 294
295 295 Takes care of filtering duplicates.
296 296 Returns the number of new markers."""
297 297 if not _enabled:
298 298 raise util.Abort('obsolete feature is not enabled on this repo')
299 299 known = set(self._all)
300 300 new = []
301 301 for m in markers:
302 302 if m not in known:
303 303 known.add(m)
304 304 new.append(m)
305 305 if new:
306 306 f = self.sopener('obsstore', 'ab')
307 307 try:
308 308 # Whether the file's current position is at the beginning or at
309 309 # the end after opening a file for appending is implementation
310 310 # defined. So we must seek to the end before calling tell(),
311 311 # or we may get a zero offset for non-zero sized files on
312 312 # some platforms (issue3543).
313 313 f.seek(0, _SEEK_END)
314 314 offset = f.tell()
315 315 transaction.add('obsstore', offset)
316 316 # offset == 0: new file - add the version header
317 317 for bytes in _encodemarkers(new, offset == 0):
318 318 f.write(bytes)
319 319 finally:
320 320 # XXX: f.close() == filecache invalidation == obsstore rebuilt.
321 321 # call 'filecacheentry.refresh()' here
322 322 f.close()
323 323 self._load(new)
324 324 # new markers *may* have changed several sets. Invalidate the cache.
325 325 self.caches.clear()
326 326 return len(new)
327 327
328 328 def mergemarkers(self, transaction, data):
329 329 markers = _readmarkers(data)
330 330 self.add(transaction, markers)
331 331
332 332 def _load(self, markers):
333 333 for mark in markers:
334 334 self._all.append(mark)
335 335 pre, sucs = mark[:2]
336 336 self.successors.setdefault(pre, set()).add(mark)
337 337 for suc in sucs:
338 338 self.precursors.setdefault(suc, set()).add(mark)
339 339 if node.nullid in self.precursors:
340 340 raise util.Abort(_('bad obsolescence marker detected: '
341 341 'invalid successors nullid'))
342 342
343 343 def _encodemarkers(markers, addheader=False):
344 344 # Kept separate from flushmarkers(), it will be reused for
345 345 # markers exchange.
346 346 if addheader:
347 347 yield _pack('>B', _fmversion)
348 348 for marker in markers:
349 349 yield _encodeonemarker(marker)
350 350
351 351
352 352 def _encodeonemarker(marker):
353 353 pre, sucs, flags, metadata = marker
354 354 nbsuc = len(sucs)
355 355 format = _fmfixed + (_fmnode * nbsuc)
356 356 data = [nbsuc, len(metadata), flags, pre]
357 357 data.extend(sucs)
358 358 return _pack(format, *data) + metadata
359 359
360 360 # arbitrarily picked to fit into the 8K limit of HTTP servers
361 361 # you have to take into account:
362 362 # - the version header
363 363 # - the base85 encoding
364 364 _maxpayload = 5300
365 365
366 366 def _pushkeyescape(markers):
367 367 """encode markers into a dict suitable for pushkey exchange
368 368
369 369 - binary data is base85 encoded
370 370 - split into chunks smaller than 5300 bytes
371 371 keys = {}
372 372 parts = []
373 373 currentlen = _maxpayload * 2 # ensure we create a new part
374 374 for marker in markers:
375 375 nextdata = _encodeonemarker(marker)
376 376 if (len(nextdata) + currentlen > _maxpayload):
377 377 currentpart = []
378 378 currentlen = 0
379 379 parts.append(currentpart)
380 380 currentpart.append(nextdata)
381 381 currentlen += len(nextdata)
382 382 for idx, part in enumerate(reversed(parts)):
383 383 data = ''.join([_pack('>B', _fmversion)] + part)
384 384 keys['dump%i' % idx] = base85.b85encode(data)
385 385 return keys
386 386
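The producer side could look like the following sketch, assuming a populated obsstore; each value decodes back to a version byte followed by raw markers:

    keys = _pushkeyescape(repo.obsstore)     # e.g. {'dump0': <base85 blob>}
    for name in sorted(keys):
        data = base85.b85decode(keys[name])  # version header + raw markers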
387 387 def listmarkers(repo):
388 388 """List markers over pushkey"""
389 389 if not repo.obsstore:
390 390 return {}
391 391 return _pushkeyescape(repo.obsstore)
392 392
393 393 def pushmarker(repo, key, old, new):
394 394 """Push markers over pushkey"""
395 395 if not key.startswith('dump'):
396 396 repo.ui.warn(_('unknown key: %r') % key)
397 397 return 0
398 398 if old:
399 399 repo.ui.warn(_('unexpected old value for %r') % key)
400 400 return 0
401 401 data = base85.b85decode(new)
402 402 lock = repo.lock()
403 403 try:
404 404 tr = repo.transaction('pushkey: obsolete markers')
405 405 try:
406 406 repo.obsstore.mergemarkers(tr, data)
407 407 tr.close()
408 408 return 1
409 409 finally:
410 410 tr.release()
411 411 finally:
412 412 lock.release()
413 413
414 414 def allmarkers(repo):
415 415 """all obsolete markers known in a repository"""
416 416 for markerdata in repo.obsstore:
417 417 yield marker(repo, markerdata)
418 418
419 419 def precursormarkers(ctx):
420 420 """obsolete marker marking this changeset as a successors"""
421 421 for data in ctx._repo.obsstore.precursors.get(ctx.node(), ()):
422 422 yield marker(ctx._repo, data)
423 423
424 424 def successormarkers(ctx):
425 425 """obsolete marker making this changeset obsolete"""
426 426 for data in ctx._repo.obsstore.successors.get(ctx.node(), ()):
427 427 yield marker(ctx._repo, data)
428 428
429 429 def allsuccessors(obsstore, nodes, ignoreflags=0):
430 430 """Yield node for every successor of <nodes>.
431 431
432 432 Some successors may be unknown locally.
433 433
434 434 This is a linear yield unsuited to detecting split changesets. It includes
435 435 initial nodes too."""
436 436 remaining = set(nodes)
437 437 seen = set(remaining)
438 438 while remaining:
439 439 current = remaining.pop()
440 440 yield current
441 441 for mark in obsstore.successors.get(current, ()):
442 442 # ignore marker flagged with specified flag
443 443 if mark[2] & ignoreflags:
444 444 continue
445 445 for suc in mark[1]:
446 446 if suc not in seen:
447 447 seen.add(suc)
448 448 remaining.add(suc)
449 449
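For example, the transitive successors of a single node, ignoring bumpedfix markers, could be collected like this (`anode` is an assumed 20-byte node id; note that the initial node is yielded too):

    succs = set(allsuccessors(repo.obsstore, [anode], ignoreflags=bumpedfix))
    succs.discard(anode)  # drop the initial node itself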
450 450 def allprecursors(obsstore, nodes, ignoreflags=0):
451 451 """Yield node for every precursors of <nodes>.
452 452
453 453 Some precursors may be unknown locally.
454 454
455 455 This is a linear yield unsuited to detecting folded changesets. It includes
456 456 initial nodes too."""
457 457
458 458 remaining = set(nodes)
459 459 seen = set(remaining)
460 460 while remaining:
461 461 current = remaining.pop()
462 462 yield current
463 463 for mark in obsstore.precursors.get(current, ()):
464 464 # ignore marker flagged with specified flag
465 465 if mark[2] & ignoreflags:
466 466 continue
467 467 suc = mark[0]
468 468 if suc not in seen:
469 469 seen.add(suc)
470 470 remaining.add(suc)
471 471
472 472 def foreground(repo, nodes):
473 473 """return all nodes in the "foreground" of other node
474 474
475 475 The foreground of a revision is anything reachable using parent -> children
476 476 or precursor -> successor relations. It is very similar to "descendant" but
477 477 augmented with obsolescence information.
478 478
479 479 Beware that obsolescence cycles may arise in complex situations.
480 480 """
481 481 repo = repo.unfiltered()
482 482 foreground = set(repo.set('%ln::', nodes))
483 483 if repo.obsstore:
484 484 # We only need this complicated logic if there is obsolescence
485 485 # XXX will probably deserve an optimised revset.
486 486 nm = repo.changelog.nodemap
487 487 plen = -1
488 488 # compute the whole set of successors or descendants
489 489 while len(foreground) != plen:
490 490 plen = len(foreground)
491 491 succs = set(c.node() for c in foreground)
492 492 mutable = [c.node() for c in foreground if c.mutable()]
493 493 succs.update(allsuccessors(repo.obsstore, mutable))
494 494 known = (n for n in succs if n in nm)
495 495 foreground = set(repo.set('%ln::', known))
496 496 return set(c.node() for c in foreground)
497 497
498 498
499 499 def successorssets(repo, initialnode, cache=None):
500 500 """Return all set of successors of initial nodes
501 501
502 502 A successors set of a changeset A is a group of revisions that succeed
503 503 A as a consistent whole, each revision being only a partial
504 504 replacement. A successors set contains non-obsolete changesets only.
505 505
506 506 This function returns the full list of successor sets which is why it
507 507 returns a list of tuples and not just a single tuple. Each tuple is a valid
508 508 successors set. Note that (A,) may be a valid successors set for changeset A
509 509 (see below).
510 510
511 511 In most cases, a changeset A will have a single element (e.g. the changeset
512 512 A is replaced by A') in its successors set. Though, it is also common for a
513 513 changeset A to have no elements in its successor set (e.g. the changeset
514 514 has been pruned). Therefore, the returned list of successors sets will be
515 515 [(A',)] or [], respectively.
516 516
517 517 When a changeset A is split into A' and B', however, it will result in a
518 518 successors set containing more than a single element, i.e. [(A',B')].
519 519 Divergent changesets will result in multiple successors sets, i.e. [(A',),
520 520 (A'')].
521 521
522 522 If a changeset A is not obsolete, then it will conceptually have no
523 523 successors set. To distinguish this from a pruned changeset, the successor
524 524 set will only contain itself, i.e. [(A,)].
525 525
526 526 Finally, successors unknown locally are considered to be pruned (obsoleted
527 527 without any successors).
528 528
529 529 The optional `cache` parameter is a dictionary that may contain precomputed
530 530 successors sets. It is meant to reuse the computation of a previous call to
531 531 `successorssets` when multiple calls are made at the same time. The cache
532 532 dictionary is updated in place. The caller is responsible for its life
533 533 span. Code that makes multiple calls to `successorssets` *must* use this
534 534 cache mechanism or suffer terrible performance.
535 535
536 536 """
537 537
538 538 succmarkers = repo.obsstore.successors
539 539
540 540 # Stack of nodes we search successors sets for
541 541 toproceed = [initialnode]
542 542 # set version of above list for fast loop detection
543 543 # every element added to "toproceed" must also be added here
544 544 stackedset = set(toproceed)
545 545 if cache is None:
546 546 cache = {}
547 547
548 548 # This while loop is the flattened version of a recursive search for
549 549 # successors sets
550 550 #
551 551 # def successorssets(x):
552 552 # successors = directsuccessors(x)
553 553 # ss = [[]]
554 554 # for succ in successors:
555 555 # # product as in itertools cartesian product
556 556 # ss = product(ss, successorssets(succ))
557 557 # return ss
558 558 #
559 559 # But we cannot use plain recursive calls here:
560 560 # - that would blow the python call stack
561 561 # - obsolescence markers may have cycles, we need to handle them.
562 562 #
563 563 # The `toproceed` list acts as our call stack. Every node whose
564 564 # successors sets we search for is stacked there.
565 565 #
566 566 # The `stackedset` is the set version of this stack, used to check if a
567 567 # node is already stacked. This check is used to detect cycles and prevent
568 568 # infinite loops.
569 569 #
570 570 # The successors sets of all nodes are stored in the `cache` dictionary.
571 571 #
572 572 # After this while loop ends we use the cache to return the successors sets
573 573 # for the node requested by the caller.
574 574 while toproceed:
575 575 # Every iteration tries to compute the successors sets of the topmost
576 576 # node of the stack: CURRENT.
577 577 #
578 578 # There are four possible outcomes:
579 579 #
580 580 # 1) We already know the successors sets of CURRENT:
581 581 # -> mission accomplished, pop it from the stack.
582 582 # 2) Node is not obsolete:
583 583 # -> the node is its own successors set. Add it to the cache.
584 584 # 3) We do not know successors set of direct successors of CURRENT:
585 585 # -> We add those successors to the stack.
586 586 # 4) We know successors sets of all direct successors of CURRENT:
587 587 # -> We can compute CURRENT successors set and add it to the
588 588 # cache.
589 589 #
590 590 current = toproceed[-1]
591 591 if current in cache:
592 592 # case (1): We already know the successors sets
593 593 stackedset.remove(toproceed.pop())
594 594 elif current not in succmarkers:
595 595 # case (2): The node is not obsolete.
596 596 if current in repo:
597 597 # We have a valid last successor.
598 598 cache[current] = [(current,)]
599 599 else:
600 600 # Final obsolete version is unknown locally.
601 601 # Do not count that as a valid successor.
602 602 cache[current] = []
603 603 else:
604 604 # cases (3) and (4)
605 605 #
606 606 # We proceed in two phases. Phase 1 aims to distinguish case (3)
607 607 # from case (4):
608 608 #
609 609 # For each direct successor of CURRENT, we check whether its
610 610 # successors sets are known. If they are not, we stack the
611 611 # unknown node and proceed to the next iteration of the while
612 612 # loop. (case 3)
613 613 #
614 614 # During this step, we may detect obsolescence cycles: a node
615 615 # with unknown successors sets but already in the call stack.
616 616 # In such a situation, we arbitrarily set the successors sets of
617 617 # the node to nothing (node pruned) to break the cycle.
618 618 #
619 619 # If no break was encountered we proceed to phase 2.
620 620 #
621 621 # Phase 2 computes successors sets of CURRENT (case 4); see details
622 622 # in phase 2 itself.
623 623 #
624 624 # Note the two levels of iteration in each phase.
625 625 # - The first one handles obsolescence markers using CURRENT as
626 626 # a precursor (successor markers of CURRENT).
627 627 #
628 628 # Having multiple entries here means divergence.
629 629 #
630 630 # - The second one handles successors defined in each marker.
631 631 #
632 632 # Having none means a pruned node; multiple successors mean a split;
633 633 # a single successor is a standard replacement.
634 634 #
635 635 for mark in sorted(succmarkers[current]):
636 636 for suc in mark[1]:
637 637 if suc not in cache:
638 638 if suc in stackedset:
639 639 # cycle breaking
640 640 cache[suc] = []
641 641 else:
642 642 # case (3) If we have not computed successors sets
643 643 # of one of those successors we add it to the
644 644 # `toproceed` stack and stop all work for this
645 645 # iteration.
646 646 toproceed.append(suc)
647 647 stackedset.add(suc)
648 648 break
649 649 else:
650 650 continue
651 651 break
652 652 else:
653 653 # case (4): we know all successors sets of all direct
654 654 # successors
655 655 #
656 656 # The successors set contributed by each marker depends on the
657 657 # successors sets of all its "successors" nodes.
658 658 #
659 659 # Each different marker is a divergence in the obsolescence
660 660 # history. It contributes successors sets distinct from other
661 661 # markers.
662 662 #
663 663 # Within a marker, a successor may have divergent successors
664 664 # sets. In such a case, the marker will contribute multiple
665 665 # divergent successors sets. If multiple successors have
666 666 # divergent successors sets, a Cartesian product is used.
667 667 #
668 668 # At the end we post-process successors sets to remove
669 669 # duplicated entries and successors sets that are strict subsets of
670 670 # other ones.
671 671 succssets = []
672 672 for mark in sorted(succmarkers[current]):
673 673 # successors sets contributed by this marker
674 674 markss = [[]]
675 675 for suc in mark[1]:
676 676 # Cartesian product with previous successors
677 677 productresult = []
678 678 for prefix in markss:
679 679 for suffix in cache[suc]:
680 680 newss = list(prefix)
681 681 for part in suffix:
682 682 # do not duplicate entries in a successors set;
683 683 # first entry wins.
684 684 if part not in newss:
685 685 newss.append(part)
686 686 productresult.append(newss)
687 687 markss = productresult
688 688 succssets.extend(markss)
689 689 # remove duplicates and subsets
690 690 seen = []
691 691 final = []
692 692 candidate = sorted(((set(s), s) for s in succssets if s),
693 693 key=lambda x: len(x[1]), reverse=True)
694 694 for setversion, listversion in candidate:
695 695 for seenset in seen:
696 696 if setversion.issubset(seenset):
697 697 break
698 698 else:
699 699 final.append(listversion)
700 700 seen.append(setversion)
701 701 final.reverse() # put small successors sets first
702 702 cache[current] = final
703 703 return cache[initialnode]
704 704
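As the docstring insists, callers making several queries should share one cache; a sketch assuming `nodes` is an iterable of node ids:

    cache = {}
    for n in nodes:
        for ss in successorssets(repo, n, cache):
            repo.ui.note('%s\n' % ' '.join(node.short(s) for s in ss))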
705 705 def _knownrevs(repo, nodes):
706 706 """yield revision numbers of known nodes passed in parameters
707 707
708 708 Unknown revisions are silently ignored."""
709 709 torev = repo.changelog.nodemap.get
710 710 for n in nodes:
711 711 rev = torev(n)
712 712 if rev is not None:
713 713 yield rev
714 714
715 715 # mapping of 'set-name' -> <function to compute this set>
716 716 cachefuncs = {}
717 717 def cachefor(name):
718 718 """Decorator to register a function as computing the cache for a set"""
719 719 def decorator(func):
720 720 assert name not in cachefuncs
721 721 cachefuncs[name] = func
722 722 return func
723 723 return decorator
724 724
725 725 def getrevs(repo, name):
726 726 """Return the set of revision that belong to the <name> set
727 727
728 728 Such access may compute the set and cache it for future use"""
729 729 repo = repo.unfiltered()
730 730 if not repo.obsstore:
731 731 return ()
732 732 if name not in repo.obsstore.caches:
733 733 repo.obsstore.caches[name] = cachefuncs[name](repo)
734 734 return repo.obsstore.caches[name]
735 735
736 736 # To keep things simple we need to invalidate the obsolescence cache when:
737 737 #
738 738 # - a new changeset is added
739 739 # - the public phase is changed
740 740 # - obsolescence markers are added
741 741 # - strip is used on a repo
742 742 def clearobscaches(repo):
743 743 """Remove all obsolescence related cache from a repo
744 744
745 745 This remove all cache in obsstore is the obsstore already exist on the
746 746 repo.
747 747
748 748 (We could be smarter here given the exact event that trigger the cache
749 749 clearing)"""
750 750 # only clear caches if there is obsstore data in this repo
751 751 if 'obsstore' in repo._filecache:
752 752 repo.obsstore.caches.clear()
753 753
754 754 @cachefor('obsolete')
755 755 def _computeobsoleteset(repo):
756 756 """the set of obsolete revisions"""
757 757 obs = set()
758 758 getrev = repo.changelog.nodemap.get
759 759 getphase = repo._phasecache.phase
760 760 for n in repo.obsstore.successors:
761 761 rev = getrev(n)
762 762 if rev is not None and getphase(repo, rev):
763 763 obs.add(rev)
764 764 return obs
765 765
766 766 @cachefor('unstable')
767 767 def _computeunstableset(repo):
768 768 """the set of non obsolete revisions with obsolete parents"""
769 769 # revset is not efficient enough here
770 770 # we do (obsolete()::) - obsolete() by hand
771 771 obs = getrevs(repo, 'obsolete')
772 772 if not obs:
773 773 return set()
774 774 cl = repo.changelog
775 775 return set(r for r in cl.descendants(obs) if r not in obs)
776 776
777 777 @cachefor('suspended')
778 778 def _computesuspendedset(repo):
779 779 """the set of obsolete parents with non obsolete descendants"""
780 780 suspended = repo.changelog.ancestors(getrevs(repo, 'unstable'))
781 781 return set(r for r in getrevs(repo, 'obsolete') if r in suspended)
782 782
783 783 @cachefor('extinct')
784 784 def _computeextinctset(repo):
785 785 """the set of obsolete parents without non obsolete descendants"""
786 786 return getrevs(repo, 'obsolete') - getrevs(repo, 'suspended')
787 787
788 788
789 789 @cachefor('bumped')
790 790 def _computebumpedset(repo):
791 791 """the set of revs trying to obsolete public revisions"""
792 792 bumped = set()
793 793 # util function (avoid attribute lookup in the loop)
794 794 phase = repo._phasecache.phase # would be faster to grab the full list
795 795 public = phases.public
796 796 cl = repo.changelog
797 797 torev = cl.nodemap.get
798 798 obs = getrevs(repo, 'obsolete')
799 799 for rev in repo:
800 800 # We only evaluate mutable, non-obsolete revisions
801 801 if (public < phase(repo, rev)) and (rev not in obs):
802 802 node = cl.node(rev)
803 803 # (future) A cache of precursors may be worthwhile if splits are very common
804 804 for pnode in allprecursors(repo.obsstore, [node],
805 805 ignoreflags=bumpedfix):
806 806 prev = torev(pnode) # unfiltered! but so is phasecache
807 807 if (prev is not None) and (phase(repo, prev) <= public):
808 808 # we have a public precursor
809 809 bumped.add(rev)
810 810 break # Next draft!
811 811 return bumped
812 812
813 813 @cachefor('divergent')
814 814 def _computedivergentset(repo):
815 815 """the set of rev that compete to be the final successors of some revision.
816 816 """
817 817 divergent = set()
818 818 obsstore = repo.obsstore
819 819 newermap = {}
820 820 for ctx in repo.set('(not public()) - obsolete()'):
821 821 mark = obsstore.precursors.get(ctx.node(), ())
822 822 toprocess = set(mark)
823 823 while toprocess:
824 824 prec = toprocess.pop()[0]
825 825 if prec not in newermap:
826 826 successorssets(repo, prec, newermap)
827 827 newer = [n for n in newermap[prec] if n]
828 828 if len(newer) > 1:
829 829 divergent.add(ctx.rev())
830 830 break
831 831 toprocess.update(obsstore.precursors.get(prec, ()))
832 832 return divergent
833 833
834 834
835 def createmarkers(repo, relations, flag=0, metadata=None):
835 def createmarkers(repo, relations, flag=0, date=None, metadata=None):
836 836 """Add obsolete markers between changesets in a repo
837 837
838 838 <relations> must be an iterable of (<old>, (<new>, ...)[,{metadata}])
839 839 tuples. `old` and `new` are changectxs. metadata is an optional dictionary
840 840 containing metadata for this marker only. It is merged with the global
841 841 metadata specified through the `metadata` argument of this function.
842 842
843 843 Trying to obsolete a public changeset will raise an exception.
844 844
845 845 The current user and date are used unless specified otherwise in the
846 846 metadata attribute.
847 847
848 848 This function operates within a transaction of its own, but does
849 849 not take any lock on the repo.
850 850 """
851 851 # prepare metadata
852 852 if metadata is None:
853 853 metadata = {}
854 if 'date' not in metadata:
855 metadata['date'] = '%i %i' % util.makedate()
856 854 if 'user' not in metadata:
857 855 metadata['user'] = repo.ui.username()
858 856 tr = repo.transaction('add-obsolescence-marker')
859 857 try:
860 858 for rel in relations:
861 859 prec = rel[0]
862 860 sucs = rel[1]
863 861 localmetadata = metadata.copy()
864 862 if 2 < len(rel):
865 863 localmetadata.update(rel[2])
866 864
867 865 if not prec.mutable():
868 866 raise util.Abort("cannot obsolete immutable changeset: %s"
869 867 % prec)
870 868 nprec = prec.node()
871 869 nsucs = tuple(s.node() for s in sucs)
872 870 if nprec in nsucs:
873 871 raise util.Abort("changeset %s cannot obsolete itself" % prec)
874 repo.obsstore.create(tr, nprec, nsucs, flag,
872 repo.obsstore.create(tr, nprec, nsucs, flag, date=date,
875 873 metadata=localmetadata)
876 874 repo.filteredrevcache.clear()
877 875 tr.close()
878 876 finally:
879 877 tr.release()
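Usage of the new `date` argument introduced by this changeset might look like the following sketch, where `old` and `new` are assumed changectx objects and the caller holds the appropriate lock:

    createmarkers(repo, [(old, (new,))], date=(1415000000, 0),
                  metadata={'user': 'alice'})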