obsolete: avoid 2-argument form of enumerate, which was new in Python 2.6
Augie Fackler
r22262:10880c8a default
@@ -1,930 +1,930 b''
1 1 # obsolete.py - obsolete markers handling
2 2 #
3 3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
4 4 # Logilab SA <contact@logilab.fr>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 """Obsolete marker handling
10 10
11 11 An obsolete marker maps an old changeset to a list of new
12 12 changesets. If the list of new changesets is empty, the old changeset
13 13 is said to be "killed". Otherwise, the old changeset is being
14 14 "replaced" by the new changesets.
15 15
16 16 Obsolete markers can be used to record and distribute changeset graph
17 17 transformations performed by history rewrite operations, and help
18 18 building new tools to reconcile conflicting rewrite actions. To
19 19 facilitate conflict resolution, markers include various annotations
20 20 besides the old and new changeset identifiers, such as creation date or
21 21 author name.
22 22
23 23 The old obsoleted changeset is called a "precursor" and possible
24 24 replacements are called "successors". Markers that used changeset X as
25 25 a precursor are called "successor markers of X" because they hold
26 26 information about the successors of X. Markers that use changeset Y as
27 27 a successor are called "precursor markers of Y" because they hold
28 28 information about the precursors of Y.
29 29
30 30 Examples:
31 31
32 32 - When changeset A is replaced by changeset A', one marker is stored:
33 33
34 34 (A, (A',))
35 35
36 36 - When changesets A and B are folded into a new changeset C, two markers are
37 37 stored:
38 38
39 39 (A, (C,)) and (B, (C,))
40 40
41 41 - When changeset A is simply "pruned" from the graph, a marker is created:
42 42
43 43 (A, ())
44 44
45 45 - When changeset A is split into B and C, a single marker is used:
46 46 
47 47 (A, (B, C))
48 48
49 49 We use a single marker to distinguish the "split" case from the "divergence"
50 50 case. If two independent operations rewrite the same changeset A into A' and
51 51 A'', we have an error case: divergent rewriting. We can detect it because
52 52 two markers will be created independently:
53 53 
54 54 (A, (A',)) and (A, (A'',))
55 55
56 56 Format
57 57 ------
58 58
59 59 Markers are stored in an append-only file stored in
60 60 '.hg/store/obsstore'.
61 61
62 62 The file starts with a version header:
63 63
64 64 - 1 unsigned byte: version number, starting at zero.
65 65
66 66
67 67 The header is followed by the markers. Each marker is made of:
68 68
69 69 - 1 unsigned byte: number of new changesets "N", can be zero.
70 70
71 71 - 1 unsigned 32-bit integer: metadata size "M" in bytes.
72 72
73 73 - 1 byte: a bit field. It is reserved for flags used in common
74 74 obsolete marker operations, to avoid repeated decoding of metadata
75 75 entries.
76 76
77 77 - 20 bytes: obsoleted changeset identifier.
78 78
79 79 - N*20 bytes: new changeset identifiers.
80 80
81 81 - M bytes: metadata as a sequence of nul-terminated strings. Each
82 82 string contains a key and a value, separated by a colon ':', without
83 83 additional encoding. Keys cannot contain '\0' or ':' and values
84 84 cannot contain '\0'.
85 85
86 86 """
87 87 import struct
88 88 import util, base85, node
89 89 import phases
90 90 from i18n import _
91 91
92 92 _pack = struct.pack
93 93 _unpack = struct.unpack
94 94
95 95 _SEEK_END = 2 # os.SEEK_END was introduced in Python 2.5
96 96
97 97 # the obsolete feature is not mature enough to be enabled by default.
98 98 # you have to rely on a third party extension to enable this.
99 99 _enabled = False
100 100
101 101 # data used for parsing and writing
102 102 _fmversion = 0
103 103 _fmfixed = '>BIB20s'
104 104 _fmnode = '20s'
105 105 _fmfsize = struct.calcsize(_fmfixed)
106 106 _fnodesize = struct.calcsize(_fmnode)
107 107
108 108 ### obsolescence marker flag
109 109
110 110 ## bumpedfix flag
111 111 #
112 112 # When a changeset A' succeeds a changeset A which became public, we call A'
113 113 # "bumped" because it's a successor of a public changeset
114 114 #
115 115 # o A' (bumped)
116 116 # |`:
117 117 # | o A
118 118 # |/
119 119 # o Z
120 120 #
121 121 # The way to solve this situation is to create a new changeset Ad as a child
122 122 # of A. This changeset has the same content as A'. So the diff from A to A'
123 123 # is the same as the diff from A to Ad. Ad is marked as a successor of A'.
124 124 #
125 125 # o Ad
126 126 # |`:
127 127 # | x A'
128 128 # |'|
129 129 # o | A
130 130 # |/
131 131 # o Z
132 132 #
133 133 # But by transitivity Ad is also a successor of A. To avoid having Ad marked
134 134 # as bumped too, we add the `bumpedfix` flag to the marker <A', (Ad,)>.
135 135 # This flag means that the successor expresses the changes between the public and
136 136 # bumped versions and fixes the situation, breaking the transitivity of
137 137 # "bumped" here.
138 138 bumpedfix = 1
139 139
140 140 def _readmarkers(data):
141 141 """Read and enumerate markers from raw data"""
142 142 off = 0
143 143 diskversion = _unpack('>B', data[off:off + 1])[0]
144 144 off += 1
145 145 if diskversion != _fmversion:
146 146 raise util.Abort(_('parsing obsolete marker: unknown version %r')
147 147 % diskversion)
148 148
149 149 # Loop on markers
150 150 l = len(data)
151 151 while off + _fmfsize <= l:
152 152 # read fixed part
153 153 cur = data[off:off + _fmfsize]
154 154 off += _fmfsize
155 155 nbsuc, mdsize, flags, pre = _unpack(_fmfixed, cur)
156 156 # read replacement
157 157 sucs = ()
158 158 if nbsuc:
159 159 s = (_fnodesize * nbsuc)
160 160 cur = data[off:off + s]
161 161 sucs = _unpack(_fmnode * nbsuc, cur)
162 162 off += s
163 163 # read metadata
164 164 # (metadata will be decoded on demand)
165 165 metadata = data[off:off + mdsize]
166 166 if len(metadata) != mdsize:
167 167 raise util.Abort(_('parsing obsolete marker: metadata is too '
168 168 'short, %d bytes expected, got %d')
169 169 % (mdsize, len(metadata)))
170 170 off += mdsize
171 171 meta = decodemeta(metadata)
172 172 try:
173 173 date = util.parsedate(meta.get('date', '0 0')) # reuse decoded meta
174 174 except util.Abort:
175 175 date = (0., 0)
176 176 parents = None
177 177 if 'p2' in meta:
178 178 parents = (meta.pop('p1', None), meta.pop('p2', None))
179 179 elif 'p1' in meta:
180 180 parents = (meta.pop('p1', None),)
181 181 elif 'p0' in meta:
182 182 parents = ()
183 183 if parents is not None:
184 184 try:
185 185 parents = tuple(node.bin(p) for p in parents)
186 186 # if parent content is not a nodeid, drop the data
187 187 for p in parents:
188 188 if len(p) != 20:
189 189 parents = None
190 190 break
191 191 except TypeError:
192 192 # if content cannot be translated to nodeid drop the data.
193 193 parents = None
194 194
195 195 metadata = encodemeta(meta)
196 196
197 197 yield (pre, sucs, flags, metadata, date, parents)
198 198
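# Illustrative sketch (not part of the original module): hand-building a
# minimal raw store and parsing it back with _readmarkers(). The node ids
# below are fake 20-byte strings; a real store uses changeset hashes.
#
#   raw = _pack('>B', _fmversion)               # version header
#   raw += _pack(_fmfixed, 1, 0, 0, 'p' * 20)   # 1 successor, empty metadata
#   raw += 's' * 20                             # the successor node
#   list(_readmarkers(raw))
#   # -> [('p' * 20, ('s' * 20,), 0, '', <parsed '0 0' date>, None)]
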
199 199 def encodemeta(meta):
200 200 """Return encoded metadata string to string mapping.
201 201
202 202 Assume no ':' in key and no '\0' in both key and value."""
203 203 for key, value in meta.iteritems():
204 204 if ':' in key or '\0' in key:
205 205 raise ValueError("':' and '\0' are forbidden in metadata keys")
206 206 if '\0' in value:
207 207 raise ValueError("'\0' is forbidden in metadata values")
208 208 return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])
209 209
210 210 def decodemeta(data):
211 211 """Return string to string dictionary from encoded version."""
212 212 d = {}
213 213 for l in data.split('\0'):
214 214 if l:
215 215 key, value = l.split(':', 1) # values may themselves contain ':'
216 216 d[key] = value
217 217 return d
218 218
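# Illustrative round trip (keys are emitted in sorted order):
#
#   >>> encodemeta({'user': 'alice', 'note': 'fold'})
#   'note:fold\x00user:alice'
#   >>> sorted(decodemeta('note:fold\x00user:alice').items())
#   [('note', 'fold'), ('user', 'alice')]
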
219 219 class marker(object):
220 220 """Wrap obsolete marker raw data"""
221 221
222 222 def __init__(self, repo, data):
223 223 # the repo argument will be used to create changectxs in a later version
224 224 self._repo = repo
225 225 self._data = data
226 226 self._decodedmeta = None
227 227
228 228 def __hash__(self):
229 229 return hash(self._data)
230 230
231 231 def __eq__(self, other):
232 232 if type(other) != type(self):
233 233 return False
234 234 return self._data == other._data
235 235
236 236 def precnode(self):
237 237 """Precursor changeset node identifier"""
238 238 return self._data[0]
239 239
240 240 def succnodes(self):
241 241 """List of successor changesets node identifiers"""
242 242 return self._data[1]
243 243
244 244 def parentnodes(self):
245 245 """Parents of the precursors (None if not recorded)"""
246 246 return self._data[5]
247 247
248 248 def metadata(self):
249 249 """Decoded metadata dictionary"""
250 250 if self._decodedmeta is None:
251 251 self._decodedmeta = decodemeta(self._data[3])
252 252 return self._decodedmeta
253 253
254 254 def date(self):
255 255 """Creation date as (unixtime, offset)"""
256 256 return self._data[4]
257 257
258 258 def flags(self):
259 259 """The flags field of the marker"""
260 260 return self._data[2]
261 261
262 262 class obsstore(object):
263 263 """Store obsolete markers
264 264
265 265 Markers can be accessed with two mappings:
266 266 - precursors[x] -> set(markers on precursors edges of x)
267 267 - successors[x] -> set(markers on successors edges of x)
268 268 """
269 269
270 270 fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
271 271 # prec: nodeid, precursor changesets
272 272 # succs: tuple of nodeid, successor changesets (0-N length)
273 273 # flag: integer, flag field carrying modifier for the markers (see doc)
274 274 # meta: binary blob, encoded metadata dictionary
275 275 # date: (float, int) tuple, date of marker creation
276 276 # parents: (tuple of nodeid) or None, parents of precursors
277 277 # None is used when no data has been recorded
278 278
279 279 def __init__(self, sopener):
280 280 # caches for various obsolescence related data
281 281 self.caches = {}
282 282 self._all = []
283 283 # new markers to serialize
284 284 self.precursors = {}
285 285 self.successors = {}
286 286 self.sopener = sopener
287 287 data = sopener.tryread('obsstore')
288 288 if data:
289 289 self._load(_readmarkers(data))
290 290
291 291 def __iter__(self):
292 292 return iter(self._all)
293 293
294 294 def __len__(self):
295 295 return len(self._all)
296 296
297 297 def __nonzero__(self):
298 298 return bool(self._all)
299 299
300 300 def create(self, transaction, prec, succs=(), flag=0, parents=None,
301 301 date=None, metadata=None):
302 302 """obsolete: add a new obsolete marker
303 303
304 304 * ensuring it is hashable
305 305 * check mandatory metadata
306 306 * encode metadata
307 307
308 308 If you are a human writing code creating markers, you want to use the
309 309 `createmarkers` function in this module instead.
310 310 
311 311 Return True if a new marker has been added, False if the marker
312 312 already existed (no op).
313 313 """
314 314 if metadata is None:
315 315 metadata = {}
316 316 if date is None:
317 317 if 'date' in metadata:
318 318 # as a courtesy for out-of-tree extensions
319 319 date = util.parsedate(metadata.pop('date'))
320 320 else:
321 321 date = util.makedate()
322 322 if len(prec) != 20:
323 323 raise ValueError(prec)
324 324 for succ in succs:
325 325 if len(succ) != 20:
326 326 raise ValueError(succ)
327 327 if prec in succs:
328 328 raise ValueError(_('in-marker cycle with %s') % node.hex(prec))
329 329 marker = (str(prec), tuple(succs), int(flag), encodemeta(metadata),
330 330 date, parents)
331 331 return bool(self.add(transaction, [marker]))
332 332
333 333 def add(self, transaction, markers):
334 334 """Add new markers to the store
335 335
336 336 Take care of filtering out duplicates.
337 337 Return the number of new markers."""
338 338 if not _enabled:
339 339 raise util.Abort('obsolete feature is not enabled on this repo')
340 340 known = set(self._all)
341 341 new = []
342 342 for m in markers:
343 343 if m not in known:
344 344 known.add(m)
345 345 new.append(m)
346 346 if new:
347 347 f = self.sopener('obsstore', 'ab')
348 348 try:
349 349 # Whether the file's current position is at the beginning or at
350 350 # the end after opening a file for appending is implementation
351 351 # defined. So we must seek to the end before calling tell(),
352 352 # or we may get a zero offset for non-zero sized files on
353 353 # some platforms (issue3543).
354 354 f.seek(0, _SEEK_END)
355 355 offset = f.tell()
356 356 transaction.add('obsstore', offset)
357 357 # offset == 0: new file - add the version header
358 358 for bytes in _encodemarkers(new, offset == 0):
359 359 f.write(bytes)
360 360 finally:
361 361 # XXX: f.close() == filecache invalidation == obsstore rebuilt.
362 362 # call 'filecacheentry.refresh()' here
363 363 f.close()
364 364 self._load(new)
365 365 # new markers *may* have changed several sets. invalidate the caches.
366 366 self.caches.clear()
367 367 return len(new)
368 368
369 369 def mergemarkers(self, transaction, data):
370 370 markers = _readmarkers(data)
371 371 self.add(transaction, markers)
372 372
373 373 def _load(self, markers):
374 374 for mark in markers:
375 375 self._all.append(mark)
376 376 pre, sucs = mark[:2]
377 377 self.successors.setdefault(pre, set()).add(mark)
378 378 for suc in sucs:
379 379 self.precursors.setdefault(suc, set()).add(mark)
380 380 if node.nullid in self.precursors:
381 381 raise util.Abort(_('bad obsolescence marker detected: '
382 382 'invalid successors nullid'))
383 383
384 384 def _encodemarkers(markers, addheader=False):
385 385 # Kept separate from flushmarkers(), it will be reused for
386 386 # markers exchange.
387 387 if addheader:
388 388 yield _pack('>B', _fmversion)
389 389 for marker in markers:
390 390 yield _encodeonemarker(marker)
391 391
392 392
393 393 def _encodeonemarker(marker):
394 394 pre, sucs, flags, metadata, date, parents = marker
395 395 metadata = decodemeta(metadata)
396 396 metadata['date'] = '%d %i' % date
397 397 if parents is not None:
398 398 if not parents:
399 399 # mark that we explicitly recorded no parents
400 400 metadata['p0'] = ''
401 for i, p in enumerate(parents, 1):
402 metadata['p%i' % i] = node.hex(p)
401 for i, p in enumerate(parents):
402 metadata['p%i' % (i + 1)] = node.hex(p)
403 403 metadata = encodemeta(metadata)
404 404 nbsuc = len(sucs)
405 405 format = _fmfixed + (_fmnode * nbsuc)
406 406 data = [nbsuc, len(metadata), flags, pre]
407 407 data.extend(sucs)
408 408 return _pack(format, *data) + metadata
409 409
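# The enumerate() change above is the point of this commit: the two-argument
# form enumerate(parents, 1) only appeared in Python 2.6, and Mercurial still
# supported older interpreters at this point. Both spellings yield the same
# (index, parent) pairs:
#
#   >>> [(i + 1, p) for i, p in enumerate(['p1node', 'p2node'])]
#   [(1, 'p1node'), (2, 'p2node')]
#
# which matches list(enumerate(['p1node', 'p2node'], 1)) on Python >= 2.6.
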
410 410 # arbitrarily picked to fit into the 8K limit from HTTP servers
411 411 # you have to take into account:
412 412 # - the version header
413 413 # - the base85 encoding
414 414 _maxpayload = 5300
415 415
416 416 def _pushkeyescape(markers):
417 417 """encode markers into a dict suitable for pushkey exchange
418 418
419 419 - binary data is base85 encoded
420 420 - split into chunks smaller than 5300 bytes"""
421 421 keys = {}
422 422 parts = []
423 423 currentlen = _maxpayload * 2 # ensure we create a new part
424 424 for marker in markers:
425 425 nextdata = _encodeonemarker(marker)
426 426 if (len(nextdata) + currentlen > _maxpayload):
427 427 currentpart = []
428 428 currentlen = 0
429 429 parts.append(currentpart)
430 430 currentpart.append(nextdata)
431 431 currentlen += len(nextdata)
432 432 for idx, part in enumerate(reversed(parts)):
433 433 data = ''.join([_pack('>B', _fmversion)] + part)
434 434 keys['dump%i' % idx] = base85.b85encode(data)
435 435 return keys
436 436
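# Schematic illustration of the chunking above (made-up sizes): with three
# markers whose encodings are 3000, 3000 and 100 bytes long, the first
# marker fills one part and the next two share a second one, so pushkey
# would see:
#
#   {'dump0': base85(version header + marker2 + marker3),
#    'dump1': base85(version header + marker1)}
#
# Parts are enumerated in reverse, so 'dump0' names the most recent part.
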
437 437 def listmarkers(repo):
438 438 """List markers over pushkey"""
439 439 if not repo.obsstore:
440 440 return {}
441 441 return _pushkeyescape(repo.obsstore)
442 442
443 443 def pushmarker(repo, key, old, new):
444 444 """Push markers over pushkey"""
445 445 if not key.startswith('dump'):
446 446 repo.ui.warn(_('unknown key: %r') % key)
447 447 return 0
448 448 if old:
449 449 repo.ui.warn(_('unexpected old value for %r') % key)
450 450 return 0
451 451 data = base85.b85decode(new)
452 452 lock = repo.lock()
453 453 try:
454 454 tr = repo.transaction('pushkey: obsolete markers')
455 455 try:
456 456 repo.obsstore.mergemarkers(tr, data)
457 457 tr.close()
458 458 return 1
459 459 finally:
460 460 tr.release()
461 461 finally:
462 462 lock.release()
463 463
464 464 def allmarkers(repo):
465 465 """all obsolete markers known in a repository"""
466 466 for markerdata in repo.obsstore:
467 467 yield marker(repo, markerdata)
468 468
469 469 def precursormarkers(ctx):
470 470 """obsolete markers marking this changeset as a successor"""
471 471 for data in ctx._repo.obsstore.precursors.get(ctx.node(), ()):
472 472 yield marker(ctx._repo, data)
473 473
474 474 def successormarkers(ctx):
475 475 """obsolete markers making this changeset obsolete"""
476 476 for data in ctx._repo.obsstore.successors.get(ctx.node(), ()):
477 477 yield marker(ctx._repo, data)
478 478
479 479 def allsuccessors(obsstore, nodes, ignoreflags=0):
480 480 """Yield node for every successor of <nodes>.
481 481
482 482 Some successors may be unknown locally.
483 483
484 484 This is a linear yield unsuited to detecting split changesets. It includes
485 485 initial nodes too."""
486 486 remaining = set(nodes)
487 487 seen = set(remaining)
488 488 while remaining:
489 489 current = remaining.pop()
490 490 yield current
491 491 for mark in obsstore.successors.get(current, ()):
492 492 # ignore marker flagged with specified flag
493 493 if mark[2] & ignoreflags:
494 494 continue
495 495 for suc in mark[1]:
496 496 if suc not in seen:
497 497 seen.add(suc)
498 498 remaining.add(suc)
499 499
500 500 def allprecursors(obsstore, nodes, ignoreflags=0):
501 501 """Yield node for every precursor of <nodes>.
502 502
503 503 Some precursors may be unknown locally.
504 504
505 505 This is a linear yield unsuited to detecting folded changesets. It includes
506 506 initial nodes too."""
507 507
508 508 remaining = set(nodes)
509 509 seen = set(remaining)
510 510 while remaining:
511 511 current = remaining.pop()
512 512 yield current
513 513 for mark in obsstore.precursors.get(current, ()):
514 514 # ignore marker flagged with specified flag
515 515 if mark[2] & ignoreflags:
516 516 continue
517 517 suc = mark[0]
518 518 if suc not in seen:
519 519 seen.add(suc)
520 520 remaining.add(suc)
521 521
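# Self-contained sketch of the walk performed by allsuccessors() over a
# plain successor map, runnable outside Mercurial (the toysuccessors name
# and the toy map are hypothetical):
#
#   def toysuccessors(succmap, nodes):
#       remaining = set(nodes)
#       seen = set(remaining)
#       while remaining:
#           current = remaining.pop()
#           yield current
#           for sucs in succmap.get(current, ()):
#               for suc in sucs:
#                   if suc not in seen:
#                       seen.add(suc)
#                       remaining.add(suc)
#
#   >>> sorted(toysuccessors({'A': [('B',)], 'B': [('C', 'D')]}, ['A']))
#   ['A', 'B', 'C', 'D']
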
522 522 def foreground(repo, nodes):
523 523 """return all nodes in the "foreground" of other nodes
524 524
525 525 The foreground of a revision is anything reachable using parent -> children
526 526 or precursor -> successor relation. It is very similar to "descendant" but
527 527 augmented with obsolescence information.
528 528
529 529 Beware that possible obsolescence cycles may result in complex situations.
530 530 """
531 531 repo = repo.unfiltered()
532 532 foreground = set(repo.set('%ln::', nodes))
533 533 if repo.obsstore:
534 534 # We only need this complicated logic if there is obsolescence
535 535 # XXX will probably deserve an optimised revset.
536 536 nm = repo.changelog.nodemap
537 537 plen = -1
538 538 # compute the whole set of successors or descendants
539 539 while len(foreground) != plen:
540 540 plen = len(foreground)
541 541 succs = set(c.node() for c in foreground)
542 542 mutable = [c.node() for c in foreground if c.mutable()]
543 543 succs.update(allsuccessors(repo.obsstore, mutable))
544 544 known = (n for n in succs if n in nm)
545 545 foreground = set(repo.set('%ln::', known))
546 546 return set(c.node() for c in foreground)
547 547
548 548
549 549 def successorssets(repo, initialnode, cache=None):
550 550 """Return all sets of successors of the initial node
551 551
552 552 The successors set of a changeset A is a group of revisions that succeed
553 553 A. It succeeds A as a consistent whole, each revision being only a partial
554 554 replacement. The successors set contains non-obsolete changesets only.
555 555
556 556 This function returns the full list of successor sets which is why it
557 557 returns a list of tuples and not just a single tuple. Each tuple is a valid
558 558 successors set. Note that (A,) may be a valid successors set for changeset A
559 559 (see below).
560 560
561 561 In most cases, a changeset A will have a single element (e.g. the changeset
562 562 A is replaced by A') in its successors set. Though, it is also common for a
563 563 changeset A to have no elements in its successor set (e.g. the changeset
564 564 has been pruned). Therefore, the returned list of successors sets will be
565 565 [(A',)] or [], respectively.
566 566
567 567 When a changeset A is split into A' and B', however, it will result in a
568 568 successors set containing more than a single element, i.e. [(A',B')].
569 569 Divergent changesets will result in multiple successors sets, i.e. [(A',),
570 570 (A'')].
571 571
572 572 If a changeset A is not obsolete, then it will conceptually have no
573 573 successors set. To distinguish this from a pruned changeset, the successor
574 574 set will only contain itself, i.e. [(A,)].
575 575
576 576 Finally, successors unknown locally are considered to be pruned (obsoleted
577 577 without any successors).
578 578
579 579 The optional `cache` parameter is a dictionary that may contain precomputed
580 580 successors sets. It is meant to reuse the computation of a previous call to
581 581 `successorssets` when multiple calls are made at the same time. The cache
582 582 dictionary is updated in place. The caller is responsible for its life
583 583 span. Code that makes multiple calls to `successorssets` *must* use this
584 584 cache mechanism or suffer terrible performance.
585 585
586 586 """
587 587
588 588 succmarkers = repo.obsstore.successors
589 589
590 590 # Stack of nodes we search successors sets for
591 591 toproceed = [initialnode]
592 592 # set version of above list for fast loop detection
593 593 # element added to "toproceed" must be added here
594 594 stackedset = set(toproceed)
595 595 if cache is None:
596 596 cache = {}
597 597
598 598 # This while loop is the flattened version of a recursive search for
599 599 # successors sets
600 600 #
601 601 # def successorssets(x):
602 602 # successors = directsuccessors(x)
603 603 # ss = [[]]
604 604 # for succ in directsuccessors(x):
605 605 # # product as in itertools cartesian product
606 606 # ss = product(ss, successorssets(succ))
607 607 # return ss
608 608 #
609 609 # But we cannot use plain recursive calls here:
610 610 # - that would blow the python call stack
611 611 # - obsolescence markers may have cycles, we need to handle them.
612 612 #
613 613 # The `toproceed` list acts as our call stack. Every node we search
614 614 # successors sets for is stacked there.
615 615 #
616 616 # The `stackedset` is a set version of this stack, used to check if a
617 617 # node is already stacked. This check is used to detect cycles and
618 618 # prevent infinite loops.
619 619 #
620 620 # successors sets of all nodes are stored in the `cache` dictionary.
621 621 #
622 622 # After this while loop ends we use the cache to return the successors sets
623 623 # for the node requested by the caller.
624 624 while toproceed:
625 625 # Every iteration tries to compute the successors sets of the topmost
626 626 # node of the stack: CURRENT.
627 627 #
628 628 # There are four possible outcomes:
629 629 #
630 630 # 1) We already know the successors sets of CURRENT:
631 631 # -> mission accomplished, pop it from the stack.
632 632 # 2) Node is not obsolete:
633 633 # -> the node is its own successors sets. Add it to the cache.
634 634 # 3) We do not know successors set of direct successors of CURRENT:
635 635 # -> We add those successors to the stack.
636 636 # 4) We know successors sets of all direct successors of CURRENT:
637 637 # -> We can compute CURRENT successors set and add it to the
638 638 # cache.
639 639 #
640 640 current = toproceed[-1]
641 641 if current in cache:
642 642 # case (1): We already know the successors sets
643 643 stackedset.remove(toproceed.pop())
644 644 elif current not in succmarkers:
645 645 # case (2): The node is not obsolete.
646 646 if current in repo:
647 647 # We have a valid last successor.
648 648 cache[current] = [(current,)]
649 649 else:
650 650 # Final obsolete version is unknown locally.
651 651 # Do not count that as a valid successors
652 652 cache[current] = []
653 653 else:
654 654 # cases (3) and (4)
655 655 #
656 656 # We proceed in two phases. Phase 1 aims to distinguish case (3)
657 657 # from case (4):
658 658 #
659 659 # For each direct successors of CURRENT, we check whether its
660 660 # successors sets are known. If they are not, we stack the
661 661 # unknown node and proceed to the next iteration of the while
662 662 # loop. (case 3)
663 663 #
664 664 # During this step, we may detect obsolescence cycles: a node
665 665 # with unknown successors sets but already in the call stack.
666 666 # In such a situation, we arbitrarily set the successors sets of
667 667 # the node to nothing (node pruned) to break the cycle.
668 668 #
669 669 # If no break was encountered we proceed to phase 2.
670 670 #
671 671 # Phase 2 computes successors sets of CURRENT (case 4); see details
672 672 # in phase 2 itself.
673 673 #
674 674 # Note the two levels of iteration in each phase.
675 675 # - The first one handles obsolescence markers using CURRENT as
676 676 # precursor (successors markers of CURRENT).
677 677 #
678 678 # Having multiple entry here means divergence.
679 679 #
680 680 # - The second one handles successors defined in each marker.
681 681 #
682 682 # Having none means pruned node, multiple successors means split,
683 683 # single successors are standard replacement.
684 684 #
685 685 for mark in sorted(succmarkers[current]):
686 686 for suc in mark[1]:
687 687 if suc not in cache:
688 688 if suc in stackedset:
689 689 # cycle breaking
690 690 cache[suc] = []
691 691 else:
692 692 # case (3) If we have not computed successors sets
693 693 # of one of those successors we add it to the
694 694 # `toproceed` stack and stop all work for this
695 695 # iteration.
696 696 toproceed.append(suc)
697 697 stackedset.add(suc)
698 698 break
699 699 else:
700 700 continue
701 701 break
702 702 else:
703 703 # case (4): we know all successors sets of all direct
704 704 # successors
705 705 #
706 706 # Successors set contributed by each marker depends on the
707 707 # successors sets of all its "successors" node.
708 708 #
709 709 # Each different marker is a divergence in the obsolescence
710 710 # history. It contributes successors sets distinct from other
711 711 # markers.
712 712 #
713 713 # Within a marker, a successor may have divergent successors
714 714 # sets. In such a case, the marker will contribute multiple
715 715 # divergent successors sets. If multiple successors have
716 716 # divergent successors sets, a Cartesian product is used.
717 717 #
718 718 # At the end we post-process successors sets to remove
719 719 # duplicated entries and successors sets that are strict subsets of
720 720 # another one.
721 721 succssets = []
722 722 for mark in sorted(succmarkers[current]):
723 723 # successors sets contributed by this marker
724 724 markss = [[]]
725 725 for suc in mark[1]:
726 726 # cartesian product with previous successors
727 727 productresult = []
728 728 for prefix in markss:
729 729 for suffix in cache[suc]:
730 730 newss = list(prefix)
731 731 for part in suffix:
732 732 # do not duplicate entries in a successors set;
733 733 # the first entry wins.
734 734 if part not in newss:
735 735 newss.append(part)
736 736 productresult.append(newss)
737 737 markss = productresult
738 738 succssets.extend(markss)
739 739 # remove duplicates and subsets
740 740 seen = []
741 741 final = []
742 742 candidate = sorted(((set(s), s) for s in succssets if s),
743 743 key=lambda x: len(x[1]), reverse=True)
744 744 for setversion, listversion in candidate:
745 745 for seenset in seen:
746 746 if setversion.issubset(seenset):
747 747 break
748 748 else:
749 749 final.append(listversion)
750 750 seen.append(setversion)
751 751 final.reverse() # put small successors set first
752 752 cache[current] = final
753 753 return cache[initialnode]
754 754
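# Worked examples for the cases described in the docstring above (toy
# nodes, assuming the listed markers are the only ones in the obsstore):
#
#   markers in store        conceptual result of successorssets(repo, A)
#   ---------------------   --------------------------------------------
#   none                    [(A,)]        A is not obsolete
#   (A, ())                 []            A was pruned
#   (A, (B,))               [(B,)]        plain rewrite
#   (A, (B, C))             [(B, C)]      split: one set, two members
#   (A, (B,)), (A, (C,))    [(B,), (C,)]  divergence: two competing sets
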
755 755 def _knownrevs(repo, nodes):
756 756 """yield revision numbers of known nodes passed in parameters
757 757
758 758 Unknown revisions are silently ignored."""
759 759 torev = repo.changelog.nodemap.get
760 760 for n in nodes:
761 761 rev = torev(n)
762 762 if rev is not None:
763 763 yield rev
764 764
765 765 # mapping of 'set-name' -> <function to compute this set>
766 766 cachefuncs = {}
767 767 def cachefor(name):
768 768 """Decorator to register a function as computing the cache for a set"""
769 769 def decorator(func):
770 770 assert name not in cachefuncs
771 771 cachefuncs[name] = func
772 772 return func
773 773 return decorator
774 774
775 775 def getrevs(repo, name):
776 776 """Return the set of revisions that belong to the <name> set
777 777
778 778 Such access may compute the set and cache it for future use"""
779 779 repo = repo.unfiltered()
780 780 if not repo.obsstore:
781 781 return ()
782 782 if name not in repo.obsstore.caches:
783 783 repo.obsstore.caches[name] = cachefuncs[name](repo)
784 784 return repo.obsstore.caches[name]
785 785
786 786 # To be simple we need to invalidate obsolescence caches when:
787 787 #
788 788 # - a new changeset is added
789 789 # - the public phase is changed
790 790 # - obsolescence markers are added
791 791 # - strip is used on a repo
792 792 def clearobscaches(repo):
793 793 """Remove all obsolescence related caches from a repo
794 794 
795 795 This removes all caches in the obsstore if the obsstore already exists on
796 796 the repo.
797 797 
798 798 (We could be smarter here given the exact event that triggered the cache
799 799 clearing)"""
800 800 # only clear caches if there is obsstore data in this repo
801 801 if 'obsstore' in repo._filecache:
802 802 repo.obsstore.caches.clear()
803 803
804 804 @cachefor('obsolete')
805 805 def _computeobsoleteset(repo):
806 806 """the set of obsolete revisions"""
807 807 obs = set()
808 808 getrev = repo.changelog.nodemap.get
809 809 getphase = repo._phasecache.phase
810 810 for n in repo.obsstore.successors:
811 811 rev = getrev(n)
812 812 if rev is not None and getphase(repo, rev):
813 813 obs.add(rev)
814 814 return obs
815 815
816 816 @cachefor('unstable')
817 817 def _computeunstableset(repo):
818 818 """the set of non obsolete revisions with obsolete parents"""
819 819 # revset is not efficient enough here
820 820 # we do (obsolete()::) - obsolete() by hand
821 821 obs = getrevs(repo, 'obsolete')
822 822 if not obs:
823 823 return set()
824 824 cl = repo.changelog
825 825 return set(r for r in cl.descendants(obs) if r not in obs)
826 826
827 827 @cachefor('suspended')
828 828 def _computesuspendedset(repo):
829 829 """the set of obsolete parents with non obsolete descendants"""
830 830 suspended = repo.changelog.ancestors(getrevs(repo, 'unstable'))
831 831 return set(r for r in getrevs(repo, 'obsolete') if r in suspended)
832 832
833 833 @cachefor('extinct')
834 834 def _computeextinctset(repo):
835 835 """the set of obsolete parents without non obsolete descendants"""
836 836 return getrevs(repo, 'obsolete') - getrevs(repo, 'suspended')
837 837
838 838
839 839 @cachefor('bumped')
840 840 def _computebumpedset(repo):
841 841 """the set of revs trying to obsolete public revisions"""
842 842 bumped = set()
843 843 # util function (avoid attribute lookup in the loop)
844 844 phase = repo._phasecache.phase # would be faster to grab the full list
845 845 public = phases.public
846 846 cl = repo.changelog
847 847 torev = cl.nodemap.get
848 848 obs = getrevs(repo, 'obsolete')
849 849 for rev in repo:
850 850 # We only evaluate mutable, non-obsolete revisions
851 851 if (public < phase(repo, rev)) and (rev not in obs):
852 852 node = cl.node(rev)
853 853 # (future) A cache of precursors may be worth it if splits are very common
854 854 for pnode in allprecursors(repo.obsstore, [node],
855 855 ignoreflags=bumpedfix):
856 856 prev = torev(pnode) # unfiltered! but so is phasecache
857 857 if (prev is not None) and (phase(repo, prev) <= public):
858 858 # we have a public precursor
859 859 bumped.add(rev)
860 860 break # Next draft!
861 861 return bumped
862 862
863 863 @cachefor('divergent')
864 864 def _computedivergentset(repo):
865 865 """the set of revs that compete to be the final successors of some revision.
866 866 """
867 867 divergent = set()
868 868 obsstore = repo.obsstore
869 869 newermap = {}
870 870 for ctx in repo.set('(not public()) - obsolete()'):
871 871 mark = obsstore.precursors.get(ctx.node(), ())
872 872 toprocess = set(mark)
873 873 while toprocess:
874 874 prec = toprocess.pop()[0]
875 875 if prec not in newermap:
876 876 successorssets(repo, prec, newermap)
877 877 newer = [n for n in newermap[prec] if n]
878 878 if len(newer) > 1:
879 879 divergent.add(ctx.rev())
880 880 break
881 881 toprocess.update(obsstore.precursors.get(prec, ()))
882 882 return divergent
883 883
884 884
885 885 def createmarkers(repo, relations, flag=0, date=None, metadata=None):
886 886 """Add obsolete markers between changesets in a repo
887 887
888 888 <relations> must be an iterable of (<old>, (<new>, ...)[,{metadata}])
889 889 tuples. `old` and `news` are changectx objects. metadata is an optional
890 890 dictionary containing metadata for this marker only. It is merged with the
891 891 global metadata specified through the `metadata` argument of this function.
892 892
893 893 Trying to obsolete a public changeset will raise an exception.
894 894
895 895 The current user and date are used unless specified otherwise in the
896 896 metadata argument.
897 897
898 898 This function operates within a transaction of its own, but does
899 899 not take any lock on the repo.
900 900 """
901 901 # prepare metadata
902 902 if metadata is None:
903 903 metadata = {}
904 904 if 'user' not in metadata:
905 905 metadata['user'] = repo.ui.username()
906 906 tr = repo.transaction('add-obsolescence-marker')
907 907 try:
908 908 for rel in relations:
909 909 prec = rel[0]
910 910 sucs = rel[1]
911 911 localmetadata = metadata.copy()
912 912 if 2 < len(rel):
913 913 localmetadata.update(rel[2])
914 914
915 915 if not prec.mutable():
916 916 raise util.Abort("cannot obsolete immutable changeset: %s"
917 917 % prec)
918 918 nprec = prec.node()
919 919 nsucs = tuple(s.node() for s in sucs)
920 920 npare = None
921 921 if not nsucs:
922 922 npare = tuple(p.node() for p in prec.parents())
923 923 if nprec in nsucs:
924 924 raise util.Abort("changeset %s cannot obsolete itself" % prec)
925 925 repo.obsstore.create(tr, nprec, nsucs, flag, parents=npare,
926 926 date=date, metadata=localmetadata)
927 927 repo.filteredrevcache.clear()
928 928 tr.close()
929 929 finally:
930 930 tr.release()
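
# Minimal usage sketch for createmarkers() (hypothetical contexts; assumes
# _enabled is True and `newnode` names the replacement changeset):
#
#   old = repo['.']                    # changectx being rewritten
#   new = repo[newnode]                # its replacement
#   createmarkers(repo, [(old, (new,))], metadata={'note': 'example'})
#
# A prune is recorded with an empty successor tuple: (old, ()).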