##// END OF EJS Templates
obsolete: use ProgrammingError over assert for volatile set registration...
Pierre-Yves David -
r32884:9d472b21 default
parent child Browse files
Show More
@@ -1,1459 +1,1461 b''
1 1 # obsolete.py - obsolete markers handling
2 2 #
3 3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
4 4 # Logilab SA <contact@logilab.fr>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 """Obsolete marker handling
10 10
11 11 An obsolete marker maps an old changeset to a list of new
12 12 changesets. If the list of new changesets is empty, the old changeset
13 13 is said to be "killed". Otherwise, the old changeset is being
14 14 "replaced" by the new changesets.
15 15
16 16 Obsolete markers can be used to record and distribute changeset graph
17 17 transformations performed by history rewrite operations, and help
18 18 building new tools to reconcile conflicting rewrite actions. To
19 19 facilitate conflict resolution, markers include various annotations
besides old and new changeset identifiers, such as creation date or
21 21 author name.
22 22
23 23 The old obsoleted changeset is called a "precursor" and possible
24 24 replacements are called "successors". Markers that used changeset X as
25 25 a precursor are called "successor markers of X" because they hold
26 26 information about the successors of X. Markers that use changeset Y as
a successor are called "precursor markers of Y" because they hold
28 28 information about the precursors of Y.
29 29
30 30 Examples:
31 31
32 32 - When changeset A is replaced by changeset A', one marker is stored:
33 33
34 34 (A, (A',))
35 35
36 36 - When changesets A and B are folded into a new changeset C, two markers are
37 37 stored:
38 38
39 39 (A, (C,)) and (B, (C,))
40 40
41 41 - When changeset A is simply "pruned" from the graph, a marker is created:
42 42
43 43 (A, ())
44 44
45 45 - When changeset A is split into B and C, a single marker is used:
46 46
47 47 (A, (B, C))
48 48
49 49 We use a single marker to distinguish the "split" case from the "divergence"
50 50 case. If two independent operations rewrite the same changeset A in to A' and
51 51 A'', we have an error case: divergent rewriting. We can detect it because
52 52 two markers will be created independently:
53 53
54 54 (A, (B,)) and (A, (C,))
55 55
56 56 Format
57 57 ------
58 58
59 59 Markers are stored in an append-only file stored in
60 60 '.hg/store/obsstore'.
61 61
62 62 The file starts with a version header:
63 63
64 64 - 1 unsigned byte: version number, starting at zero.
65 65
The header is followed by the markers. Marker format depends on the version. See
67 67 comment associated with each format for details.
68 68
69 69 """
70 70 from __future__ import absolute_import
71 71
72 72 import errno
73 73 import struct
74 74
75 75 from .i18n import _
76 76 from . import (
77 77 error,
78 78 node,
79 79 phases,
80 80 policy,
81 81 util,
82 82 )
83 83
parsers = policy.importmod(r'parsers')

# local aliases to avoid repeated attribute lookups in hot loops
_pack = struct.pack
_unpack = struct.unpack
_calcsize = struct.calcsize
propertycache = util.propertycache

# the obsolete feature is not mature enough to be enabled by default.
# you have to rely on a third party extension to enable this.
_enabled = False

# Options for obsolescence
createmarkersopt = 'createmarkers'   # allow local creation of markers
allowunstableopt = 'allowunstable'   # allow rewriting changesets with children
exchangeopt = 'exchange'             # allow marker exchange with other repos
def isenabled(repo, option):
    """Returns True if the given repository has the given obsolete option
    enabled.
    """
    result = set(repo.ui.configlist('experimental', 'evolution'))
    # 'all' is a wildcard turning on every obsolescence option
    if 'all' in result:
        return True

    # For migration purposes, temporarily return true if the config hasn't been
    # set but _enabled is true.
    if len(result) == 0 and _enabled:
        return True

    # createmarkers must be enabled if other options are enabled
    if ((allowunstableopt in result or exchangeopt in result) and
        createmarkersopt not in result):
        raise error.Abort(_("'createmarkers' obsolete option must be enabled "
                            "if other obsolete options are enabled"))

    return option in result
120 120
### obsolescence marker flag

## bumpedfix flag
#
# When a changeset A' succeeds a changeset A which became public, we call A'
# "bumped" because it's a successor of a public changeset
#
# o A' (bumped)
# |`:
# | o A
# |/
# o Z
#
# The way to solve this situation is to create a new changeset Ad as child
# of A. This changeset has the same content as A'. So the diff from A to A'
# is the same as the diff from A to Ad. Ad is marked as a successor of A'
#
# o Ad
# |`:
# | x A'
# |'|
# o | A
# |/
# o Z
#
# But by transitivity Ad is also a successor of A. To avoid having Ad marked
# as bumped too, we add the `bumpedfix` flag to the marker, <A', (Ad,)>.
# This flag means that the successors express the changes between the public
# and bumped version and fix the situation, breaking the transitivity of
# "bumped" here.
bumpedfix = 1
usingsha256 = 2
153 153
## Parsing and writing of version "0"
#
# The header is followed by the markers. Each marker is made of:
#
# - 1 uint8 : number of new changesets "N", can be zero.
#
# - 1 uint32: metadata size "M" in bytes.
#
# - 1 byte: a bit field. It is reserved for flags used in common
#   obsolete marker operations, to avoid repeated decoding of metadata
#   entries.
#
# - 20 bytes: obsoleted changeset identifier.
#
# - N*20 bytes: new changesets identifiers.
#
# - M bytes: metadata as a sequence of nul-terminated strings. Each
#   string contains a key and a value, separated by a colon ':', without
#   additional encoding. Keys cannot contain '\0' or ':' and values
#   cannot contain '\0'.
_fm0version = 0
# fixed-width prefix: uint8 N, uint32 M, uint8 flags, 20-byte precursor node
_fm0fixed = '>BIB20s'
_fm0node = '20s'
_fm0fsize = _calcsize(_fm0fixed)
_fm0fnodesize = _calcsize(_fm0node)
179 179
def _fm0readmarkers(data, off):
    """Yield marker tuples decoded from version-0 binary ``data``.

    ``off`` is the byte offset at which the first marker starts.  Each
    yielded tuple is (prec, sucs, flags, metadata, date, parents); date
    and parents are recovered from well-known metadata keys since the
    version-0 format has no dedicated fields for them.
    """
    # Loop on markers
    l = len(data)
    while off + _fm0fsize <= l:
        # read fixed part
        cur = data[off:off + _fm0fsize]
        off += _fm0fsize
        numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
        # read replacement
        sucs = ()
        if numsuc:
            s = (_fm0fnodesize * numsuc)
            cur = data[off:off + s]
            sucs = _unpack(_fm0node * numsuc, cur)
            off += s
        # read metadata
        # (metadata will be decoded on demand)
        metadata = data[off:off + mdsize]
        if len(metadata) != mdsize:
            raise error.Abort(_('parsing obsolete marker: metadata is too '
                                'short, %d bytes expected, got %d')
                              % (mdsize, len(metadata)))
        off += mdsize
        metadata = _fm0decodemeta(metadata)
        # 'date' is stored as "<float-seconds> <int-offset>" in the metadata
        try:
            when, offset = metadata.pop('date', '0 0').split(' ')
            date = float(when), int(offset)
        except ValueError:
            date = (0., 0)
        # parents are stored as 'p1'/'p2' metadata keys; 'p0' marks an
        # explicit record of "no parents"
        parents = None
        if 'p2' in metadata:
            parents = (metadata.pop('p1', None), metadata.pop('p2', None))
        elif 'p1' in metadata:
            parents = (metadata.pop('p1', None),)
        elif 'p0' in metadata:
            parents = ()
        if parents is not None:
            try:
                parents = tuple(node.bin(p) for p in parents)
                # if parent content is not a nodeid, drop the data
                for p in parents:
                    if len(p) != 20:
                        parents = None
                        break
            except TypeError:
                # if content cannot be translated to nodeid drop the data.
                parents = None

        metadata = tuple(sorted(metadata.iteritems()))

        yield (pre, sucs, flags, metadata, date, parents)
231 231
def _fm0encodeonemarker(marker):
    """Serialize a single marker tuple into the version-0 binary format.

    Date and parents have no dedicated fields in this format, so they are
    folded into the metadata dictionary before encoding.
    """
    pre, sucs, flags, metadata, date, parents = marker
    if flags & usingsha256:
        raise error.Abort(_('cannot handle sha256 with old obsstore format'))
    meta = dict(metadata)
    when, offset = date
    meta['date'] = '%r %i' % (when, offset)
    if parents is not None:
        if not parents:
            # mark that we explicitly recorded no parents
            meta['p0'] = ''
        for idx, parent in enumerate(parents, 1):
            meta['p%i' % idx] = node.hex(parent)
    encodedmeta = _fm0encodemeta(meta)
    nbsuc = len(sucs)
    fmt = _fm0fixed + (_fm0node * nbsuc)
    fields = [nbsuc, len(encodedmeta), flags, pre]
    fields.extend(sucs)
    return _pack(fmt, *fields) + encodedmeta
251 251
252 252 def _fm0encodemeta(meta):
253 253 """Return encoded metadata string to string mapping.
254 254
255 255 Assume no ':' in key and no '\0' in both key and value."""
256 256 for key, value in meta.iteritems():
257 257 if ':' in key or '\0' in key:
258 258 raise ValueError("':' and '\0' are forbidden in metadata key'")
259 259 if '\0' in value:
260 260 raise ValueError("':' is forbidden in metadata value'")
261 261 return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])
262 262
263 263 def _fm0decodemeta(data):
264 264 """Return string to string dictionary from encoded version."""
265 265 d = {}
266 266 for l in data.split('\0'):
267 267 if l:
268 268 key, value = l.split(':')
269 269 d[key] = value
270 270 return d
271 271
## Parsing and writing of version "1"
#
# The header is followed by the markers. Each marker is made of:
#
# - uint32: total size of the marker (including this field)
#
# - float64: date in seconds since epoch
#
# - int16: timezone offset in minutes
#
# - uint16: a bit field. It is reserved for flags used in common
#   obsolete marker operations, to avoid repeated decoding of metadata
#   entries.
#
# - uint8: number of successors "N", can be zero.
#
# - uint8: number of parents "P", can be zero.
#
#     0: parents data stored but no parent,
#     1: one parent stored,
#     2: two parents stored,
#     3: no parent data stored
#
# - uint8: number of metadata entries M
#
# - 20 or 32 bytes: precursor changeset identifier.
#
# - N*(20 or 32) bytes: successors changesets identifiers.
#
# - P*(20 or 32) bytes: parents of the precursors changesets.
#
# - M*(uint8, uint8): size of all metadata entries (key and value)
#
# - remaining bytes: the metadata, each (key, value) pair after the other.
_fm1version = 1
# fixed part: size, date, tz, flags, numsuc, numpar, nummeta, precursor
_fm1fixed = '>IdhHBBB20s'
_fm1nodesha1 = '20s'
_fm1nodesha256 = '32s'
_fm1nodesha1size = _calcsize(_fm1nodesha1)
_fm1nodesha256size = _calcsize(_fm1nodesha256)
_fm1fsize = _calcsize(_fm1fixed)
# "no parent data stored" sentinel for the parent-count field
_fm1parentnone = 3
_fm1parentshift = 14
_fm1parentmask = (_fm1parentnone << _fm1parentshift)
# per-entry (key length, value length) pair preceding the metadata blob
_fm1metapair = 'BB'
_fm1metapairsize = _calcsize('BB')
318 318
def _fm1purereadmarkers(data, off):
    """Yield marker tuples decoded from version-1 binary ``data``.

    Pure-python fallback used when the C parser is unavailable (see
    _fm1readmarkers).  ``off`` is the byte offset of the first marker.
    """
    # make some global constants local for performance
    noneflag = _fm1parentnone
    sha2flag = usingsha256
    sha1size = _fm1nodesha1size
    sha2size = _fm1nodesha256size
    sha1fmt = _fm1nodesha1
    sha2fmt = _fm1nodesha256
    metasize = _fm1metapairsize
    metafmt = _fm1metapair
    fsize = _fm1fsize
    unpack = _unpack

    # Loop on markers
    stop = len(data) - _fm1fsize
    # pre-compiled Struct avoids re-parsing the format string per marker
    ufixed = struct.Struct(_fm1fixed).unpack

    while off <= stop:
        # read fixed part
        o1 = off + fsize
        t, secs, tz, flags, numsuc, numpar, nummeta, prec = ufixed(data[off:o1])

        if flags & sha2flag:
            # FIXME: prec was read as a SHA1, needs to be amended

            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha2size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha2size * numsuc
                sucs = unpack(sha2fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha2size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha2size * numpar
                parents = unpack(sha2fmt * numpar, data[o2:o3])
        else:
            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha1size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha1size * numsuc
                sucs = unpack(sha1fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha1size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha1size * numpar
                parents = unpack(sha1fmt * numpar, data[o2:o3])

        # read metadata: first the (key length, value length) pairs, then
        # the concatenated key/value bytes themselves
        off = o3 + metasize * nummeta
        metapairsize = unpack('>' + (metafmt * nummeta), data[o3:off])
        metadata = []
        for idx in xrange(0, len(metapairsize), 2):
            o1 = off + metapairsize[idx]
            o2 = o1 + metapairsize[idx + 1]
            metadata.append((data[off:o1], data[o1:o2]))
            off = o2

        # tz was stored in minutes; convert back to seconds
        yield (prec, sucs, flags, tuple(metadata), (secs, tz * 60), parents)
393 393
def _fm1encodeonemarker(marker):
    """Serialize a single marker tuple into the version-1 binary format.

    The first uint32 field holds the total marker size; it is computed
    after the variable-length parts are known, then patched into data[0].
    """
    pre, sucs, flags, metadata, date, parents = marker
    # determine node size
    _fm1node = _fm1nodesha1
    if flags & usingsha256:
        _fm1node = _fm1nodesha256
    numsuc = len(sucs)
    numextranodes = numsuc
    if parents is None:
        # sentinel value meaning "no parent data stored"
        numpar = _fm1parentnone
    else:
        numpar = len(parents)
        numextranodes += numpar
    formatnodes = _fm1node * numextranodes
    formatmeta = _fm1metapair * len(metadata)
    format = _fm1fixed + formatnodes + formatmeta
    # tz is stored in minutes so we divide by 60
    tz = date[1]//60
    # placeholder None for the total size, patched below once known
    data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
    data.extend(sucs)
    if parents is not None:
        data.extend(parents)
    totalsize = _calcsize(format)
    for key, value in metadata:
        lk = len(key)
        lv = len(value)
        data.append(lk)
        data.append(lv)
        totalsize += lk + lv
    data[0] = totalsize
    data = [_pack(format, *data)]
    # metadata key/value bytes follow the packed fixed-size fields
    for key, value in metadata:
        data.append(key)
        data.append(value)
    return ''.join(data)
429 429
def _fm1readmarkers(data, off):
    """Decode version-1 markers, preferring the C parser when present."""
    native = getattr(parsers, 'fm1readmarkers', None)
    if not native:
        # no compiled parser available; use the pure-python reader
        return _fm1purereadmarkers(data, off)
    return native(data, off, len(data) - _fm1fsize)
436 436
# mapping to read/write various marker formats
# <version> -> (decoder, encoder)
formats = {_fm0version: (_fm0readmarkers, _fm0encodeonemarker),
           _fm1version: (_fm1readmarkers, _fm1encodeonemarker)}

def _readmarkerversion(data):
    """Return the version number stored in the first byte of ``data``."""
    return _unpack('>B', data[0:1])[0]
444 444
@util.nogc
def _readmarkers(data):
    """Read and enumerate markers from raw data

    Returns a (version, iterable-of-markers) pair, aborting on an
    unknown on-disk version.
    """
    diskversion = _readmarkerversion(data)
    if diskversion not in formats:
        msg = _('parsing obsolete marker: unknown version %r') % diskversion
        raise error.UnknownVersion(msg, version=diskversion)
    # markers start right after the 1-byte version header
    return diskversion, formats[diskversion][0](data, 1)
454 454
def encodeheader(version=_fm0version):
    """Return the one-byte obsstore header encoding ``version``."""
    return _pack('>B', version)
457 457
def encodemarkers(markers, addheader=False, version=_fm0version):
    """Generate the binary representation of ``markers``.

    Kept separate from flushmarkers(); it will be reused for markers
    exchange.  When ``addheader`` is true, the version header is yielded
    first.
    """
    encodeone = formats[version][1]
    if addheader:
        yield encodeheader(version)
    for m in markers:
        yield encodeone(m)
466 466
467 467
class marker(object):
    """Named accessors over a raw obsolescence marker tuple."""

    def __init__(self, repo, data):
        # the repo argument will be used to create changectx in later version
        self._repo = repo
        self._data = data
        self._decodedmeta = None

    def __hash__(self):
        return hash(self._data)

    def __eq__(self, other):
        return type(self) == type(other) and self._data == other._data

    def precnode(self):
        """Precursor changeset node identifier"""
        return self._data[0]

    def succnodes(self):
        """List of successor changesets node identifiers"""
        return self._data[1]

    def flags(self):
        """The flags field of the marker"""
        return self._data[2]

    def metadata(self):
        """Decoded metadata dictionary"""
        return dict(self._data[3])

    def date(self):
        """Creation date as (unixtime, offset)"""
        return self._data[4]

    def parentnodes(self):
        """Parents of the precursors (None if not recorded)"""
        return self._data[5]
508 508
@util.nogc
def _addsuccessors(successors, markers):
    """Index ``markers`` into ``successors``, keyed by precursor node."""
    for m in markers:
        successors.setdefault(m[0], set()).add(m)
513 513
@util.nogc
def _addprecursors(precursors, markers):
    """Index ``markers`` into ``precursors``, keyed by each successor node."""
    for m in markers:
        for succnode in m[1]:
            precursors.setdefault(succnode, set()).add(m)
519 519
@util.nogc
def _addchildren(children, markers):
    """Index ``markers`` into ``children``, keyed by each recorded parent."""
    for m in markers:
        parents = m[5]
        if parents is None:
            # marker recorded no parent information
            continue
        for p in parents:
            children.setdefault(p, set()).add(m)
527 527
def _checkinvalidmarkers(markers):
    """search for marker with invalid data and raise error if needed

    Exist as a separated function to allow the evolve extension for a more
    subtle handling.
    """
    for m in markers:
        # the null node can never be a valid successor
        if node.nullid in m[1]:
            raise error.Abort(_('bad obsolescence marker detected: '
                                'invalid successors nullid'))
538 538
class obsstore(object):
    """Store obsolete markers

    Markers can be accessed with three mappings:
    - precursors[x] -> set(markers on precursors edges of x)
    - successors[x] -> set(markers on successors edges of x)
    - children[x] -> set(markers on precursors edges of children(x))
    """

    fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
    # prec: nodeid, precursor changesets
    # succs: tuple of nodeid, successor changesets (0-N length)
    # flag: integer, flag field carrying modifier for the markers (see doc)
    # meta: binary blob, encoded metadata dictionary
    # date: (float, int) tuple, date of marker creation
    # parents: (tuple of nodeid) or None, parents of precursors
    #          None is used when no data has been recorded

    def __init__(self, svfs, defaultformat=_fm1version, readonly=False):
        # caches for various obsolescence related cache
        self.caches = {}
        self.svfs = svfs
        # format used when the on-disk store does not dictate one
        self._defaultformat = defaultformat
        self._readonly = readonly

    def __iter__(self):
        return iter(self._all)

    def __len__(self):
        return len(self._all)

    def __nonzero__(self):
        if not self._cached('_all'):
            try:
                # a file holding only the 1-byte version header is empty;
                # checking st_size avoids parsing the whole store
                return self.svfs.stat('obsstore').st_size > 1
            except OSError as inst:
                if inst.errno != errno.ENOENT:
                    raise
                # just build an empty _all list if no obsstore exists, which
                # avoids further stat() syscalls
                pass
        return bool(self._all)

    __bool__ = __nonzero__

    @property
    def readonly(self):
        """True if marker creation is disabled

        Remove me in the future when obsolete marker is always on."""
        return self._readonly

    def create(self, transaction, prec, succs=(), flag=0, parents=None,
               date=None, metadata=None, ui=None):
        """obsolete: add a new obsolete marker

        * ensuring it is hashable
        * check mandatory metadata
        * encode metadata

        If you are a human writing code creating marker you want to use the
        `createmarkers` function in this module instead.

        return True if a new marker have been added, False if the markers
        already existed (no op).
        """
        if metadata is None:
            metadata = {}
        if date is None:
            if 'date' in metadata:
                # as a courtesy for out-of-tree extensions
                date = util.parsedate(metadata.pop('date'))
            elif ui is not None:
                date = ui.configdate('devel', 'default-date')
                if date is None:
                    date = util.makedate()
            else:
                date = util.makedate()
        # nodes must be full 20-byte binary nodeids
        if len(prec) != 20:
            raise ValueError(prec)
        for succ in succs:
            if len(succ) != 20:
                raise ValueError(succ)
        if prec in succs:
            raise ValueError(_('in-marker cycle with %s') % node.hex(prec))

        metadata = tuple(sorted(metadata.iteritems()))

        marker = (str(prec), tuple(succs), int(flag), metadata, date, parents)
        return bool(self.add(transaction, [marker]))

    def add(self, transaction, markers):
        """Add new markers to the store

        Take care of filtering duplicate.
        Return the number of new marker."""
        if self._readonly:
            raise error.Abort(_('creating obsolete markers is not enabled on '
                                'this repo'))
        # filter out markers already present on disk or duplicated in input
        known = set()
        getsuccessors = self.successors.get
        new = []
        for m in markers:
            if m not in getsuccessors(m[0], ()) and m not in known:
                known.add(m)
                new.append(m)
        if new:
            f = self.svfs('obsstore', 'ab')
            try:
                offset = f.tell()
                transaction.add('obsstore', offset)
                # offset == 0: new file - add the version header
                for bytes in encodemarkers(new, offset == 0, self._version):
                    f.write(bytes)
            finally:
                # XXX: f.close() == filecache invalidation == obsstore rebuilt.
                # call 'filecacheentry.refresh()'  here
                f.close()
            self._addmarkers(new)
            # new marker *may* have changed several set. invalidate the cache.
            self.caches.clear()
        # records the number of new markers for the transaction hooks
        previous = int(transaction.hookargs.get('new_obsmarkers', '0'))
        transaction.hookargs['new_obsmarkers'] = str(previous + len(new))
        return len(new)

    def mergemarkers(self, transaction, data):
        """merge a binary stream of markers inside the obsstore

        Returns the number of new markers added."""
        version, markers = _readmarkers(data)
        return self.add(transaction, markers)

    @propertycache
    def _data(self):
        # raw bytes of the on-disk store ('' when the file is missing)
        return self.svfs.tryread('obsstore')

    @propertycache
    def _version(self):
        if len(self._data) >= 1:
            # an existing store dictates the format to keep appending to it
            return _readmarkerversion(self._data)
        else:
            return self._defaultformat

    @propertycache
    def _all(self):
        # list of every marker in the store, in on-disk order
        data = self._data
        if not data:
            return []
        self._version, markers = _readmarkers(data)
        markers = list(markers)
        _checkinvalidmarkers(markers)
        return markers

    @propertycache
    def successors(self):
        successors = {}
        _addsuccessors(successors, self._all)
        return successors

    @propertycache
    def precursors(self):
        precursors = {}
        _addprecursors(precursors, self._all)
        return precursors

    @propertycache
    def children(self):
        children = {}
        _addchildren(children, self._all)
        return children

    def _cached(self, attr):
        # True when the propertycache for ``attr`` has been computed
        return attr in self.__dict__

    def _addmarkers(self, markers):
        """Update in-memory structures with freshly added ``markers``."""
        markers = list(markers) # to allow repeated iteration
        self._all.extend(markers)
        # only update the index mappings that were already computed
        if self._cached('successors'):
            _addsuccessors(self.successors, markers)
        if self._cached('precursors'):
            _addprecursors(self.precursors, markers)
        if self._cached('children'):
            _addchildren(self.children, markers)
        _checkinvalidmarkers(markers)

    def relevantmarkers(self, nodes):
        """return a set of all obsolescence markers relevant to a set of nodes.

        "relevant" to a set of nodes mean:

        - marker that use this changeset as successor
        - prune marker of direct children on this changeset
        - recursive application of the two rules on precursors of these
          markers

        It is a set so you cannot rely on order."""

        pendingnodes = set(nodes)
        seenmarkers = set()
        seennodes = set(pendingnodes)
        precursorsmarkers = self.precursors
        succsmarkers = self.successors
        children = self.children
        while pendingnodes:
            direct = set()
            for current in pendingnodes:
                direct.update(precursorsmarkers.get(current, ()))
                # prune markers recorded on children of this node
                pruned = [m for m in children.get(current, ()) if not m[1]]
                direct.update(pruned)
                # prune markers recorded on this node itself
                pruned = [m for m in succsmarkers.get(current, ()) if not m[1]]
                direct.update(pruned)
            direct -= seenmarkers
            pendingnodes = set([m[0] for m in direct])
            seenmarkers |= direct
            pendingnodes -= seennodes
            seennodes |= pendingnodes
        return seenmarkers
756 756
def makestore(ui, repo):
    """Create an obsstore instance from a repo."""
    kwargs = {}
    # read default format for new obsstore.
    # developer config: format.obsstore-version
    version = ui.configint('format', 'obsstore-version', None)
    if version is not None:
        kwargs['defaultformat'] = version
    # otherwise rely on the obsstore class default
    readonly = not isenabled(repo, createmarkersopt)
    store = obsstore(repo.svfs, readonly=readonly, **kwargs)
    if store and readonly:
        ui.warn(_('obsolete feature not enabled but %i markers found!\n')
                % len(list(store)))
    return store
772 772
773 773 def _filterprunes(markers):
774 774 """return a set with no prune markers"""
775 775 return set(m for m in markers if m[1])
776 776
def exclusivemarkers(repo, nodes):
    """set of markers relevant to "nodes" but no other locally-known nodes

    This function computes the set of markers "exclusive" to a locally-known
    node. This means we walk the markers starting from <nodes> until we reach a
    locally-known precursor outside of <nodes>. Elements of <nodes> with
    locally-known successors outside of <nodes> are ignored (since their
    precursors markers are also relevant to these successors).

    For example:

        # (A0 rewritten as A1)
        #
        # A0 <-1- A1 # Marker "1" is exclusive to A1

        or

        # (A0 rewritten as AX; AX rewritten as A1; AX is unknown locally)
        #
        # <-1- A0 <-2- AX <-3- A1 # Markers "2,3" are exclusive to A1

        or

        # (A0 has unknown precursors, A0 rewritten as A1 and A2 (divergence))
        #
        #          <-2- A1 # Marker "2" is exclusive to A0,A1
        #        /
        # <-1- A0
        #        \
        #         <-3- A2 # Marker "3" is exclusive to A0,A2
        #
        # in addition:
        #
        #  Markers "2,3" are exclusive to A1,A2
        #  Markers "1,2,3" are exclusive to A0,A1,A2

    See test/test-obsolete-bundle-strip.t for more examples.

    An example usage is strip. When stripping a changeset, we also want to
    strip the markers exclusive to this changeset. Otherwise we would have
    "dangling" obsolescence markers from its precursors: obsolescence markers
    marking a node as obsolete without any successors available locally.

    As for relevant markers, the prune markers for children will be followed.
    Of course, they will only be followed if the pruned children is
    locally-known. Since the prune markers are relevant to the pruned node.
    However, while prune markers are considered relevant to the parent of the
    pruned changesets, prune markers for locally-known changeset (with no
    successors) are considered exclusive to the pruned nodes. This allows
    to strip the prune markers (with the rest of the exclusive chain) alongside
    the pruned changesets.
    """
    # running on a filtered repository would be dangerous as markers could be
    # reported as exclusive when they are relevant for other filtered nodes.
    unfi = repo.unfiltered()

    # shortcut to various useful item
    nm = unfi.changelog.nodemap
    precursorsmarkers = unfi.obsstore.precursors
    successormarkers = unfi.obsstore.successors
    childrenmarkers = unfi.obsstore.children

    # exclusive markers (return of the function)
    exclmarkers = set()
    # we need fast membership testing
    nodes = set(nodes)
    # looking for head in the obshistory
    #
    # XXX we are ignoring all issues in regard with cycle for now.
    stack = [n for n in nodes if not _filterprunes(successormarkers.get(n, ()))]
    stack.sort()
    # nodes already stacked
    seennodes = set(stack)
    while stack:
        current = stack.pop()
        # fetch precursors markers
        markers = list(precursorsmarkers.get(current, ()))
        # extend the list with prune markers
        for mark in successormarkers.get(current, ()):
            if not mark[1]:
                markers.append(mark)
        # and markers from children (looking for prune)
        for mark in childrenmarkers.get(current, ()):
            if not mark[1]:
                markers.append(mark)
        # traverse the markers
        for mark in markers:
            if mark in exclmarkers:
                # markers already selected
                continue

            # If the markers is about the current node, select it
            #
            # (this delay the addition of markers from children)
            if mark[1] or mark[0] == current:
                exclmarkers.add(mark)

            # should we keep traversing through the precursors?
            prec = mark[0]

            # nodes in the stack or already processed
            if prec in seennodes:
                continue

            # is this a locally known node ?
            known = prec in nm
            # if locally-known and not in the <nodes> set the traversal
            # stop here.
            if known and prec not in nodes:
                continue

            # do not keep going if there are unselected markers pointing to
            # this nodes. If we end up traversing these unselected markers
            # later the node will be taken care of at that point.
            precmarkers = _filterprunes(successormarkers.get(prec))
            if precmarkers.issubset(exclmarkers):
                seennodes.add(prec)
                stack.append(prec)

    return exclmarkers
897 897
def commonversion(versions):
    """Return the newest version listed in both versions and our local formats.

    Returns None if no common version exists.
    """
    # search for highest version known on both side; iterate a sorted copy
    # instead of sorting in place so the caller's list is left untouched
    for v in sorted(versions, reverse=True):
        if v in formats:
            return v
    return None
909 909
910 910 # arbitrary picked to fit into 8K limit from HTTP server
911 911 # you have to take in account:
912 912 # - the version header
913 913 # - the base85 encoding
914 914 _maxpayload = 5300
915 915
def _pushkeyescape(markers):
    """encode markers into a dict suitable for pushkey exchange

    - binary data is base85 encoded
    - split in chunks smaller than 5300 bytes"""
    keys = {}
    chunks = []
    chunk = []
    chunklen = _maxpayload * 2  # oversized so the first marker opens a chunk
    for marker in markers:
        encoded = _fm0encodeonemarker(marker)
        if chunklen + len(encoded) > _maxpayload:
            # current chunk would overflow (or no chunk yet): start a new one
            chunk = []
            chunklen = 0
            chunks.append(chunk)
        chunk.append(encoded)
        chunklen += len(encoded)
    # newest chunks get the lowest "dump" index
    for idx, chunk in enumerate(reversed(chunks)):
        payload = ''.join([_pack('>B', _fm0version)] + chunk)
        keys['dump%i' % idx] = util.b85encode(payload)
    return keys
936 936
def listmarkers(repo):
    """List markers over pushkey"""
    store = repo.obsstore
    if store:
        return _pushkeyescape(sorted(store))
    return {}
942 942
def pushmarker(repo, key, old, new):
    """Push markers over pushkey"""
    # reject keys we do not understand
    if not key.startswith('dump'):
        repo.ui.warn(_('unknown key: %r') % key)
        return False
    # pushkey semantics: we never expect a previous value for marker dumps
    if old:
        repo.ui.warn(_('unexpected old value for %r') % key)
        return False
    rawdata = util.b85decode(new)
    lock = repo.lock()
    try:
        tr = repo.transaction('pushkey: obsolete markers')
        try:
            repo.obsstore.mergemarkers(tr, rawdata)
            # new markers may change hidden/obsolete computations
            repo.invalidatevolatilesets()
            tr.close()
            return True
        finally:
            tr.release()
    finally:
        lock.release()
964 964
def getmarkers(repo, nodes=None, exclusive=False):
    """returns markers known in a repository

    If <nodes> is specified, only markers "relevant" to those nodes are
    returned"""
    # fix: docstring previously read "are are returned" (duplicated word)
    if nodes is None:
        # no filtering: expose every marker in the store
        rawmarkers = repo.obsstore
    elif exclusive:
        rawmarkers = exclusivemarkers(repo, nodes)
    else:
        rawmarkers = repo.obsstore.relevantmarkers(nodes)

    for markerdata in rawmarkers:
        yield marker(repo, markerdata)
979 979
def relevantmarkers(repo, node):
    """all obsolete markers relevant to some revision"""
    # wrap the raw marker tuples into the higher level `marker` objects
    for data in repo.obsstore.relevantmarkers(node):
        yield marker(repo, data)
984 984
985 985
def precursormarkers(ctx):
    """obsolete markers marking this changeset as a successor"""
    repo = ctx.repo()
    for data in repo.obsstore.precursors.get(ctx.node(), ()):
        yield marker(repo, data)
990 990
def successormarkers(ctx):
    """obsolete markers making this changeset obsolete"""
    repo = ctx.repo()
    for data in repo.obsstore.successors.get(ctx.node(), ()):
        yield marker(repo, data)
995 995
def allsuccessors(obsstore, nodes, ignoreflags=0):
    """Yield node for every successor of <nodes>.

    Some successors may be unknown locally.

    This is a linear yield unsuited to detecting split changesets. It includes
    initial nodes too."""
    seen = set(nodes)
    pending = set(seen)
    while pending:
        node = pending.pop()
        yield node
        for mark in obsstore.successors.get(node, ()):
            # skip markers carrying any of the ignored flags
            if mark[2] & ignoreflags:
                continue
            for succ in mark[1]:
                if succ not in seen:
                    seen.add(succ)
                    pending.add(succ)
1016 1016
def allprecursors(obsstore, nodes, ignoreflags=0):
    """Yield node for every precursors of <nodes>.

    Some precursors may be unknown locally.

    This is a linear yield unsuited to detecting folded changesets. It includes
    initial nodes too."""
    seen = set(nodes)
    pending = set(seen)
    while pending:
        node = pending.pop()
        yield node
        for mark in obsstore.precursors.get(node, ()):
            # skip markers carrying any of the ignored flags
            if mark[2] & ignoreflags:
                continue
            prec = mark[0]
            if prec not in seen:
                seen.add(prec)
                pending.add(prec)
1038 1038
def foreground(repo, nodes):
    """return all nodes in the "foreground" of other node

    The foreground of a revision is anything reachable using parent -> children
    or precursor -> successor relation. It is very similar to "descendant" but
    augmented with obsolescence information.

    Beware that possible obsolescence cycle may result if complex situation.
    """
    repo = repo.unfiltered()
    fg = set(repo.set('%ln::', nodes))
    if repo.obsstore:
        # Obsolescence data present: iterate descendants and successors until
        # the set stops growing.
        # XXX will probably deserve an optimised revset.
        nm = repo.changelog.nodemap
        previoussize = -1
        while len(fg) != previoussize:
            previoussize = len(fg)
            succs = set(c.node() for c in fg)
            mutable = [c.node() for c in fg if c.mutable()]
            succs.update(allsuccessors(repo.obsstore, mutable))
            # only locally known successors can seed the next descendant pass
            known = (n for n in succs if n in nm)
            fg = set(repo.set('%ln::', known))
    return set(c.node() for c in fg)
1064 1064
1065 1065
def successorssets(repo, initialnode, cache=None):
    """Return set of all latest successors of initial nodes

    The successors set of a changeset A are the group of revisions that succeed
    A. It succeeds A as a consistent whole, each revision being only a partial
    replacement. The successors set contains non-obsolete changesets only.

    This function returns the full list of successor sets which is why it
    returns a list of tuples and not just a single tuple. Each tuple is a valid
    successors set. Note that (A,) may be a valid successors set for changeset A
    (see below).

    In most cases, a changeset A will have a single element (e.g. the changeset
    A is replaced by A') in its successors set. Though, it is also common for a
    changeset A to have no elements in its successor set (e.g. the changeset
    has been pruned). Therefore, the returned list of successors sets will be
    [(A',)] or [], respectively.

    When a changeset A is split into A' and B', however, it will result in a
    successors set containing more than a single element, i.e. [(A',B')].
    Divergent changesets will result in multiple successors sets, i.e. [(A',),
    (A'')].

    If a changeset A is not obsolete, then it will conceptually have no
    successors set. To distinguish this from a pruned changeset, the successor
    set will contain itself only, i.e. [(A,)].

    Finally, successors unknown locally are considered to be pruned (obsoleted
    without any successors).

    The optional `cache` parameter is a dictionary that may contain precomputed
    successors sets. It is meant to reuse the computation of a previous call to
    `successorssets` when multiple calls are made at the same time. The cache
    dictionary is updated in place. The caller is responsible for its life
    span. Code that makes multiple calls to `successorssets` *must* use this
    cache mechanism or suffer terrible performance.
    """

    succmarkers = repo.obsstore.successors

    # Flattened version of a recursive depth-first computation: plain
    # recursion would blow the python stack and markers may contain cycles.
    # `toproceed` acts as the call stack; `stackedset` mirrors it for O(1)
    # cycle detection. Computed results accumulate in `cache`.
    toproceed = [initialnode]
    stackedset = set(toproceed)
    if cache is None:
        cache = {}

    # Each iteration looks at the top of the stack (CURRENT) and lands in one
    # of four cases:
    #   1) successors sets of CURRENT already known -> pop it
    #   2) CURRENT is not obsolete -> it is its own successors set
    #   3) some direct successor not computed yet -> stack that successor
    #   4) all direct successors known -> combine them into CURRENT's sets
    while toproceed:
        current = toproceed[-1]
        if current in cache:
            # case (1): already computed
            stackedset.remove(toproceed.pop())
        elif current not in succmarkers:
            # case (2): the node is not obsolete
            if current in repo:
                # valid last successor
                cache[current] = [(current,)]
            else:
                # final version unknown locally: count as pruned
                cache[current] = []
        else:
            # distinguish case (3) from case (4): look for a direct successor
            # whose successors sets are still unknown
            for mark in sorted(succmarkers[current]):
                for suc in mark[1]:
                    if suc not in cache:
                        if suc in stackedset:
                            # obsolescence cycle: arbitrarily treat the
                            # successor as pruned to break the loop
                            cache[suc] = []
                        else:
                            # case (3): compute this successor first
                            toproceed.append(suc)
                            stackedset.add(suc)
                            break
                else:
                    continue
                break
            else:
                # case (4): every direct successor is known.
                #
                # Each distinct marker is a divergence point and contributes
                # its own successors sets. Within a marker, the sets of all
                # its successors are combined with a cartesian product;
                # duplicated entries inside one set are dropped (first entry
                # wins). Finally we drop duplicated sets and sets that are
                # strict subsets of another one.
                succssets = []
                for mark in sorted(succmarkers[current]):
                    # successors sets contributed by this marker
                    markss = [[]]
                    for suc in mark[1]:
                        productresult = []
                        for prefix in markss:
                            for suffix in cache[suc]:
                                newss = list(prefix)
                                for part in suffix:
                                    # no duplicated entry in a successors
                                    # set; first entry wins
                                    if part not in newss:
                                        newss.append(part)
                                productresult.append(newss)
                        markss = productresult
                    succssets.extend(markss)
                # remove duplicated and subset
                seen = []
                final = []
                candidates = sorted(((set(s), s) for s in succssets if s),
                                    key=lambda x: len(x[1]), reverse=True)
                for setversion, listversion in candidates:
                    for seenset in seen:
                        if setversion.issubset(seenset):
                            break
                    else:
                        final.append(listversion)
                        seen.append(setversion)
                final.reverse()  # put small successors set first
                cache[current] = final
    return cache[initialnode]
1270 1270
# mapping of 'set-name' -> <function to compute this set>
cachefuncs = {}
def cachefor(name):
    """Decorator to register a function as computing the cache for a set"""
    def decorator(func):
        # registering the same name twice is always a programming mistake
        if name in cachefuncs:
            msg = "duplicated registration for volatileset '%s' (existing: %r)"
            raise error.ProgrammingError(msg % (name, cachefuncs[name]))
        cachefuncs[name] = func
        return func
    return decorator
1280 1282
def getrevs(repo, name):
    """Return the set of revision that belong to the <name> set

    Such access may compute the set and cache it for future use"""
    repo = repo.unfiltered()
    if not repo.obsstore:
        # no obsolescence data at all: every volatile set is empty
        return frozenset()
    caches = repo.obsstore.caches
    if name not in caches:
        # lazily compute and memoize on the obsstore
        caches[name] = cachefuncs[name](repo)
    return caches[name]
1291 1293
# To be simple we need to invalidate obsolescence cache when:
#
# - new changeset is added:
# - public phase is changed
# - obsolescence marker are added
# - strip is used a repo
def clearobscaches(repo):
    """Remove all obsolescence related cache from a repo

    This removes all caches in the obsstore if the obsstore already exists on
    the repo.

    (We could be smarter here given the exact event that triggered the cache
    clearing)"""
    # only clear caches if there is obsstore data in this repo
    if 'obsstore' in repo._filecache:
        repo.obsstore.caches.clear()
1309 1311
@cachefor('obsolete')
def _computeobsoleteset(repo):
    """the set of obsolete revisions"""
    # hoist lookups out of the generator for speed
    getnode = repo.changelog.node
    isobs = repo.obsstore.successors.__contains__
    # public changesets can never be obsolete: only scan draft/secret
    notpublic = repo._phasecache.getrevset(repo, (phases.draft, phases.secret))
    return set(r for r in notpublic if isobs(getnode(r)))
1318 1320
@cachefor('unstable')
def _computeunstableset(repo):
    """the set of non obsolete revisions with obsolete parents"""
    # Process revisions in ascending rev order: a revision is unstable when
    # one of its parents is obsolete or itself unstable, and parents always
    # carry a lower revision number.
    ctxs = sorted(repo.set('(not public()) and (not obsolete())'),
                  key=lambda c: c.rev())
    unstable = set()
    for ctx in ctxs:
        if any(p.obsolete() or p.rev() in unstable for p in ctx.parents()):
            unstable.add(ctx.rev())
    return unstable
1333 1335
@cachefor('suspended')
def _computesuspendedset(repo):
    """the set of obsolete parents with non obsolete descendants"""
    # obsolete revisions that are ancestors of unstable ones are "suspended"
    unstableancestors = repo.changelog.ancestors(getrevs(repo, 'unstable'))
    return set(r for r in getrevs(repo, 'obsolete') if r in unstableancestors)
1339 1341
@cachefor('extinct')
def _computeextinctset(repo):
    """the set of obsolete parents without non obsolete descendants"""
    # extinct = obsolete revisions not keeping any live descendant around
    return getrevs(repo, 'obsolete').difference(getrevs(repo, 'suspended'))
1344 1346
1345 1347
@cachefor('bumped')
def _computebumpedset(repo):
    """the set of revs trying to obsolete public revisions"""
    bumped = set()
    # util function (avoid attribute lookup in the loop)
    phase = repo._phasecache.phase  # would be faster to grab the full list
    public = phases.public
    torev = repo.changelog.nodemap.get
    # we only evaluate mutable, non-obsolete revisions
    for ctx in repo.set('(not public()) and (not obsolete())'):
        node = ctx.node()
        # (future) A cache of precursors may be worthwhile if split is common
        for pnode in allprecursors(repo.obsstore, [node],
                                   ignoreflags=bumpedfix):
            prev = torev(pnode)  # unfiltered! but so is phasecache
            if prev is not None and phase(repo, prev) <= public:
                # we have a public precursor
                bumped.add(ctx.rev())
                break  # Next draft!
    return bumped
1368 1370
@cachefor('divergent')
def _computedivergentset(repo):
    """the set of rev that compete to be the final successors of some revision.
    """
    divergent = set()
    obsstore = repo.obsstore
    newermap = {}
    for ctx in repo.set('(not public()) - obsolete()'):
        # walk the precursor chain of this revision looking for a precursor
        # with more than one non-empty successors set
        toprocess = set(obsstore.precursors.get(ctx.node(), ()))
        seen = set()
        while toprocess:
            prec = toprocess.pop()[0]
            if prec in seen:
                continue  # emergency cycle hanging prevention
            seen.add(prec)
            if prec not in newermap:
                successorssets(repo, prec, newermap)
            newer = [n for n in newermap[prec] if n]
            if len(newer) > 1:
                divergent.add(ctx.rev())
                break
            toprocess.update(obsstore.precursors.get(prec, ()))
    return divergent
1393 1395
1394 1396
def createmarkers(repo, relations, flag=0, date=None, metadata=None,
                  operation=None):
    """Add obsolete markers between changesets in a repo

    <relations> must be an iterable of (<old>, (<new>, ...)[,{metadata}])
    tuple. `old` and `news` are changectx. metadata is an optional dictionary
    containing metadata for this marker only. It is merged with the global
    metadata specified through the `metadata` argument of this function,

    Trying to obsolete a public changeset will raise an exception.

    Current user and date are used except if specified otherwise in the
    metadata attribute.

    This function operates within a transaction of its own, but does
    not take any lock on the repo.
    """
    # prepare global metadata
    if metadata is None:
        metadata = {}
    if 'user' not in metadata:
        metadata['user'] = repo.ui.username()
    useoperation = repo.ui.configbool('experimental',
                                      'evolution.track-operation',
                                      False)
    if useoperation and operation:
        metadata['operation'] = operation
    tr = repo.transaction('add-obsolescence-marker')
    try:
        # Creating a marker invalidates the hidden cache, which would make
        # each prec.parents() call below recompute it (n^2 behavior). So we
        # validate and collect all arguments first, then create the markers.
        markerargs = []
        for rel in relations:
            prec = rel[0]
            sucs = rel[1]
            localmetadata = metadata.copy()
            if 2 < len(rel):
                localmetadata.update(rel[2])

            if not prec.mutable():
                raise error.Abort(_("cannot obsolete public changeset: %s")
                                  % prec,
                                  hint="see 'hg help phases' for details")
            nprec = prec.node()
            nsucs = tuple(s.node() for s in sucs)
            npare = None
            if not nsucs:
                # prune marker: record the parents for later resurrection
                npare = tuple(p.node() for p in prec.parents())
            if nprec in nsucs:
                raise error.Abort(_("changeset %s cannot obsolete itself")
                                  % prec)

            markerargs.append((nprec, nsucs, npare, localmetadata))

        for args in markerargs:
            nprec, nsucs, npare, localmetadata = args
            repo.obsstore.create(tr, nprec, nsucs, flag, parents=npare,
                                 date=date, metadata=localmetadata,
                                 ui=repo.ui)
        repo.filteredrevcache.clear()
        tr.close()
    finally:
        tr.release()
General Comments 0
You need to be logged in to leave comments. Login now