obsolete: fix n^2 marker computation behavior...

Durham Goode
r27984:e60e13a8 default
# obsolete.py - obsolete markers handling
#
# Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
#                Logilab SA        <contact@logilab.fr>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Obsolete marker handling

An obsolete marker maps an old changeset to a list of new
changesets. If the list of new changesets is empty, the old changeset
is said to be "killed". Otherwise, the old changeset is being
"replaced" by the new changesets.

Obsolete markers can be used to record and distribute changeset graph
transformations performed by history rewrite operations, and help
building new tools to reconcile conflicting rewrite actions. To
facilitate conflict resolution, markers include various annotations
besides old and new changeset identifiers, such as creation date or
author name.

The old obsoleted changeset is called a "precursor" and possible
replacements are called "successors". Markers that used changeset X as
a precursor are called "successor markers of X" because they hold
information about the successors of X. Markers that use changeset Y as
a successor are called "precursor markers of Y" because they hold
information about the precursors of Y.

Examples:

- When changeset A is replaced by changeset A', one marker is stored:

    (A, (A',))

- When changesets A and B are folded into a new changeset C, two markers are
  stored:

    (A, (C,)) and (B, (C,))

- When changeset A is simply "pruned" from the graph, a marker is created:

    (A, ())

- When changeset A is split into B and C, a single marker is used:

    (A, (B, C))

  We use a single marker to distinguish the "split" case from the "divergence"
  case. If two independent operations rewrite the same changeset A into A' and
  A'', we have an error case: divergent rewriting. We can detect it because
  two markers will be created independently:

    (A, (A',)) and (A, (A'',))

Format
------

Markers are stored in an append-only file stored in
'.hg/store/obsstore'.

The file starts with a version header:

- 1 unsigned byte: version number, starting at zero.

The header is followed by the markers. Marker format depends on the version.
See the comment associated with each format for details.

"""
from __future__ import absolute_import

import errno
import struct

from .i18n import _
from . import (
    base85,
    error,
    node,
    parsers,
    phases,
    util,
)

_pack = struct.pack
_unpack = struct.unpack
_calcsize = struct.calcsize
propertycache = util.propertycache

# the obsolete feature is not mature enough to be enabled by default.
# you have to rely on third party extensions to enable this.
_enabled = False

# Options for obsolescence
createmarkersopt = 'createmarkers'
allowunstableopt = 'allowunstable'
exchangeopt = 'exchange'

### obsolescence marker flag

## bumpedfix flag
#
# When a changeset A' succeeds a changeset A which became public, we call A'
# "bumped" because it's a successor of a public changeset
#
#     o    A' (bumped)
#     |`:
#     | o  A
#     |/
#     o    Z
#
# The way to solve this situation is to create a new changeset Ad as a child
# of A. This changeset has the same content as A'. So the diff from A to A'
# is the same as the diff from A to Ad. Ad is marked as a successor of A'
#
#     o   Ad
#     |`:
#     | x A'
#     |'|
#     o | A
#     |/
#     o   Z
#
# But by transitivity Ad is also a successor of A. To avoid having Ad marked
# as bumped too, we add the `bumpedfix` flag to the marker, <A', (Ad,)>.
# This flag means that the successor expresses the changes between the public
# and bumped versions and fixes the situation, breaking the transitivity of
# "bumped" here.
bumpedfix = 1
usingsha256 = 2

## Parsing and writing of version "0"
#
# The header is followed by the markers. Each marker is made of:
#
# - 1 uint8 : number of new changesets "N", can be zero.
#
# - 1 uint32: metadata size "M" in bytes.
#
# - 1 byte: a bit field. It is reserved for flags used in common
#   obsolete marker operations, to avoid repeated decoding of metadata
#   entries.
#
# - 20 bytes: obsoleted changeset identifier.
#
# - N*20 bytes: new changesets identifiers.
#
# - M bytes: metadata as a sequence of nul-terminated strings. Each
#   string contains a key and a value, separated by a colon ':', without
#   additional encoding. Keys cannot contain '\0' or ':' and values
#   cannot contain '\0'.
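#
# Illustrative sketch: hand-decoding the fixed part of one version-0 marker
# with the struct module, assuming `raw` (hypothetical) holds the obsstore
# contents and `off` points just past the version byte:
#
#   numsuc, mdsize, flags, prec = struct.unpack('>BIB20s',
#                                               raw[off:off + 26])
#   off += 26
#   sucs = struct.unpack('20s' * numsuc, raw[off:off + 20 * numsuc])
#
# This mirrors what _fm0readmarkers below does, minus metadata handling.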
_fm0version = 0
_fm0fixed = '>BIB20s'
_fm0node = '20s'
_fm0fsize = _calcsize(_fm0fixed)
_fm0fnodesize = _calcsize(_fm0node)

def _fm0readmarkers(data, off):
    # Loop on markers
    l = len(data)
    while off + _fm0fsize <= l:
        # read fixed part
        cur = data[off:off + _fm0fsize]
        off += _fm0fsize
        numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
        # read replacement
        sucs = ()
        if numsuc:
            s = (_fm0fnodesize * numsuc)
            cur = data[off:off + s]
            sucs = _unpack(_fm0node * numsuc, cur)
            off += s
        # read metadata
        # (metadata will be decoded on demand)
        metadata = data[off:off + mdsize]
        if len(metadata) != mdsize:
            raise error.Abort(_('parsing obsolete marker: metadata is too '
                                'short, %d bytes expected, got %d')
                              % (mdsize, len(metadata)))
        off += mdsize
        metadata = _fm0decodemeta(metadata)
        try:
            when, offset = metadata.pop('date', '0 0').split(' ')
            date = float(when), int(offset)
        except ValueError:
            date = (0., 0)
        parents = None
        if 'p2' in metadata:
            parents = (metadata.pop('p1', None), metadata.pop('p2', None))
        elif 'p1' in metadata:
            parents = (metadata.pop('p1', None),)
        elif 'p0' in metadata:
            parents = ()
        if parents is not None:
            try:
                parents = tuple(node.bin(p) for p in parents)
                # if parent content is not a nodeid, drop the data
                for p in parents:
                    if len(p) != 20:
                        parents = None
                        break
            except TypeError:
                # if content cannot be translated to nodeid drop the data.
                parents = None

        metadata = tuple(sorted(metadata.iteritems()))

        yield (pre, sucs, flags, metadata, date, parents)

def _fm0encodeonemarker(marker):
    pre, sucs, flags, metadata, date, parents = marker
    if flags & usingsha256:
        raise error.Abort(_('cannot handle sha256 with old obsstore format'))
    metadata = dict(metadata)
    time, tz = date
    metadata['date'] = '%r %i' % (time, tz)
    if parents is not None:
        if not parents:
            # mark that we explicitly recorded no parents
            metadata['p0'] = ''
        for i, p in enumerate(parents):
            metadata['p%i' % (i + 1)] = node.hex(p)
    metadata = _fm0encodemeta(metadata)
    numsuc = len(sucs)
    format = _fm0fixed + (_fm0node * numsuc)
    data = [numsuc, len(metadata), flags, pre]
    data.extend(sucs)
    return _pack(format, *data) + metadata

def _fm0encodemeta(meta):
    """Return encoded metadata string to string mapping.

    Assume no ':' in key and no '\0' in both key and value."""
    for key, value in meta.iteritems():
        if ':' in key or '\0' in key:
            raise ValueError("':' and '\\0' are forbidden in metadata key")
        if '\0' in value:
            raise ValueError("'\\0' is forbidden in metadata value")
    return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])

def _fm0decodemeta(data):
    """Return string to string dictionary from encoded version."""
    d = {}
    for l in data.split('\0'):
        if l:
            key, value = l.split(':')
            d[key] = value
    return d
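
# Illustrative sketch: the version-0 metadata encoding round-trips through
# the two helpers above (keys are sorted on encode):
#
#   blob = _fm0encodemeta({'user': 'alice', 'note': 'amended'})
#   # blob == 'note:amended\0user:alice'
#   assert _fm0decodemeta(blob) == {'user': 'alice', 'note': 'amended'}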

## Parsing and writing of version "1"
#
# The header is followed by the markers. Each marker is made of:
#
# - uint32: total size of the marker (including this field)
#
# - float64: date in seconds since epoch
#
# - int16: timezone offset in minutes
#
# - uint16: a bit field. It is reserved for flags used in common
#   obsolete marker operations, to avoid repeated decoding of metadata
#   entries.
#
# - uint8: number of successors "N", can be zero.
#
# - uint8: number of parents "P", can be zero.
#
#     0: parents data stored but no parent,
#     1: one parent stored,
#     2: two parents stored,
#     3: no parent data stored
#
# - uint8: number of metadata entries M
#
# - 20 or 32 bytes: precursor changeset identifier.
#
# - N*(20 or 32) bytes: successor changeset identifiers.
#
# - P*(20 or 32) bytes: parents of the precursor changeset.
#
# - M*(uint8, uint8): size of all metadata entries (key and value)
#
# - remaining bytes: the metadata, each (key, value) pair after the other.
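#
# Illustrative sketch: hand-decoding the fixed part of one version-1 marker,
# assuming `raw` (hypothetical) holds marker data and `off` points at the
# start of a marker:
#
#   fixed = struct.unpack('>IdhHBBB20s', raw[off:off + 39])
#   tsize, secs, tz, flags, numsuc, numpar, nummeta, prec = fixed
#   off += tsize  # the size field lets a reader skip to the next marker
#
# _fm1purereadmarkers below implements the full decoding.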
_fm1version = 1
_fm1fixed = '>IdhHBBB20s'
_fm1nodesha1 = '20s'
_fm1nodesha256 = '32s'
_fm1nodesha1size = _calcsize(_fm1nodesha1)
_fm1nodesha256size = _calcsize(_fm1nodesha256)
_fm1fsize = _calcsize(_fm1fixed)
_fm1parentnone = 3
_fm1parentshift = 14
_fm1parentmask = (_fm1parentnone << _fm1parentshift)
_fm1metapair = 'BB'
_fm1metapairsize = _calcsize('BB')

def _fm1purereadmarkers(data, off):
    # make some global constants local for performance
    noneflag = _fm1parentnone
    sha2flag = usingsha256
    sha1size = _fm1nodesha1size
    sha2size = _fm1nodesha256size
    sha1fmt = _fm1nodesha1
    sha2fmt = _fm1nodesha256
    metasize = _fm1metapairsize
    metafmt = _fm1metapair
    fsize = _fm1fsize
    unpack = _unpack

    # Loop on markers
    stop = len(data) - _fm1fsize
    ufixed = struct.Struct(_fm1fixed).unpack

    while off <= stop:
        # read fixed part
        o1 = off + fsize
        t, secs, tz, flags, numsuc, numpar, nummeta, prec = ufixed(data[off:o1])

        if flags & sha2flag:
            # FIXME: prec was read as a SHA1, needs to be amended

            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha2size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha2size * numsuc
                sucs = unpack(sha2fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha2size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha2size * numpar
                parents = unpack(sha2fmt * numpar, data[o2:o3])
        else:
            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha1size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha1size * numsuc
                sucs = unpack(sha1fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha1size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha1size * numpar
                parents = unpack(sha1fmt * numpar, data[o2:o3])

        # read metadata
        off = o3 + metasize * nummeta
        metapairsize = unpack('>' + (metafmt * nummeta), data[o3:off])
        metadata = []
        for idx in xrange(0, len(metapairsize), 2):
            o1 = off + metapairsize[idx]
            o2 = o1 + metapairsize[idx + 1]
            metadata.append((data[off:o1], data[o1:o2]))
            off = o2

        yield (prec, sucs, flags, tuple(metadata), (secs, tz * 60), parents)

def _fm1encodeonemarker(marker):
    pre, sucs, flags, metadata, date, parents = marker
    # determine node size
    _fm1node = _fm1nodesha1
    if flags & usingsha256:
        _fm1node = _fm1nodesha256
    numsuc = len(sucs)
    numextranodes = numsuc
    if parents is None:
        numpar = _fm1parentnone
    else:
        numpar = len(parents)
        numextranodes += numpar
    formatnodes = _fm1node * numextranodes
    formatmeta = _fm1metapair * len(metadata)
    format = _fm1fixed + formatnodes + formatmeta
    # tz is stored in minutes so we divide by 60
    tz = date[1]//60
    data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
    data.extend(sucs)
    if parents is not None:
        data.extend(parents)
    totalsize = _calcsize(format)
    for key, value in metadata:
        lk = len(key)
        lv = len(value)
        data.append(lk)
        data.append(lv)
        totalsize += lk + lv
    data[0] = totalsize
    data = [_pack(format, *data)]
    for key, value in metadata:
        data.append(key)
        data.append(value)
    return ''.join(data)

def _fm1readmarkers(data, off):
    native = getattr(parsers, 'fm1readmarkers', None)
    if not native:
        return _fm1purereadmarkers(data, off)
    stop = len(data) - _fm1fsize
    return native(data, off, stop)

# mapping to read/write various marker formats
# <version> -> (decoder, encoder)
formats = {_fm0version: (_fm0readmarkers, _fm0encodeonemarker),
           _fm1version: (_fm1readmarkers, _fm1encodeonemarker)}

@util.nogc
def _readmarkers(data):
    """Read and enumerate markers from raw data"""
    off = 0
    diskversion = _unpack('>B', data[off:off + 1])[0]
    off += 1
    if diskversion not in formats:
        raise error.Abort(_('parsing obsolete marker: unknown version %r')
                          % diskversion)
    return diskversion, formats[diskversion][0](data, off)
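
# Illustrative sketch: reading markers straight from an on-disk obsstore,
# much like the obsstore._all property below does, assuming `svfs` is a
# store vfs:
#
#   data = svfs.tryread('obsstore')
#   if data:
#       version, markers = _readmarkers(data)
#       for prec, sucs, flags, meta, date, parents in markers:
#           ...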

def encodemarkers(markers, addheader=False, version=_fm0version):
    # Kept separate from flushmarkers(), it will be reused for
    # markers exchange.
    encodeone = formats[version][1]
    if addheader:
        yield _pack('>B', version)
    for marker in markers:
        yield encodeone(marker)
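
# Illustrative sketch: encodemarkers() and _readmarkers() round-trip,
# assuming `markers` is a list of well-formed marker tuples:
#
#   stream = ''.join(encodemarkers(markers, addheader=True,
#                                  version=_fm1version))
#   version, decoded = _readmarkers(stream)
#   # list(decoded) should equal markers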


class marker(object):
    """Wrap obsolete marker raw data"""

    def __init__(self, repo, data):
        # the repo argument will be used to create changectx in a later version
        self._repo = repo
        self._data = data
        self._decodedmeta = None

    def __hash__(self):
        return hash(self._data)

    def __eq__(self, other):
        if type(other) != type(self):
            return False
        return self._data == other._data

    def precnode(self):
        """Precursor changeset node identifier"""
        return self._data[0]

    def succnodes(self):
        """List of successor changeset node identifiers"""
        return self._data[1]

    def parentnodes(self):
        """Parents of the precursor (None if not recorded)"""
        return self._data[5]

    def metadata(self):
        """Decoded metadata dictionary"""
        return dict(self._data[3])

    def date(self):
        """Creation date as (unixtime, offset)"""
        return self._data[4]

    def flags(self):
        """The flags field of the marker"""
        return self._data[2]

@util.nogc
def _addsuccessors(successors, markers):
    for mark in markers:
        successors.setdefault(mark[0], set()).add(mark)

@util.nogc
def _addprecursors(precursors, markers):
    for mark in markers:
        for suc in mark[1]:
            precursors.setdefault(suc, set()).add(mark)

@util.nogc
def _addchildren(children, markers):
    for mark in markers:
        parents = mark[5]
        if parents is not None:
            for p in parents:
                children.setdefault(p, set()).add(mark)

def _checkinvalidmarkers(markers):
    """search for markers with invalid data and raise an error if needed

    Exists as a separate function to allow the evolve extension a more
    subtle handling.
    """
    for mark in markers:
        if node.nullid in mark[1]:
            raise error.Abort(_('bad obsolescence marker detected: '
                                'invalid successors nullid'))

class obsstore(object):
    """Store obsolete markers

    Markers can be accessed with three mappings:
    - precursors[x] -> set(markers on precursors edges of x)
    - successors[x] -> set(markers on successors edges of x)
    - children[x]   -> set(markers on precursors edges of children(x))
    """

    fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
    # prec:    nodeid, precursor changesets
    # succs:   tuple of nodeid, successor changesets (0-N length)
    # flag:    integer, flag field carrying modifier for the markers (see doc)
    # meta:    binary blob, encoded metadata dictionary
    # date:    (float, int) tuple, date of marker creation
    # parents: (tuple of nodeid) or None, parents of precursors
    #          None is used when no data has been recorded

    def __init__(self, svfs, defaultformat=_fm1version, readonly=False):
        # caches for various obsolescence related data
        self.caches = {}
        self.svfs = svfs
        self._version = defaultformat
        self._readonly = readonly

    def __iter__(self):
        return iter(self._all)

    def __len__(self):
        return len(self._all)

    def __nonzero__(self):
        if not self._cached('_all'):
            try:
                return self.svfs.stat('obsstore').st_size > 1
            except OSError as inst:
                if inst.errno != errno.ENOENT:
                    raise
                # just build an empty _all list if no obsstore exists, which
                # avoids further stat() syscalls
                pass
        return bool(self._all)

    @property
    def readonly(self):
        """True if marker creation is disabled

        Remove me in the future when obsolete marker is always on."""
        return self._readonly

    def create(self, transaction, prec, succs=(), flag=0, parents=None,
               date=None, metadata=None):
        """obsolete: add a new obsolete marker

        * ensuring it is hashable
        * check mandatory metadata
        * encode metadata

        If you are a human writing code creating markers you want to use the
        `createmarkers` function in this module instead.

        return True if a new marker has been added, False if the marker
        already existed (no op).
        """
        if metadata is None:
            metadata = {}
        if date is None:
            if 'date' in metadata:
                # as a courtesy for out-of-tree extensions
                date = util.parsedate(metadata.pop('date'))
            else:
                date = util.makedate()
        if len(prec) != 20:
            raise ValueError(prec)
        for succ in succs:
            if len(succ) != 20:
                raise ValueError(succ)
        if prec in succs:
            raise ValueError(_('in-marker cycle with %s') % node.hex(prec))

        metadata = tuple(sorted(metadata.iteritems()))

        marker = (str(prec), tuple(succs), int(flag), metadata, date, parents)
        return bool(self.add(transaction, [marker]))
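
    # Illustrative sketch: pruning a changeset by hand with create(), inside
    # a caller-provided transaction (`somenode` is a hypothetical 20-byte
    # node; prefer the module-level createmarkers() helper):
    #
    #   tr = repo.transaction('add-obsolescence-marker')
    #   try:
    #       repo.obsstore.create(tr, somenode, succs=(),
    #                            metadata={'user': 'alice'})
    #       tr.close()
    #   finally:
    #       tr.release()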

    def add(self, transaction, markers):
        """Add new markers to the store

        Take care of filtering duplicates.
        Return the number of new markers."""
        if self._readonly:
            raise error.Abort('creating obsolete markers is not enabled on '
                              'this repo')
        known = set(self._all)
        new = []
        for m in markers:
            if m not in known:
                known.add(m)
                new.append(m)
        if new:
            f = self.svfs('obsstore', 'ab')
            try:
                offset = f.tell()
                transaction.add('obsstore', offset)
                # offset == 0: new file - add the version header
                for bytes in encodemarkers(new, offset == 0, self._version):
                    f.write(bytes)
            finally:
                # XXX: f.close() == filecache invalidation == obsstore rebuilt.
                # call 'filecacheentry.refresh()' here
                f.close()
            self._addmarkers(new)
            # new markers *may* have changed several sets. invalidate the
            # cache.
            self.caches.clear()
        # records the number of new markers for the transaction hooks
        previous = int(transaction.hookargs.get('new_obsmarkers', '0'))
        transaction.hookargs['new_obsmarkers'] = str(previous + len(new))
        return len(new)

    def mergemarkers(self, transaction, data):
        """merge a binary stream of markers inside the obsstore

        Returns the number of new markers added."""
        version, markers = _readmarkers(data)
        return self.add(transaction, markers)

    @propertycache
    def _all(self):
        data = self.svfs.tryread('obsstore')
        if not data:
            return []
        self._version, markers = _readmarkers(data)
        markers = list(markers)
        _checkinvalidmarkers(markers)
        return markers

    @propertycache
    def successors(self):
        successors = {}
        _addsuccessors(successors, self._all)
        return successors

    @propertycache
    def precursors(self):
        precursors = {}
        _addprecursors(precursors, self._all)
        return precursors

    @propertycache
    def children(self):
        children = {}
        _addchildren(children, self._all)
        return children

    def _cached(self, attr):
        return attr in self.__dict__

    def _addmarkers(self, markers):
        markers = list(markers) # to allow repeated iteration
        self._all.extend(markers)
        if self._cached('successors'):
            _addsuccessors(self.successors, markers)
        if self._cached('precursors'):
            _addprecursors(self.precursors, markers)
        if self._cached('children'):
            _addchildren(self.children, markers)
        _checkinvalidmarkers(markers)

    def relevantmarkers(self, nodes):
        """return a set of all obsolescence markers relevant to a set of nodes.

        "relevant" to a set of nodes means:

        - markers that use one of these changesets as a successor
        - prune markers of direct children of these changesets
        - recursive application of the two rules on the precursors of these
          markers

        It is a set so you cannot rely on order."""

        pendingnodes = set(nodes)
        seenmarkers = set()
        seennodes = set(pendingnodes)
        precursorsmarkers = self.precursors
        children = self.children
        while pendingnodes:
            direct = set()
            for current in pendingnodes:
                direct.update(precursorsmarkers.get(current, ()))
                pruned = [m for m in children.get(current, ()) if not m[1]]
                direct.update(pruned)
            direct -= seenmarkers
            pendingnodes = set([m[0] for m in direct])
            seenmarkers |= direct
            pendingnodes -= seennodes
            seennodes |= pendingnodes
        return seenmarkers

def commonversion(versions):
    """Return the newest version listed in both versions and our local formats.

    Returns None if no common version exists.
    """
    versions.sort(reverse=True)
    # search for the highest version known on both sides
    for v in versions:
        if v in formats:
            return v
    return None
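
# Illustrative examples for commonversion(), given the two formats above:
#
#   commonversion([1, 0]) == 1    # both sides know fm1, newest wins
#   commonversion([0]) == 0       # remote only knows fm0
#   commonversion([42]) is None   # no common version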

# arbitrarily picked to fit into 8K limit from HTTP server
# you have to take into account:
# - the version header
# - the base85 encoding
_maxpayload = 5300

def _pushkeyescape(markers):
    """encode markers into a dict suitable for pushkey exchange

    - binary data is base85 encoded
    - split in chunks smaller than 5300 bytes"""
    keys = {}
    parts = []
    currentlen = _maxpayload * 2 # ensure we create a new part
    for marker in markers:
        nextdata = _fm0encodeonemarker(marker)
        if (len(nextdata) + currentlen > _maxpayload):
            currentpart = []
            currentlen = 0
            parts.append(currentpart)
        currentpart.append(nextdata)
        currentlen += len(nextdata)
    for idx, part in enumerate(reversed(parts)):
        data = ''.join([_pack('>B', _fm0version)] + part)
        keys['dump%i' % idx] = base85.b85encode(data)
    return keys
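
# Illustrative sketch: the resulting pushkey namespace maps chunk names to
# base85-encoded fm0 streams, e.g. {'dump0': ..., 'dump1': ...}; each value
# decodes back through base85.b85decode() and _readmarkers().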

def listmarkers(repo):
    """List markers over pushkey"""
    if not repo.obsstore:
        return {}
    return _pushkeyescape(sorted(repo.obsstore))

def pushmarker(repo, key, old, new):
    """Push markers over pushkey"""
    if not key.startswith('dump'):
        repo.ui.warn(_('unknown key: %r') % key)
        return 0
    if old:
        repo.ui.warn(_('unexpected old value for %r') % key)
        return 0
    data = base85.b85decode(new)
    lock = repo.lock()
    try:
        tr = repo.transaction('pushkey: obsolete markers')
        try:
            repo.obsstore.mergemarkers(tr, data)
            tr.close()
            return 1
        finally:
            tr.release()
    finally:
        lock.release()

def getmarkers(repo, nodes=None):
    """returns markers known in a repository

    If <nodes> is specified, only markers "relevant" to those nodes are
    returned"""
    if nodes is None:
        rawmarkers = repo.obsstore
    else:
        rawmarkers = repo.obsstore.relevantmarkers(nodes)

    for markerdata in rawmarkers:
        yield marker(repo, markerdata)

def relevantmarkers(repo, node):
    """all obsolete markers relevant to some revision"""
    for markerdata in repo.obsstore.relevantmarkers(node):
        yield marker(repo, markerdata)


def precursormarkers(ctx):
    """obsolete marker marking this changeset as a successor"""
    for data in ctx.repo().obsstore.precursors.get(ctx.node(), ()):
        yield marker(ctx.repo(), data)

def successormarkers(ctx):
    """obsolete marker making this changeset obsolete"""
    for data in ctx.repo().obsstore.successors.get(ctx.node(), ()):
        yield marker(ctx.repo(), data)

def allsuccessors(obsstore, nodes, ignoreflags=0):
    """Yield node for every successor of <nodes>.

    Some successors may be unknown locally.

    This is a linear yield unsuited to detecting split changesets. It includes
    initial nodes too."""
    remaining = set(nodes)
    seen = set(remaining)
    while remaining:
        current = remaining.pop()
        yield current
        for mark in obsstore.successors.get(current, ()):
            # ignore markers flagged with specified flag
            if mark[2] & ignoreflags:
                continue
            for suc in mark[1]:
                if suc not in seen:
                    seen.add(suc)
                    remaining.add(suc)
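
# Illustrative sketch: collecting every known successor of a changeset while
# ignoring markers that merely fix the "bumped" state:
#
#   succs = set(allsuccessors(repo.obsstore, [ctx.node()],
#                             ignoreflags=bumpedfix))
#   succs.discard(ctx.node())  # the initial node is yielded too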

def allprecursors(obsstore, nodes, ignoreflags=0):
    """Yield node for every precursor of <nodes>.

    Some precursors may be unknown locally.

    This is a linear yield unsuited to detecting folded changesets. It includes
    initial nodes too."""

    remaining = set(nodes)
    seen = set(remaining)
    while remaining:
        current = remaining.pop()
        yield current
        for mark in obsstore.precursors.get(current, ()):
            # ignore markers flagged with specified flag
            if mark[2] & ignoreflags:
                continue
            suc = mark[0]
            if suc not in seen:
                seen.add(suc)
                remaining.add(suc)

def foreground(repo, nodes):
    """return all nodes in the "foreground" of other nodes

    The foreground of a revision is anything reachable using parent -> children
    or precursor -> successor relation. It is very similar to "descendant" but
    augmented with obsolescence information.

    Beware that obsolescence cycles may result in complex situations.
    """
    repo = repo.unfiltered()
    foreground = set(repo.set('%ln::', nodes))
    if repo.obsstore:
        # We only need this complicated logic if there is obsolescence
        # XXX will probably deserve an optimised revset.
        nm = repo.changelog.nodemap
        plen = -1
        # compute the whole set of successors or descendants
        while len(foreground) != plen:
            plen = len(foreground)
            succs = set(c.node() for c in foreground)
            mutable = [c.node() for c in foreground if c.mutable()]
            succs.update(allsuccessors(repo.obsstore, mutable))
            known = (n for n in succs if n in nm)
            foreground = set(repo.set('%ln::', known))
    return set(c.node() for c in foreground)


def successorssets(repo, initialnode, cache=None):
    """Return set of all latest successors of initial nodes

    The successors set of a changeset A is the group of revisions that succeed
    A. It succeeds A as a consistent whole, each revision being only a partial
    replacement. The successors set contains non-obsolete changesets only.

    This function returns the full list of successor sets which is why it
    returns a list of tuples and not just a single tuple. Each tuple is a valid
    successors set. Note that (A,) may be a valid successors set for changeset A
    (see below).

    In most cases, a changeset A will have a single element (e.g. the changeset
    A is replaced by A') in its successors set. Though, it is also common for a
    changeset A to have no elements in its successors set (e.g. the changeset
    has been pruned). Therefore, the returned list of successors sets will be
    [(A',)] or [], respectively.

    When a changeset A is split into A' and B', however, it will result in a
    successors set containing more than a single element, i.e. [(A',B')].
    Divergent changesets will result in multiple successors sets, i.e. [(A',),
    (A'',)].

    If a changeset A is not obsolete, then it will conceptually have no
    successors set. To distinguish this from a pruned changeset, the successor
    set will contain itself only, i.e. [(A,)].

    Finally, successors unknown locally are considered to be pruned (obsoleted
    without any successors).

    The optional `cache` parameter is a dictionary that may contain precomputed
    successors sets. It is meant to reuse the computation of a previous call to
    `successorssets` when multiple calls are made at the same time. The cache
    dictionary is updated in place. The caller is responsible for its life
    span. Code that makes multiple calls to `successorssets` *must* use this
    cache mechanism or suffer terrible performance.
    """

    succmarkers = repo.obsstore.successors

    # Stack of nodes we search successors sets for
    toproceed = [initialnode]
    # set version of above list for fast loop detection
    # element added to "toproceed" must be added here
    stackedset = set(toproceed)
    if cache is None:
        cache = {}

    # This while loop is the flattened version of a recursive search for
    # successors sets
    #
    # def successorssets(x):
    #     successors = directsuccessors(x)
    #     ss = [[]]
    #     for succ in directsuccessors(x):
    #         # product as in itertools cartesian product
    #         ss = product(ss, successorssets(succ))
    #     return ss
    #
    # But we can not use plain recursive calls here:
    # - that would blow the python call stack
    # - obsolescence markers may have cycles, we need to handle them.
    #
    # The `toproceed` list acts as our call stack. Every node we search
    # successors sets for is stacked there.
    #
    # The `stackedset` is a set version of this stack used to check if a node
    # is already stacked. This check is used to detect cycles and prevent
    # infinite loops.
    #
    # successors sets of all nodes are stored in the `cache` dictionary.
    #
    # After this while loop ends we use the cache to return the successors
    # sets for the node requested by the caller.
    while toproceed:
        # Every iteration tries to compute the successors sets of the topmost
        # node of the stack: CURRENT.
        #
        # There are four possible outcomes:
        #
        # 1) We already know the successors sets of CURRENT:
        #    -> mission accomplished, pop it from the stack.
        # 2) Node is not obsolete:
        #    -> the node is its own successors sets. Add it to the cache.
        # 3) We do not know successors set of direct successors of CURRENT:
        #    -> We add those successors to the stack.
        # 4) We know successors sets of all direct successors of CURRENT:
        #    -> We can compute CURRENT successors set and add it to the
        #       cache.
        #
        current = toproceed[-1]
        if current in cache:
            # case (1): We already know the successors sets
            stackedset.remove(toproceed.pop())
        elif current not in succmarkers:
            # case (2): The node is not obsolete.
            if current in repo:
                # We have a valid last successor.
                cache[current] = [(current,)]
            else:
                # Final obsolete version is unknown locally.
                # Do not count that as a valid successor.
                cache[current] = []
        else:
            # cases (3) and (4)
            #
            # We proceed in two phases. Phase 1 aims to distinguish case (3)
            # from case (4):
            #
            #     For each direct successor of CURRENT, we check whether its
            #     successors sets are known. If they are not, we stack the
            #     unknown node and proceed to the next iteration of the while
            #     loop. (case 3)
            #
            #     During this step, we may detect obsolescence cycles: a node
            #     with unknown successors sets but already in the call stack.
            #     In such a situation, we arbitrarily set the successors sets
            #     of the node to nothing (node pruned) to break the cycle.
            #
            #     If no break was encountered we proceed to phase 2.
            #
            # Phase 2 computes successors sets of CURRENT (case 4); see details
            # in phase 2 itself.
            #
            # Note the two levels of iteration in each phase.
            # - The first one handles obsolescence markers using CURRENT as
            #   precursor (successors markers of CURRENT).
            #
            #   Having multiple entries here means divergence.
            #
            # - The second one handles successors defined in each marker.
            #
            #   Having none means pruned node, multiple successors means split,
            #   a single successor is a standard replacement.
            #
            for mark in sorted(succmarkers[current]):
                for suc in mark[1]:
                    if suc not in cache:
                        if suc in stackedset:
                            # cycle breaking
                            cache[suc] = []
                        else:
                            # case (3) If we have not computed successors sets
                            # of one of those successors we add it to the
                            # `toproceed` stack and stop all work for this
                            # iteration.
                            toproceed.append(suc)
                            stackedset.add(suc)
                            break
                else:
                    continue
                break
            else:
                # case (4): we know all successors sets of all direct
                # successors
                #
                # Successors set contributed by each marker depends on the
                # successors sets of all its "successors" nodes.
                #
                # Each different marker is a divergence in the obsolescence
                # history. It contributes successors sets distinct from other
                # markers.
                #
                # Within a marker, a successor may have divergent successors
                # sets. In such a case, the marker will contribute multiple
                # divergent successors sets. If multiple successors have
                # divergent successors sets, a Cartesian product is used.
                #
                # At the end we post-process successors sets to remove
                # duplicated entries and successors sets that are strict
                # subsets of another one.
                succssets = []
                for mark in sorted(succmarkers[current]):
                    # successors sets contributed by this marker
                    markss = [[]]
                    for suc in mark[1]:
                        # cardinal product with previous successors
                        productresult = []
                        for prefix in markss:
                            for suffix in cache[suc]:
                                newss = list(prefix)
                                for part in suffix:
                                    # do not duplicate entries in successors
                                    # set; first entry wins.
                                    if part not in newss:
                                        newss.append(part)
                                productresult.append(newss)
                        markss = productresult
                    succssets.extend(markss)
                # remove duplicated and subset
                seen = []
                final = []
                candidate = sorted(((set(s), s) for s in succssets if s),
                                   key=lambda x: len(x[1]), reverse=True)
                for setversion, listversion in candidate:
                    for seenset in seen:
                        if setversion.issubset(seenset):
                            break
                    else:
                        final.append(listversion)
                        seen.append(setversion)
                final.reverse() # put small successors set first
                cache[current] = final
    return cache[initialnode]
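
# Illustrative examples: with independent markers (A, (B,)) and (A, (C,))
# (divergence), successorssets(repo, A) returns [(B,), (C,)]; with a single
# split marker (A, (B, C)) it returns [(B, C)]; for a pruned A it returns [].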

# mapping of 'set-name' -> <function to compute this set>
cachefuncs = {}
def cachefor(name):
    """Decorator to register a function as computing the cache for a set"""
    def decorator(func):
        assert name not in cachefuncs
        cachefuncs[name] = func
        return func
    return decorator

def getrevs(repo, name):
    """Return the set of revisions that belong to the <name> set

    Such access may compute the set and cache it for future use"""
    repo = repo.unfiltered()
    if not repo.obsstore:
        return frozenset()
    if name not in repo.obsstore.caches:
        repo.obsstore.caches[name] = cachefuncs[name](repo)
    return repo.obsstore.caches[name]

# To be simple we need to invalidate obsolescence cache when:
#
# - a new changeset is added
# - the public phase is changed
# - obsolescence markers are added
# - strip is used on a repo
def clearobscaches(repo):
    """Remove all obsolescence related cache from a repo

    This removes all caches in the obsstore if the obsstore already exists on
    the repo.

    (We could be smarter here given the exact event that triggered the cache
    clearing)"""
    # only clear cache if there is obsstore data in this repo
    if 'obsstore' in repo._filecache:
        repo.obsstore.caches.clear()

@cachefor('obsolete')
def _computeobsoleteset(repo):
    """the set of obsolete revisions"""
    obs = set()
    getnode = repo.changelog.node
    notpublic = repo.revs("not public()")
    for r in notpublic:
        if getnode(r) in repo.obsstore.successors:
            obs.add(r)
    return obs

@cachefor('unstable')
def _computeunstableset(repo):
    """the set of non obsolete revisions with obsolete parents"""
    revs = [(ctx.rev(), ctx) for ctx in
            repo.set('(not public()) and (not obsolete())')]
    revs.sort(key=lambda x:x[0])
    unstable = set()
    for rev, ctx in revs:
        # A rev is unstable if one of its parents is obsolete or unstable
        # this works since we traverse following growing rev order
        if any((x.obsolete() or (x.rev() in unstable))
               for x in ctx.parents()):
            unstable.add(rev)
    return unstable

@cachefor('suspended')
def _computesuspendedset(repo):
    """the set of obsolete parents with non obsolete descendants"""
    suspended = repo.changelog.ancestors(getrevs(repo, 'unstable'))
    return set(r for r in getrevs(repo, 'obsolete') if r in suspended)

@cachefor('extinct')
def _computeextinctset(repo):
    """the set of obsolete parents without non obsolete descendants"""
    return getrevs(repo, 'obsolete') - getrevs(repo, 'suspended')


@cachefor('bumped')
def _computebumpedset(repo):
    """the set of revs trying to obsolete public revisions"""
    bumped = set()
    # util function (avoid attribute lookup in the loop)
    phase = repo._phasecache.phase # would be faster to grab the full list
    public = phases.public
    cl = repo.changelog
    torev = cl.nodemap.get
    for ctx in repo.set('(not public()) and (not obsolete())'):
        rev = ctx.rev()
        # We only evaluate mutable, non-obsolete revisions
        node = ctx.node()
        # (future) A cache of precursors may be worth it if split is very
        # common
        for pnode in allprecursors(repo.obsstore, [node],
                                   ignoreflags=bumpedfix):
            prev = torev(pnode) # unfiltered! but so is phasecache
            if (prev is not None) and (phase(repo, prev) <= public):
                # we have a public precursor
                bumped.add(rev)
                break # Next draft!
    return bumped

@cachefor('divergent')
def _computedivergentset(repo):
    """the set of revs that compete to be the final successors of some
    revision."""
    divergent = set()
    obsstore = repo.obsstore
    newermap = {}
    for ctx in repo.set('(not public()) - obsolete()'):
        mark = obsstore.precursors.get(ctx.node(), ())
        toprocess = set(mark)
        seen = set()
        while toprocess:
            prec = toprocess.pop()[0]
            if prec in seen:
                continue # emergency cycle hanging prevention
            seen.add(prec)
            if prec not in newermap:
                successorssets(repo, prec, newermap)
            newer = [n for n in newermap[prec] if n]
            if len(newer) > 1:
                divergent.add(ctx.rev())
                break
            toprocess.update(obsstore.precursors.get(prec, ()))
    return divergent


def createmarkers(repo, relations, flag=0, date=None, metadata=None):
    """Add obsolete markers between changesets in a repo

    <relations> must be an iterable of (<old>, (<new>, ...)[,{metadata}])
    tuples. `old` and `news` are changectx. metadata is an optional dictionary
    containing metadata for this marker only. It is merged with the global
    metadata specified through the `metadata` argument of this function.

    Trying to obsolete a public changeset will raise an exception.

    Current user and date are used except if specified otherwise in the
    metadata attribute.

    This function operates within a transaction of its own, but does
    not take any lock on the repo.
    """
    # prepare metadata
    if metadata is None:
        metadata = {}
    if 'user' not in metadata:
        metadata['user'] = repo.ui.username()
    tr = repo.transaction('add-obsolescence-marker')
    try:
        markerargs = []
        for rel in relations:
            prec = rel[0]
            sucs = rel[1]
            localmetadata = metadata.copy()
            if 2 < len(rel):
                localmetadata.update(rel[2])

            if not prec.mutable():
                raise error.Abort("cannot obsolete public changeset: %s"
                                  % prec,
                                  hint='see "hg help phases" for details')
            nprec = prec.node()
            nsucs = tuple(s.node() for s in sucs)
            npare = None
            if not nsucs:
                npare = tuple(p.node() for p in prec.parents())
            if nprec in nsucs:
                raise error.Abort("changeset %s cannot obsolete itself" % prec)

            # Creating the marker causes the hidden cache to become invalid,
            # which causes recomputation when we ask for prec.parents() above.
            # Resulting in n^2 behavior. So let's prepare all of the args
            # first, then create the markers.
            markerargs.append((nprec, nsucs, npare, localmetadata))

        for args in markerargs:
            nprec, nsucs, npare, localmetadata = args
            repo.obsstore.create(tr, nprec, nsucs, flag, parents=npare,
                                 date=date, metadata=localmetadata)
            repo.filteredrevcache.clear()
        tr.close()
    finally:
        tr.release()

1252 1262
1253 1263 def isenabled(repo, option):
1254 1264 """Returns True if the given repository has the given obsolete option
1255 1265 enabled.
1256 1266 """
1257 1267 result = set(repo.ui.configlist('experimental', 'evolution'))
1258 1268 if 'all' in result:
1259 1269 return True
1260 1270
1261 1271 # For migration purposes, temporarily return true if the config hasn't been
1262 1272 # set but _enabled is true.
1263 1273 if len(result) == 0 and _enabled:
1264 1274 return True
1265 1275
1266 1276 # createmarkers must be enabled if other options are enabled
1267 1277 if ((allowunstableopt in result or exchangeopt in result) and
1268 1278 not createmarkersopt in result):
1269 1279 raise error.Abort(_("'createmarkers' obsolete option must be enabled "
1270 1280 "if other obsolete options are enabled"))
1271 1281
1272 1282 return option in result