##// END OF EJS Templates
obsolete: use 2 argument form of enumerate()...
Gregory Szorc -
r32278:7c3ef55d default
parent child Browse files
Show More
@@ -1,1284 +1,1284 b''
1 1 # obsolete.py - obsolete markers handling
2 2 #
3 3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
4 4 # Logilab SA <contact@logilab.fr>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 """Obsolete marker handling
10 10
11 11 An obsolete marker maps an old changeset to a list of new
12 12 changesets. If the list of new changesets is empty, the old changeset
13 13 is said to be "killed". Otherwise, the old changeset is being
14 14 "replaced" by the new changesets.
15 15
16 16 Obsolete markers can be used to record and distribute changeset graph
17 17 transformations performed by history rewrite operations, and help
18 18 building new tools to reconcile conflicting rewrite actions. To
19 19 facilitate conflict resolution, markers include various annotations
besides old and new changeset identifiers, such as creation date or
21 21 author name.
22 22
23 23 The old obsoleted changeset is called a "precursor" and possible
replacements are called "successors". Markers that use changeset X as
a precursor are called "successor markers of X" because they hold
information about the successors of X. Markers that use changeset Y as
a successor are called "precursor markers of Y" because they hold
28 28 information about the precursors of Y.
29 29
30 30 Examples:
31 31
32 32 - When changeset A is replaced by changeset A', one marker is stored:
33 33
34 34 (A, (A',))
35 35
36 36 - When changesets A and B are folded into a new changeset C, two markers are
37 37 stored:
38 38
39 39 (A, (C,)) and (B, (C,))
40 40
41 41 - When changeset A is simply "pruned" from the graph, a marker is created:
42 42
43 43 (A, ())
44 44
45 45 - When changeset A is split into B and C, a single marker is used:
46 46
47 47 (A, (B, C))
48 48
49 49 We use a single marker to distinguish the "split" case from the "divergence"
case. If two independent operations rewrite the same changeset A into A' and
51 51 A'', we have an error case: divergent rewriting. We can detect it because
52 52 two markers will be created independently:
53 53
54 54 (A, (B,)) and (A, (C,))
55 55
56 56 Format
57 57 ------
58 58
59 59 Markers are stored in an append-only file stored in
60 60 '.hg/store/obsstore'.
61 61
62 62 The file starts with a version header:
63 63
64 64 - 1 unsigned byte: version number, starting at zero.
65 65
The header is followed by the markers. Marker format depends on the version. See
67 67 comment associated with each format for details.
68 68
69 69 """
70 70 from __future__ import absolute_import
71 71
72 72 import errno
73 73 import struct
74 74
75 75 from .i18n import _
76 76 from . import (
77 77 error,
78 78 node,
79 79 parsers,
80 80 phases,
81 81 util,
82 82 )
83 83
# Module-level aliases for struct helpers; these are looked up inside hot
# parsing loops, so keep them as plain module globals.
_pack = struct.pack
_unpack = struct.unpack
_calcsize = struct.calcsize
propertycache = util.propertycache

# the obsolete feature is not mature enough to be enabled by default.
# you have to rely on a third party extension to enable this.
_enabled = False

# Option names (configuration keys) controlling obsolescence behavior.
createmarkersopt = 'createmarkers'
allowunstableopt = 'allowunstable'
exchangeopt = 'exchange'
97 97
98 98 ### obsolescence marker flag
99 99
100 100 ## bumpedfix flag
101 101 #
102 102 # When a changeset A' succeed to a changeset A which became public, we call A'
103 103 # "bumped" because it's a successors of a public changesets
104 104 #
105 105 # o A' (bumped)
106 106 # |`:
107 107 # | o A
108 108 # |/
109 109 # o Z
110 110 #
111 111 # The way to solve this situation is to create a new changeset Ad as children
112 112 # of A. This changeset have the same content than A'. So the diff from A to A'
113 113 # is the same than the diff from A to Ad. Ad is marked as a successors of A'
114 114 #
115 115 # o Ad
116 116 # |`:
117 117 # | x A'
118 118 # |'|
119 119 # o | A
120 120 # |/
121 121 # o Z
122 122 #
123 123 # But by transitivity Ad is also a successors of A. To avoid having Ad marked
124 124 # as bumped too, we add the `bumpedfix` flag to the marker. <A', (Ad,)>.
125 125 # This flag mean that the successors express the changes between the public and
126 126 # bumped version and fix the situation, breaking the transitivity of
127 127 # "bumped" here.
128 128 bumpedfix = 1
129 129 usingsha256 = 2
130 130
131 131 ## Parsing and writing of version "0"
132 132 #
133 133 # The header is followed by the markers. Each marker is made of:
134 134 #
135 135 # - 1 uint8 : number of new changesets "N", can be zero.
136 136 #
137 137 # - 1 uint32: metadata size "M" in bytes.
138 138 #
139 139 # - 1 byte: a bit field. It is reserved for flags used in common
140 140 # obsolete marker operations, to avoid repeated decoding of metadata
141 141 # entries.
142 142 #
143 143 # - 20 bytes: obsoleted changeset identifier.
144 144 #
145 145 # - N*20 bytes: new changesets identifiers.
146 146 #
147 147 # - M bytes: metadata as a sequence of nul-terminated strings. Each
148 148 # string contains a key and a value, separated by a colon ':', without
149 149 # additional encoding. Keys cannot contain '\0' or ':' and values
150 150 # cannot contain '\0'.
# version-0 on-disk layout (see the format description above)
_fm0version = 0
_fm0fixed = '>BIB20s'  # numsuc (uint8), mdsize (uint32), flags (uint8), prec node
_fm0node = '20s'       # one binary sha1 node
_fm0fsize = _calcsize(_fm0fixed)
_fm0fnodesize = _calcsize(_fm0node)
156 156
def _fm0readmarkers(data, off):
    """Decode version-0 markers from ``data`` starting at offset ``off``.

    Generator yielding one tuple per marker:
    ``(precursor, successors, flags, metadata, date, parents)``,
    matching the field order documented on the obsstore class.
    """
    # Loop on markers
    l = len(data)
    while off + _fm0fsize <= l:
        # read fixed part
        cur = data[off:off + _fm0fsize]
        off += _fm0fsize
        numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
        # read replacement nodes; numsuc may be zero (prune marker)
        sucs = ()
        if numsuc:
            s = (_fm0fnodesize * numsuc)
            cur = data[off:off + s]
            sucs = _unpack(_fm0node * numsuc, cur)
            off += s
        # read metadata blob; a short read means the stream is corrupt
        metadata = data[off:off + mdsize]
        if len(metadata) != mdsize:
            raise error.Abort(_('parsing obsolete marker: metadata is too '
                                'short, %d bytes expected, got %d')
                              % (mdsize, len(metadata)))
        off += mdsize
        metadata = _fm0decodemeta(metadata)
        # format 0 stored date and parents as ordinary metadata entries;
        # pop them out into dedicated marker fields
        try:
            when, offset = metadata.pop('date', '0 0').split(' ')
            date = float(when), int(offset)
        except ValueError:
            date = (0., 0)
        parents = None
        if 'p2' in metadata:
            parents = (metadata.pop('p1', None), metadata.pop('p2', None))
        elif 'p1' in metadata:
            parents = (metadata.pop('p1', None),)
        elif 'p0' in metadata:
            # 'p0' records "no parents" explicitly
            parents = ()
        if parents is not None:
            try:
                parents = tuple(node.bin(p) for p in parents)
                # if parent content is not a nodeid, drop the data
                for p in parents:
                    if len(p) != 20:
                        parents = None
                        break
            except TypeError:
                # if content cannot be translated to nodeid drop the data.
                parents = None

        metadata = tuple(sorted(metadata.iteritems()))

        yield (pre, sucs, flags, metadata, date, parents)
208 208
def _fm0encodeonemarker(marker):
    """Serialize a single marker tuple into the version-0 binary format.

    Date and parents have no dedicated fields in format 0, so they are
    smuggled through the metadata dictionary ('date', 'p0'/'p1'/'p2').
    Raises error.Abort for sha256 markers, which format 0 cannot store.
    """
    pre, sucs, flags, metadata, date, parents = marker
    if flags & usingsha256:
        raise error.Abort(_('cannot handle sha256 with old obsstore format'))
    metadata = dict(metadata)
    time, tz = date
    metadata['date'] = '%r %i' % (time, tz)
    if parents is not None:
        if not parents:
            # mark that we explicitly recorded no parents
            metadata['p0'] = ''
        # parents keys are 1-based: p1, p2, ...
        for i, p in enumerate(parents, 1):
            metadata['p%i' % i] = node.hex(p)
    metadata = _fm0encodemeta(metadata)
    numsuc = len(sucs)
    format = _fm0fixed + (_fm0node * numsuc)
    data = [numsuc, len(metadata), flags, pre]
    data.extend(sucs)
    return _pack(format, *data) + metadata
228 228
229 229 def _fm0encodemeta(meta):
230 230 """Return encoded metadata string to string mapping.
231 231
232 232 Assume no ':' in key and no '\0' in both key and value."""
233 233 for key, value in meta.iteritems():
234 234 if ':' in key or '\0' in key:
235 235 raise ValueError("':' and '\0' are forbidden in metadata key'")
236 236 if '\0' in value:
237 237 raise ValueError("':' is forbidden in metadata value'")
238 238 return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])
239 239
240 240 def _fm0decodemeta(data):
241 241 """Return string to string dictionary from encoded version."""
242 242 d = {}
243 243 for l in data.split('\0'):
244 244 if l:
245 245 key, value = l.split(':')
246 246 d[key] = value
247 247 return d
248 248
249 249 ## Parsing and writing of version "1"
250 250 #
251 251 # The header is followed by the markers. Each marker is made of:
252 252 #
253 253 # - uint32: total size of the marker (including this field)
254 254 #
255 255 # - float64: date in seconds since epoch
256 256 #
257 257 # - int16: timezone offset in minutes
258 258 #
259 259 # - uint16: a bit field. It is reserved for flags used in common
260 260 # obsolete marker operations, to avoid repeated decoding of metadata
261 261 # entries.
262 262 #
263 263 # - uint8: number of successors "N", can be zero.
264 264 #
265 265 # - uint8: number of parents "P", can be zero.
266 266 #
267 267 # 0: parents data stored but no parent,
268 268 # 1: one parent stored,
269 269 # 2: two parents stored,
270 270 # 3: no parent data stored
271 271 #
272 272 # - uint8: number of metadata entries M
273 273 #
274 274 # - 20 or 32 bytes: precursor changeset identifier.
275 275 #
276 276 # - N*(20 or 32) bytes: successors changesets identifiers.
277 277 #
278 278 # - P*(20 or 32) bytes: parents of the precursors changesets.
279 279 #
280 280 # - M*(uint8, uint8): size of all metadata entries (key and value)
281 281 #
282 282 # - remaining bytes: the metadata, each (key, value) pair after the other.
# version-1 on-disk layout (see the format description above)
_fm1version = 1
# size(uint32), date(float64), tz(int16), flags(uint16),
# numsuc(uint8), numpar(uint8), nummeta(uint8), prec(20s)
_fm1fixed = '>IdhHBBB20s'
_fm1nodesha1 = '20s'
_fm1nodesha256 = '32s'
_fm1nodesha1size = _calcsize(_fm1nodesha1)
_fm1nodesha256size = _calcsize(_fm1nodesha256)
_fm1fsize = _calcsize(_fm1fixed)
_fm1parentnone = 3  # sentinel "parents not recorded" value for numpar
_fm1parentshift = 14
_fm1parentmask = (_fm1parentnone << _fm1parentshift)
_fm1metapair = 'BB'  # (key size, value size) for one metadata entry
_fm1metapairsize = _calcsize('BB')
295 295
def _fm1purereadmarkers(data, off):
    """Pure-python generator decoding version-1 markers from ``data``.

    Yields ``(prec, successors, flags, metadata, (secs, tz-seconds),
    parents)`` tuples starting at offset ``off``. A faster C version may
    exist in ``parsers``; see _fm1readmarkers.
    """
    # make some global constants local for performance
    noneflag = _fm1parentnone
    sha2flag = usingsha256
    sha1size = _fm1nodesha1size
    sha2size = _fm1nodesha256size
    sha1fmt = _fm1nodesha1
    sha2fmt = _fm1nodesha256
    metasize = _fm1metapairsize
    metafmt = _fm1metapair
    fsize = _fm1fsize
    unpack = _unpack

    # Loop on markers
    stop = len(data) - _fm1fsize
    ufixed = struct.Struct(_fm1fixed).unpack

    while off <= stop:
        # read fixed part
        o1 = off + fsize
        t, secs, tz, flags, numsuc, numpar, nummeta, prec = ufixed(data[off:o1])

        if flags & sha2flag:
            # FIXME: prec was read as a SHA1, needs to be amended

            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha2size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha2size * numsuc
                sucs = unpack(sha2fmt * numsuc, data[o1:o2])

            # read parents (noneflag means "no parent data recorded")
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha2size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha2size * numpar
                parents = unpack(sha2fmt * numpar, data[o2:o3])
        else:
            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha1size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha1size * numsuc
                sucs = unpack(sha1fmt * numsuc, data[o1:o2])

            # read parents (noneflag means "no parent data recorded")
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha1size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha1size * numpar
                parents = unpack(sha1fmt * numpar, data[o2:o3])

        # read metadata: first the (keysize, valuesize) pairs, then the
        # concatenated key/value bytes they describe
        off = o3 + metasize * nummeta
        metapairsize = unpack('>' + (metafmt * nummeta), data[o3:off])
        metadata = []
        for idx in xrange(0, len(metapairsize), 2):
            o1 = off + metapairsize[idx]
            o2 = o1 + metapairsize[idx + 1]
            metadata.append((data[off:o1], data[o1:o2]))
            off = o2

        # tz is stored in minutes on disk; expose it in seconds
        yield (prec, sucs, flags, tuple(metadata), (secs, tz * 60), parents)
370 370
def _fm1encodeonemarker(marker):
    """Serialize one marker tuple using the version-1 binary format.

    The first field of the record is its own total size, which is only
    known after the metadata lengths are accounted for, so it is patched
    in last.
    """
    pre, sucs, flags, metadata, date, parents = marker
    # node width depends on the hash flavor recorded in the flags
    nodefmt = _fm1nodesha256 if flags & usingsha256 else _fm1nodesha1
    numsuc = len(sucs)
    numextranodes = numsuc
    if parents is None:
        numpar = _fm1parentnone
    else:
        numpar = len(parents)
        numextranodes += numpar
    format = (_fm1fixed + nodefmt * numextranodes
              + _fm1metapair * len(metadata))
    # tz is stored in minutes so we divide by 60
    tz = date[1] // 60
    fields = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
    fields.extend(sucs)
    if parents is not None:
        fields.extend(parents)
    totalsize = _calcsize(format)
    for key, value in metadata:
        keylen = len(key)
        vallen = len(value)
        fields.append(keylen)
        fields.append(vallen)
        totalsize += keylen + vallen
    # back-patch the total record size now that it is known
    fields[0] = totalsize
    chunks = [_pack(format, *fields)]
    for key, value in metadata:
        chunks.append(key)
        chunks.append(value)
    return ''.join(chunks)
406 406
def _fm1readmarkers(data, off):
    """Decode version-1 markers, preferring the native C implementation."""
    native = getattr(parsers, 'fm1readmarkers', None)
    if native:
        return native(data, off, len(data) - _fm1fsize)
    # fall back to the pure-python decoder
    return _fm1purereadmarkers(data, off)
413 413
# mapping to read/write various marker formats
# <version> -> (decoder, encoder)
# decoder(data, offset) yields raw marker tuples; encoder(marker) -> bytes
formats = {_fm0version: (_fm0readmarkers, _fm0encodeonemarker),
           _fm1version: (_fm1readmarkers, _fm1encodeonemarker)}
418 418
@util.nogc
def _readmarkers(data):
    """Read and enumerate markers from raw data"""
    # the first byte of the stream is the on-disk format version
    diskversion = _unpack('>B', data[0:1])[0]
    if diskversion not in formats:
        raise error.Abort(_('parsing obsolete marker: unknown version %r')
                          % diskversion)
    decoder = formats[diskversion][0]
    return diskversion, decoder(data, 1)
429 429
def encodemarkers(markers, addheader=False, version=_fm0version):
    # Kept separate from flushmarkers(); it will be reused for marker
    # exchange. Yields the version header first when addheader is set,
    # then one encoded chunk per marker.
    encoder = formats[version][1]
    if addheader:
        yield _pack('>B', version)
    for m in markers:
        yield encoder(m)
438 438
439 439
class marker(object):
    """Wrap obsolete marker raw data

    ``data`` is a raw marker tuple as produced by the obsstore:
    (prec, succs, flags, metadata, date, parents).
    """

    def __init__(self, repo, data):
        # the repo argument will be used to create changectx in later version
        self._repo = repo
        self._data = data
        self._decodedmeta = None

    def __hash__(self):
        return hash(self._data)

    def __eq__(self, other):
        if type(other) != type(self):
            return False
        return self._data == other._data

    def __ne__(self, other):
        # Python 2 does not derive '!=' from __eq__; without this, '!='
        # falls back to identity comparison and disagrees with '=='.
        return not self.__eq__(other)

    def precnode(self):
        """Precursor changeset node identifier"""
        return self._data[0]

    def succnodes(self):
        """List of successor changesets node identifiers"""
        return self._data[1]

    def parentnodes(self):
        """Parents of the precursors (None if not recorded)"""
        return self._data[5]

    def metadata(self):
        """Decoded metadata dictionary"""
        return dict(self._data[3])

    def date(self):
        """Creation date as (unixtime, offset)"""
        return self._data[4]

    def flags(self):
        """The flags field of the marker"""
        return self._data[2]
480 480
@util.nogc
def _addsuccessors(successors, markers):
    # index every marker under its precursor node (field 0)
    for m in markers:
        successors.setdefault(m[0], set()).add(m)
485 485
@util.nogc
def _addprecursors(precursors, markers):
    # index every marker under each of its successor nodes (field 1)
    for m in markers:
        for succnode in m[1]:
            precursors.setdefault(succnode, set()).add(m)
491 491
@util.nogc
def _addchildren(children, markers):
    # index every marker under each recorded parent of its precursor;
    # field 5 is None when parent data was not recorded at all
    for m in markers:
        recordedparents = m[5]
        if recordedparents is not None:
            for parent in recordedparents:
                children.setdefault(parent, set()).add(m)
499 499
def _checkinvalidmarkers(markers):
    """Abort if any marker carries invalid data.

    Kept as a separate function so the evolve extension can replace it
    with more subtle handling.
    """
    # a marker listing nullid among its successors is corrupt data
    if any(node.nullid in m[1] for m in markers):
        raise error.Abort(_('bad obsolescence marker detected: '
                            'invalid successors nullid'))
510 510
class obsstore(object):
    """Store obsolete markers

    Markers can be accessed with three mappings:
    - precursors[x] -> set(markers on precursors edges of x)
    - successors[x] -> set(markers on successors edges of x)
    - children[x]   -> set(markers on precursors edges of children(x))
    """

    # Field layout of one raw marker tuple (the same order as on disk):
    fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
    # prec: nodeid, precursor changesets
    # succs: tuple of nodeid, successor changesets (0-N length)
    # flag: integer, flag field carrying modifier for the markers (see doc)
    # meta: binary blob, encoded metadata dictionary
    # date: (float, int) tuple, date of marker creation
    # parents: (tuple of nodeid) or None, parents of precursors
    #          None is used when no data has been recorded

    def __init__(self, svfs, defaultformat=_fm1version, readonly=False):
        # caches for various obsolescence related computations
        self.caches = {}
        self.svfs = svfs
        # on-disk format version used when writing new markers
        self._version = defaultformat
        self._readonly = readonly

    def __iter__(self):
        # iterate over every known raw marker tuple
        return iter(self._all)

    def __len__(self):
        return len(self._all)

    def __nonzero__(self):
        # when markers are not parsed yet, a cheap stat() of the on-disk
        # file answers "is there any marker?"; size > 1 skips the lone
        # version header byte
        if not self._cached('_all'):
            try:
                return self.svfs.stat('obsstore').st_size > 1
            except OSError as inst:
                if inst.errno != errno.ENOENT:
                    raise
                # just build an empty _all list if no obsstore exists, which
                # avoids further stat() syscalls
                pass
        return bool(self._all)

    __bool__ = __nonzero__  # Python 3 spelling

    @property
    def readonly(self):
        """True if marker creation is disabled

        Remove me in the future when obsolete marker is always on."""
        return self._readonly

    def create(self, transaction, prec, succs=(), flag=0, parents=None,
               date=None, metadata=None):
        """obsolete: add a new obsolete marker

        * ensuring it is hashable
        * check mandatory metadata
        * encode metadata

        If you are a human writing code creating marker you want to use the
        `createmarkers` function in this module instead.

        return True if a new marker have been added, False if the markers
        already existed (no op).
        """
        if metadata is None:
            metadata = {}
        if date is None:
            if 'date' in metadata:
                # as a courtesy for out-of-tree extensions
                date = util.parsedate(metadata.pop('date'))
            else:
                date = util.makedate()
        # precursor and successors must be full binary sha1 nodeids
        if len(prec) != 20:
            raise ValueError(prec)
        for succ in succs:
            if len(succ) != 20:
                raise ValueError(succ)
        if prec in succs:
            raise ValueError(_('in-marker cycle with %s') % node.hex(prec))

        # a sorted tuple keeps the marker hashable and deterministic
        metadata = tuple(sorted(metadata.iteritems()))

        marker = (str(prec), tuple(succs), int(flag), metadata, date, parents)
        return bool(self.add(transaction, [marker]))

    def add(self, transaction, markers):
        """Add new markers to the store

        Take care of filtering duplicate.
        Return the number of new marker."""
        if self._readonly:
            raise error.Abort(_('creating obsolete markers is not enabled on '
                                'this repo'))
        # filter out markers we already have
        known = set(self._all)
        new = []
        for m in markers:
            if m not in known:
                known.add(m)
                new.append(m)
        if new:
            f = self.svfs('obsstore', 'ab')
            try:
                offset = f.tell()
                transaction.add('obsstore', offset)
                # offset == 0: new file - add the version header
                for bytes in encodemarkers(new, offset == 0, self._version):
                    f.write(bytes)
            finally:
                # XXX: f.close() == filecache invalidation == obsstore rebuilt.
                # call 'filecacheentry.refresh()' here
                f.close()
            self._addmarkers(new)
            # new marker *may* have changed several set. invalidate the cache.
            self.caches.clear()
        # records the number of new markers for the transaction hooks
        previous = int(transaction.hookargs.get('new_obsmarkers', '0'))
        transaction.hookargs['new_obsmarkers'] = str(previous + len(new))
        return len(new)

    def mergemarkers(self, transaction, data):
        """merge a binary stream of markers inside the obsstore

        Returns the number of new markers added."""
        version, markers = _readmarkers(data)
        return self.add(transaction, markers)

    @propertycache
    def _all(self):
        # the full list of raw marker tuples, lazily parsed from disk
        data = self.svfs.tryread('obsstore')
        if not data:
            return []
        self._version, markers = _readmarkers(data)
        markers = list(markers)
        _checkinvalidmarkers(markers)
        return markers

    @propertycache
    def successors(self):
        # precursor node -> set of markers rewriting it
        successors = {}
        _addsuccessors(successors, self._all)
        return successors

    @propertycache
    def precursors(self):
        # successor node -> set of markers producing it
        precursors = {}
        _addprecursors(precursors, self._all)
        return precursors

    @propertycache
    def children(self):
        # recorded-parent node -> set of markers whose precursor is a child
        children = {}
        _addchildren(children, self._all)
        return children

    def _cached(self, attr):
        # True when the propertycache `attr` has already been computed
        return attr in self.__dict__

    def _addmarkers(self, markers):
        markers = list(markers) # to allow repeated iteration
        self._all.extend(markers)
        # keep any already-computed index in sync rather than invalidating
        if self._cached('successors'):
            _addsuccessors(self.successors, markers)
        if self._cached('precursors'):
            _addprecursors(self.precursors, markers)
        if self._cached('children'):
            _addchildren(self.children, markers)
        _checkinvalidmarkers(markers)

    def relevantmarkers(self, nodes):
        """return a set of all obsolescence markers relevant to a set of nodes.

        "relevant" to a set of nodes mean:

        - marker that use this changeset as successor
        - prune marker of direct children on this changeset
        - recursive application of the two rules on precursors of these markers

        It is a set so you cannot rely on order."""

        pendingnodes = set(nodes)
        seenmarkers = set()
        seennodes = set(pendingnodes)
        precursorsmarkers = self.precursors
        children = self.children
        while pendingnodes:
            direct = set()
            for current in pendingnodes:
                direct.update(precursorsmarkers.get(current, ()))
                # prune markers are those with no successors (empty mark[1])
                pruned = [m for m in children.get(current, ()) if not m[1]]
                direct.update(pruned)
            direct -= seenmarkers
            # recurse on the precursors of the newly found markers
            pendingnodes = set([m[0] for m in direct])
            seenmarkers |= direct
            pendingnodes -= seennodes
            seennodes |= pendingnodes
        return seenmarkers
709 709
def commonversion(versions):
    """Return the newest version listed in both versions and our local formats.

    Returns None if no common version exists.
    """
    # scan from newest to oldest and stop at the first format we support
    # (note: sorts the caller's list in place, as before)
    versions.sort(reverse=True)
    for candidate in versions:
        if candidate in formats:
            return candidate
    return None
721 721
# arbitrarily picked to fit into the 8K limit from HTTP servers
# you have to take into account:
# - the version header
# - the base85 encoding
_maxpayload = 5300
727 727
def _pushkeyescape(markers):
    """encode markers into a dict suitable for pushkey exchange

    - binary data is base85 encoded
    - split in chunks smaller than 5300 bytes"""
    keys = {}
    parts = []
    # start "over budget" so the first marker always opens a new part
    currentlen = _maxpayload * 2
    for marker in markers:
        encoded = _fm0encodeonemarker(marker)
        if currentlen + len(encoded) > _maxpayload:
            currentpart = []
            currentlen = 0
            parts.append(currentpart)
        currentpart.append(encoded)
        currentlen += len(encoded)
    # each part becomes a 'dumpN' key, prefixed with the format version
    for idx, part in enumerate(reversed(parts)):
        blob = ''.join([_pack('>B', _fm0version)] + part)
        keys['dump%i' % idx] = util.b85encode(blob)
    return keys
748 748
def listmarkers(repo):
    """Expose this repository's markers through the pushkey protocol."""
    store = repo.obsstore
    if not store:
        return {}
    return _pushkeyescape(sorted(store))
754 754
def pushmarker(repo, key, old, new):
    """Push markers over pushkey

    ``key`` must look like 'dumpN' (see _pushkeyescape) and ``new`` holds
    base85-encoded binary marker data; ``old`` is expected to be empty.
    Returns 1 on success, 0 when the key or old value is rejected.
    """
    if not key.startswith('dump'):
        repo.ui.warn(_('unknown key: %r') % key)
        return 0
    if old:
        repo.ui.warn(_('unexpected old value for %r') % key)
        return 0
    data = util.b85decode(new)
    # hold the repo lock for the whole merge + transaction commit
    lock = repo.lock()
    try:
        tr = repo.transaction('pushkey: obsolete markers')
        try:
            repo.obsstore.mergemarkers(tr, data)
            tr.close()
            return 1
        finally:
            tr.release()
    finally:
        lock.release()
775 775
def getmarkers(repo, nodes=None):
    """Yield marker objects known to a repository.

    When ``nodes`` is given, restrict the output to markers "relevant"
    to those nodes; otherwise yield every stored marker.
    """
    if nodes is None:
        raw = repo.obsstore
    else:
        raw = repo.obsstore.relevantmarkers(nodes)
    for rawdata in raw:
        yield marker(repo, rawdata)
788 788
def relevantmarkers(repo, node):
    """Yield every obsolete marker relevant to some revision."""
    for rawdata in repo.obsstore.relevantmarkers(node):
        yield marker(repo, rawdata)
793 793
794 794
def precursormarkers(ctx):
    """Yield the obsolete markers marking this changeset as a successor."""
    repo = ctx.repo()
    for rawdata in repo.obsstore.precursors.get(ctx.node(), ()):
        yield marker(repo, rawdata)
799 799
def successormarkers(ctx):
    """Yield the obsolete markers making this changeset obsolete."""
    repo = ctx.repo()
    for rawdata in repo.obsstore.successors.get(ctx.node(), ()):
        yield marker(repo, rawdata)
804 804
def allsuccessors(obsstore, nodes, ignoreflags=0):
    """Yield every node that is a (transitive) successor of <nodes>.

    The initial nodes themselves are yielded too, and some of the yielded
    successors may be unknown locally. Being a flat enumeration, this is
    unsuited to detecting split changesets. Markers whose flags intersect
    ``ignoreflags`` are skipped.
    """
    pending = set(nodes)
    emitted = set(pending)
    while pending:
        current = pending.pop()
        yield current
        for mark in obsstore.successors.get(current, ()):
            # ignore marker flagged with specified flag
            if mark[2] & ignoreflags:
                continue
            for candidate in mark[1]:
                if candidate not in emitted:
                    emitted.add(candidate)
                    pending.add(candidate)
825 825
def allprecursors(obsstore, nodes, ignoreflags=0):
    """Yield every node that is a (transitive) precursor of <nodes>.

    The initial nodes themselves are yielded too, and some of the yielded
    precursors may be unknown locally. Being a flat enumeration, this is
    unsuited to detecting folded changesets. Markers whose flags intersect
    ``ignoreflags`` are skipped.
    """
    pending = set(nodes)
    emitted = set(pending)
    while pending:
        current = pending.pop()
        yield current
        for mark in obsstore.precursors.get(current, ()):
            # ignore marker flagged with specified flag
            if mark[2] & ignoreflags:
                continue
            candidate = mark[0]
            if candidate not in emitted:
                emitted.add(candidate)
                pending.add(candidate)
847 847
def foreground(repo, nodes):
    """return all nodes in the "foreground" of other node

    The foreground of a revision is anything reachable using parent -> children
    or precursor -> successor relation. It is very similar to "descendant" but
    augmented with obsolescence information.

    Beware that a possible obsolescence cycle may result in an infinite
    foreground in complex situations.
    """
    repo = repo.unfiltered()
    # start from the plain descendants of the requested nodes
    foreground = set(repo.set('%ln::', nodes))
    if repo.obsstore:
        # We only need this complicated logic if there is obsolescence
        # XXX will probably deserve an optimised revset.
        nm = repo.changelog.nodemap
        plen = -1
        # fixed-point iteration: alternately expand through successors and
        # descendants until the set stops growing
        while len(foreground) != plen:
            plen = len(foreground)
            succs = set(c.node() for c in foreground)
            mutable = [c.node() for c in foreground if c.mutable()]
            succs.update(allsuccessors(repo.obsstore, mutable))
            # successors unknown locally cannot be used as revset input
            known = (n for n in succs if n in nm)
            foreground = set(repo.set('%ln::', known))
    return set(c.node() for c in foreground)
873 873
874 874
def successorssets(repo, initialnode, cache=None):
    """Return set of all latest successors of initial nodes

    The successors set of a changeset A are the group of revisions that succeed
    A. It succeeds A as a consistent whole, each revision being only a partial
    replacement. The successors set contains non-obsolete changesets only.

    This function returns the full list of successor sets which is why it
    returns a list of tuples and not just a single tuple. Each tuple is a valid
    successors set. Note that (A,) may be a valid successors set for changeset A
    (see below).

    In most cases, a changeset A will have a single element (e.g. the changeset
    A is replaced by A') in its successors set. Though, it is also common for a
    changeset A to have no elements in its successor set (e.g. the changeset
    has been pruned). Therefore, the returned list of successors sets will be
    [(A',)] or [], respectively.

    When a changeset A is split into A' and B', however, it will result in a
    successors set containing more than a single element, i.e. [(A',B')].
    Divergent changesets will result in multiple successors sets, i.e. [(A',),
    (A'')].

    If a changeset A is not obsolete, then it will conceptually have no
    successors set. To distinguish this from a pruned changeset, the successor
    set will contain itself only, i.e. [(A,)].

    Finally, successors unknown locally are considered to be pruned (obsoleted
    without any successors).

    The optional `cache` parameter is a dictionary that may contain precomputed
    successors sets. It is meant to reuse the computation of a previous call to
    `successorssets` when multiple calls are made at the same time. The cache
    dictionary is updated in place. The caller is responsible for its life
    span. Code that makes multiple calls to `successorssets` *must* use this
    cache mechanism or suffer terrible performance.
    """

    # mapping: precursor node -> markers using it as precursor
    succmarkers = repo.obsstore.successors

    # Stack of nodes we search successors sets for
    toproceed = [initialnode]
    # set version of above list for fast loop detection
    # element added to "toproceed" must be added here
    stackedset = set(toproceed)
    if cache is None:
        cache = {}

    # This while loop is the flattened version of a recursive search for
    # successors sets
    #
    # def successorssets(x):
    #     successors = directsuccessors(x)
    #     ss = [[]]
    #     for succ in directsuccessors(x):
    #         # product as in itertools cartesian product
    #         ss = product(ss, successorssets(succ))
    #     return ss
    #
    # But we can not use plain recursive calls here:
    # - that would blow the python call stack
    # - obsolescence markers may have cycles, we need to handle them.
    #
    # The `toproceed` list act as our call stack. Every node we search
    # successors set for are stacked there.
    #
    # The `stackedset` is set version of this stack used to check if a node is
    # already stacked. This check is used to detect cycles and prevent infinite
    # loop.
    #
    # successors set of all nodes are stored in the `cache` dictionary.
    #
    # After this while loop ends we use the cache to return the successors sets
    # for the node requested by the caller.
    while toproceed:
        # Every iteration tries to compute the successors sets of the topmost
        # node of the stack: CURRENT.
        #
        # There are four possible outcomes:
        #
        # 1) We already know the successors sets of CURRENT:
        #    -> mission accomplished, pop it from the stack.
        # 2) Node is not obsolete:
        #    -> the node is its own successors sets. Add it to the cache.
        # 3) We do not know successors set of direct successors of CURRENT:
        #    -> We add those successors to the stack.
        # 4) We know successors sets of all direct successors of CURRENT:
        #    -> We can compute CURRENT successors set and add it to the
        #       cache.
        #
        current = toproceed[-1]
        if current in cache:
            # case (1): We already know the successors sets
            stackedset.remove(toproceed.pop())
        elif current not in succmarkers:
            # case (2): The node is not obsolete.
            if current in repo:
                # We have a valid last successors.
                cache[current] = [(current,)]
            else:
                # Final obsolete version is unknown locally.
                # Do not count that as a valid successors
                cache[current] = []
        else:
            # cases (3) and (4)
            #
            # We proceed in two phases. Phase 1 aims to distinguish case (3)
            # from case (4):
            #
            # For each direct successors of CURRENT, we check whether its
            # successors sets are known. If they are not, we stack the
            # unknown node and proceed to the next iteration of the while
            # loop. (case 3)
            #
            # During this step, we may detect obsolescence cycles: a node
            # with unknown successors sets but already in the call stack.
            # In such a situation, we arbitrary set the successors sets of
            # the node to nothing (node pruned) to break the cycle.
            #
            # If no break was encountered we proceed to phase 2.
            #
            # Phase 2 computes successors sets of CURRENT (case 4); see details
            # in phase 2 itself.
            #
            # Note the two levels of iteration in each phase.
            # - The first one handles obsolescence markers using CURRENT as
            #   precursor (successors markers of CURRENT).
            #
            #   Having multiple entry here means divergence.
            #
            # - The second one handles successors defined in each marker.
            #
            #   Having none means pruned node, multiple successors means split,
            #   single successors are standard replacement.
            #
            for mark in sorted(succmarkers[current]):
                for suc in mark[1]:
                    if suc not in cache:
                        if suc in stackedset:
                            # cycle breaking
                            cache[suc] = []
                        else:
                            # case (3) If we have not computed successors sets
                            # of one of those successors we add it to the
                            # `toproceed` stack and stop all work for this
                            # iteration.
                            toproceed.append(suc)
                            stackedset.add(suc)
                            break
                else:
                    # inner loop completed without `break`: all successors of
                    # this marker are cached, move on to the next marker
                    continue
                # inner loop broke (we stacked an unknown node): propagate
                # the break to abort phase 1 for this iteration of the while
                break
            else:
                # case (4): we know all successors sets of all direct
                # successors
                #
                # Successors set contributed by each marker depends on the
                # successors sets of all its "successors" node.
                #
                # Each different marker is a divergence in the obsolescence
                # history. It contributes successors sets distinct from other
                # markers.
                #
                # Within a marker, a successor may have divergent successors
                # sets. In such a case, the marker will contribute multiple
                # divergent successors sets. If multiple successors have
                # divergent successors sets, a Cartesian product is used.
                #
                # At the end we post-process successors sets to remove
                # duplicated entry and successors set that are strict subset of
                # another one.
                succssets = []
                for mark in sorted(succmarkers[current]):
                    # successors sets contributed by this marker
                    markss = [[]]
                    for suc in mark[1]:
                        # cardinal product with previous successors
                        productresult = []
                        for prefix in markss:
                            for suffix in cache[suc]:
                                newss = list(prefix)
                                for part in suffix:
                                    # do not duplicated entry in successors set
                                    # first entry wins.
                                    if part not in newss:
                                        newss.append(part)
                                productresult.append(newss)
                        markss = productresult
                    succssets.extend(markss)
                # remove duplicated and subset
                seen = []
                final = []
                # process larger sets first so that subsets are seen after
                # their supersets and can be filtered out
                candidate = sorted(((set(s), s) for s in succssets if s),
                                   key=lambda x: len(x[1]), reverse=True)
                for setversion, listversion in candidate:
                    for seenset in seen:
                        if setversion.issubset(seenset):
                            break
                    else:
                        final.append(listversion)
                        seen.append(setversion)
                final.reverse() # put small successors set first
                cache[current] = final
    return cache[initialnode]
1079 1079
# registry mapping a cache set name to the function computing that set
cachefuncs = {}
def cachefor(name):
    """Decorator registering a function as the computer of the <name> set"""
    def register(func):
        # each set name may be registered exactly once
        assert name not in cachefuncs
        cachefuncs[name] = func
        return func
    return register
1089 1089
def getrevs(repo, name):
    """Return the set of revisions that belong to the <name> set

    Accessing a set may compute it and cache the result for future use."""
    unfi = repo.unfiltered()
    if not unfi.obsstore:
        # no obsolescence data: every computed set is trivially empty
        return frozenset()
    caches = unfi.obsstore.caches
    if name not in caches:
        caches[name] = cachefuncs[name](unfi)
    return caches[name]
1100 1100
# Obsolescence caches must be invalidated whenever:
#
# - a new changeset is added
# - the public phase is changed
# - obsolescence markers are added
# - a repo is stripped
def clearobscaches(repo):
    """Drop every obsolescence-related cache from a repo

    All obsstore caches are cleared, but only when an obsstore already
    exists on the repo.

    (We could be smarter here given the exact event that triggered the
    cache clearing)"""
    # accessing repo.obsstore would create it; only clear when obsstore
    # data is already loaded in this repo
    if 'obsstore' not in repo._filecache:
        return
    repo.obsstore.caches.clear()
1118 1118
@cachefor('obsolete')
def _computeobsoleteset(repo):
    """the set of obsolete revisions"""
    getnode = repo.changelog.node
    succmarkers = repo.obsstore.successors
    # a mutable revision is obsolete as soon as some marker uses it as
    # precursor; public revisions can never be obsolete
    notpublic = repo._phasecache.getrevset(repo, (phases.draft, phases.secret))
    return set(r for r in notpublic if getnode(r) in succmarkers)
1129 1129
@cachefor('unstable')
def _computeunstableset(repo):
    """the set of non obsolete revisions with obsolete parents"""
    pairs = sorted(
        ((ctx.rev(), ctx)
         for ctx in repo.set('(not public()) and (not obsolete())')),
        key=lambda item: item[0])
    unstable = set()
    for rev, ctx in pairs:
        # a revision is unstable when any of its parents is obsolete or
        # unstable; ascending revision order guarantees parents were
        # already classified when we reach their children
        for parent in ctx.parents():
            if parent.obsolete() or parent.rev() in unstable:
                unstable.add(rev)
                break
    return unstable
1144 1144
@cachefor('suspended')
def _computesuspendedset(repo):
    """the set of obsolete parents with non obsolete descendants"""
    # an obsolete revision is suspended when unstable revisions descend
    # from it
    ancs = repo.changelog.ancestors(getrevs(repo, 'unstable'))
    return set(r for r in getrevs(repo, 'obsolete') if r in ancs)
1150 1150
@cachefor('extinct')
def _computeextinctset(repo):
    """the set of obsolete parents without non obsolete descendants"""
    # extinct = obsolete but not suspended
    obsolete = getrevs(repo, 'obsolete')
    suspended = getrevs(repo, 'suspended')
    return obsolete - suspended
1155 1155
1156 1156
@cachefor('bumped')
def _computebumpedset(repo):
    """the set of revs trying to obsolete public revisions"""
    bumped = set()
    # hoist attribute lookups out of the loop
    getphase = repo._phasecache.phase # would be faster to grab the full list
    public = phases.public
    torev = repo.changelog.nodemap.get
    # we only evaluate mutable, non-obsolete revisions
    for ctx in repo.set('(not public()) and (not obsolete())'):
        # (future) a cache of precursors may be worth it if split is very
        # common
        for pnode in allprecursors(repo.obsstore, [ctx.node()],
                                   ignoreflags=bumpedfix):
            prev = torev(pnode) # unfiltered! but so is phasecache
            if prev is None or getphase(repo, prev) > public:
                continue
            # we have a public precursor: this draft is bumped
            bumped.add(ctx.rev())
            break # next draft!
    return bumped
1179 1179
@cachefor('divergent')
def _computedivergentset(repo):
    """the set of rev that compete to be the final successors of some revision.
    """
    divergent = set()
    obsstore = repo.obsstore
    # cache shared across successorssets() calls (required for performance)
    sscache = {}
    for ctx in repo.set('(not public()) - obsolete()'):
        # walk every transitive precursor of this revision
        pending = set(obsstore.precursors.get(ctx.node(), ()))
        visited = set()
        while pending:
            prec = pending.pop()[0]
            if prec in visited:
                continue # emergency cycle hanging prevention
            visited.add(prec)
            if prec not in sscache:
                successorssets(repo, prec, sscache)
            # count non-empty successors sets only; more than one means
            # some precursor has competing final successors
            if len([ss for ss in sscache[prec] if ss]) > 1:
                divergent.add(ctx.rev())
                break
            pending.update(obsstore.precursors.get(prec, ()))
    return divergent
1204 1204
1205 1205
def createmarkers(repo, relations, flag=0, date=None, metadata=None):
    """Add obsolete markers between changesets in a repo

    <relations> must be an iterable of (<old>, (<new>, ...)[,{metadata}])
    tuples. `old` and `news` are changectx. The per-relation metadata is an
    optional dictionary for that marker only; it is merged with the global
    metadata given through the `metadata` argument of this function.

    Trying to obsolete a public changeset will raise an exception.

    The current user and date are used except if specified otherwise in the
    metadata attribute.

    This function operates within a transaction of its own, but does
    not take any lock on the repo.
    """
    # prepare the metadata shared by every marker
    if metadata is None:
        metadata = {}
    if 'user' not in metadata:
        metadata['user'] = repo.ui.username()
    tr = repo.transaction('add-obsolescence-marker')
    try:
        # Creating a marker invalidates the hidden cache, which would make
        # prec.parents() below trigger a recomputation each time, giving
        # n^2 behavior. So gather all marker arguments first, then create
        # the markers in a second pass.
        markerargs = []
        for rel in relations:
            prec, sucs = rel[0], rel[1]
            localmetadata = metadata.copy()
            if len(rel) > 2:
                localmetadata.update(rel[2])

            if not prec.mutable():
                raise error.Abort(_("cannot obsolete public changeset: %s")
                                 % prec,
                                 hint="see 'hg help phases' for details")
            nprec = prec.node()
            nsucs = tuple(s.node() for s in sucs)
            npare = None
            if not nsucs:
                # pruning: record the parents in the marker instead
                npare = tuple(p.node() for p in prec.parents())
            if nprec in nsucs:
                raise error.Abort(_("changeset %s cannot obsolete itself")
                                  % prec)
            markerargs.append((nprec, nsucs, npare, localmetadata))

        for nprec, nsucs, npare, localmetadata in markerargs:
            repo.obsstore.create(tr, nprec, nsucs, flag, parents=npare,
                                 date=date, metadata=localmetadata)
        repo.filteredrevcache.clear()
        tr.close()
    finally:
        tr.release()
1264 1264
def isenabled(repo, option):
    """Returns True if the given repository has the given obsolete option
    enabled.

    `option` is one of the evolution option names; 'all' in the
    experimental.evolution config enables everything. Raises Abort when the
    configuration is inconsistent (sub-options enabled without
    'createmarkers').
    """
    result = set(repo.ui.configlist('experimental', 'evolution'))
    if 'all' in result:
        return True

    # For migration purposes, temporarily return true if the config hasn't been
    # set but _enabled is true.
    if not result and _enabled:
        return True

    # createmarkers must be enabled if other options are enabled
    if ((allowunstableopt in result or exchangeopt in result) and
        createmarkersopt not in result):
        raise error.Abort(_("'createmarkers' obsolete option must be enabled "
                            "if other obsolete options are enabled"))

    return option in result
General Comments 0
You need to be logged in to leave comments. Login now