##// END OF EJS Templates
obsolete: move the 'isenabled' function at the top of the file...
marmoute -
r32333:566cfe9c default
parent child Browse files
Show More
@@ -1,1287 +1,1287 b''
1 1 # obsolete.py - obsolete markers handling
2 2 #
3 3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
4 4 # Logilab SA <contact@logilab.fr>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 """Obsolete marker handling
10 10
11 11 An obsolete marker maps an old changeset to a list of new
12 12 changesets. If the list of new changesets is empty, the old changeset
13 13 is said to be "killed". Otherwise, the old changeset is being
14 14 "replaced" by the new changesets.
15 15
16 16 Obsolete markers can be used to record and distribute changeset graph
17 17 transformations performed by history rewrite operations, and help
18 18 building new tools to reconcile conflicting rewrite actions. To
19 19 facilitate conflict resolution, markers include various annotations
besides old and new changeset identifiers, such as creation date or
21 21 author name.
22 22
23 23 The old obsoleted changeset is called a "precursor" and possible
24 24 replacements are called "successors". Markers that used changeset X as
25 25 a precursor are called "successor markers of X" because they hold
26 26 information about the successors of X. Markers that use changeset Y as
a successor are called "precursor markers of Y" because they hold
28 28 information about the precursors of Y.
29 29
30 30 Examples:
31 31
32 32 - When changeset A is replaced by changeset A', one marker is stored:
33 33
34 34 (A, (A',))
35 35
36 36 - When changesets A and B are folded into a new changeset C, two markers are
37 37 stored:
38 38
39 39 (A, (C,)) and (B, (C,))
40 40
41 41 - When changeset A is simply "pruned" from the graph, a marker is created:
42 42
43 43 (A, ())
44 44
45 45 - When changeset A is split into B and C, a single marker is used:
46 46
47 47 (A, (B, C))
48 48
49 49 We use a single marker to distinguish the "split" case from the "divergence"
50 50 case. If two independent operations rewrite the same changeset A in to A' and
51 51 A'', we have an error case: divergent rewriting. We can detect it because
52 52 two markers will be created independently:
53 53
54 54 (A, (B,)) and (A, (C,))
55 55
56 56 Format
57 57 ------
58 58
59 59 Markers are stored in an append-only file stored in
60 60 '.hg/store/obsstore'.
61 61
62 62 The file starts with a version header:
63 63
64 64 - 1 unsigned byte: version number, starting at zero.
65 65
The header is followed by the markers. Marker format depends on the version. See
67 67 comment associated with each format for details.
68 68
69 69 """
70 70 from __future__ import absolute_import
71 71
72 72 import errno
73 73 import struct
74 74
75 75 from .i18n import _
76 76 from . import (
77 77 error,
78 78 node,
79 79 parsers,
80 80 phases,
81 81 util,
82 82 )
83 83
# Shorthands for the struct helpers used throughout this module.
_pack = struct.pack
_unpack = struct.unpack
_calcsize = struct.calcsize
propertycache = util.propertycache

# the obsolete feature is not mature enough to be enabled by default.
# you have to rely on a third party extension to enable this.
_enabled = False

# Options for obsolescence, read from the 'experimental.evolution' config
createmarkersopt = 'createmarkers'
allowunstableopt = 'allowunstable'
exchangeopt = 'exchange'
97 97
def isenabled(repo, option):
    """Returns True if the given repository has the given obsolete option
    enabled.

    'option' is one of createmarkersopt, allowunstableopt or exchangeopt.
    The special config value 'all' enables every option at once.

    Raises error.Abort when allowunstable/exchange are configured without
    createmarkers, since markers must exist for the other options to make
    sense.
    """
    result = set(repo.ui.configlist('experimental', 'evolution'))
    if 'all' in result:
        return True

    # For migration purposes, temporarily return true if the config hasn't
    # been set but _enabled is true.
    if len(result) == 0 and _enabled:
        return True

    # createmarkers must be enabled if other options are enabled
    if ((allowunstableopt in result or exchangeopt in result) and
        createmarkersopt not in result):
        raise error.Abort(_("'createmarkers' obsolete option must be enabled "
                            "if other obsolete options are enabled"))

    return option in result
118
### obsolescence marker flag

## bumpedfix flag
#
# When a changeset A' succeeds a changeset A which became public, we call A'
# "bumped" because it's a successor of a public changeset
#
# o A' (bumped)
# |`:
# | o A
# |/
# o Z
#
# The way to solve this situation is to create a new changeset Ad as children
# of A. This changeset has the same content as A'. So the diff from A to A'
# is the same as the diff from A to Ad. Ad is marked as a successor of A'
#
# o Ad
# |`:
# | x A'
# |'|
# o | A
# |/
# o Z
#
# But by transitivity Ad is also a successor of A. To avoid having Ad marked
# as bumped too, we add the `bumpedfix` flag to the marker. <A', (Ad,)>.
# This flag means that the successors express the changes between the public
# and bumped version and fix the situation, breaking the transitivity of
# "bumped" here.
bumpedfix = 1  # marker flag: successors repair a "bumped" precursor
usingsha256 = 2  # marker flag: node identifiers are 32-byte sha256 hashes
130 151
## Parsing and writing of version "0"
#
# The header is followed by the markers. Each marker is made of:
#
# - 1 uint8 : number of new changesets "N", can be zero.
#
# - 1 uint32: metadata size "M" in bytes.
#
# - 1 byte: a bit field. It is reserved for flags used in common
#   obsolete marker operations, to avoid repeated decoding of metadata
#   entries.
#
# - 20 bytes: obsoleted changeset identifier.
#
# - N*20 bytes: new changesets identifiers.
#
# - M bytes: metadata as a sequence of nul-terminated strings. Each
#   string contains a key and a value, separated by a colon ':', without
#   additional encoding. Keys cannot contain '\0' or ':' and values
#   cannot contain '\0'.
_fm0version = 0
_fm0fixed = '>BIB20s'  # N (uint8), M (uint32), flags (uint8), precursor node
_fm0node = '20s'       # a single sha1 node identifier
_fm0fsize = _calcsize(_fm0fixed)
_fm0fnodesize = _calcsize(_fm0node)
156 177
def _fm0readmarkers(data, off):
    """Decode version-0 markers from 'data', starting at offset 'off'.

    Yields (prec, sucs, flags, metadata, date, parents) tuples, matching
    the obsstore.fields layout. Parsing stops when less than one
    fixed-size record remains.
    """
    # Loop on markers
    l = len(data)
    while off + _fm0fsize <= l:
        # read fixed part
        cur = data[off:off + _fm0fsize]
        off += _fm0fsize
        numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
        # read replacement
        sucs = ()
        if numsuc:
            s = (_fm0fnodesize * numsuc)
            cur = data[off:off + s]
            sucs = _unpack(_fm0node * numsuc, cur)
            off += s
        # read metadata
        # (metadata will be decoded on demand)
        metadata = data[off:off + mdsize]
        if len(metadata) != mdsize:
            raise error.Abort(_('parsing obsolete marker: metadata is too '
                                'short, %d bytes expected, got %d')
                              % (mdsize, len(metadata)))
        off += mdsize
        metadata = _fm0decodemeta(metadata)
        # fm0 has no dedicated fields for date and parents; they are
        # smuggled inside the metadata dict and popped out here.
        try:
            when, offset = metadata.pop('date', '0 0').split(' ')
            date = float(when), int(offset)
        except ValueError:
            date = (0., 0)
        parents = None
        if 'p2' in metadata:
            parents = (metadata.pop('p1', None), metadata.pop('p2', None))
        elif 'p1' in metadata:
            parents = (metadata.pop('p1', None),)
        elif 'p0' in metadata:
            # 'p0' records "no parents" explicitly
            parents = ()
        if parents is not None:
            try:
                parents = tuple(node.bin(p) for p in parents)
                # if parent content is not a nodeid, drop the data
                for p in parents:
                    if len(p) != 20:
                        parents = None
                        break
            except TypeError:
                # if content cannot be translated to nodeid drop the data.
                parents = None

        metadata = tuple(sorted(metadata.iteritems()))

        yield (pre, sucs, flags, metadata, date, parents)
208 229
def _fm0encodeonemarker(marker):
    """Encode a single marker tuple into the version-0 binary format.

    fm0 has no dedicated fields for date and parents, so they are stored
    inside the metadata dictionary (keys 'date' and 'p1'/'p2'/'p0').
    Raises error.Abort for sha256-flagged markers: fm0 only fits 20-byte
    (sha1) nodes.
    """
    pre, sucs, flags, metadata, date, parents = marker
    if flags & usingsha256:
        raise error.Abort(_('cannot handle sha256 with old obsstore format'))
    metadata = dict(metadata)
    time, tz = date
    metadata['date'] = '%r %i' % (time, tz)
    if parents is not None:
        if not parents:
            # mark that we explicitly recorded no parents
            metadata['p0'] = ''
        for i, p in enumerate(parents, 1):
            metadata['p%i' % i] = node.hex(p)
    metadata = _fm0encodemeta(metadata)
    numsuc = len(sucs)
    format = _fm0fixed + (_fm0node * numsuc)
    data = [numsuc, len(metadata), flags, pre]
    data.extend(sucs)
    return _pack(format, *data) + metadata
228 249
229 250 def _fm0encodemeta(meta):
230 251 """Return encoded metadata string to string mapping.
231 252
232 253 Assume no ':' in key and no '\0' in both key and value."""
233 254 for key, value in meta.iteritems():
234 255 if ':' in key or '\0' in key:
235 256 raise ValueError("':' and '\0' are forbidden in metadata key'")
236 257 if '\0' in value:
237 258 raise ValueError("':' is forbidden in metadata value'")
238 259 return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])
239 260
240 261 def _fm0decodemeta(data):
241 262 """Return string to string dictionary from encoded version."""
242 263 d = {}
243 264 for l in data.split('\0'):
244 265 if l:
245 266 key, value = l.split(':')
246 267 d[key] = value
247 268 return d
248 269
## Parsing and writing of version "1"
#
# The header is followed by the markers. Each marker is made of:
#
# - uint32: total size of the marker (including this field)
#
# - float64: date in seconds since epoch
#
# - int16: timezone offset in minutes
#
# - uint16: a bit field. It is reserved for flags used in common
#   obsolete marker operations, to avoid repeated decoding of metadata
#   entries.
#
# - uint8: number of successors "N", can be zero.
#
# - uint8: number of parents "P", can be zero.
#
#     0: parents data stored but no parent,
#     1: one parent stored,
#     2: two parents stored,
#     3: no parent data stored
#
# - uint8: number of metadata entries M
#
# - 20 or 32 bytes: precursor changeset identifier.
#
# - N*(20 or 32) bytes: successors changesets identifiers.
#
# - P*(20 or 32) bytes: parents of the precursors changesets.
#
# - M*(uint8, uint8): size of all metadata entries (key and value)
#
# - remaining bytes: the metadata, each (key, value) pair after the other.
_fm1version = 1
_fm1fixed = '>IdhHBBB20s'  # size, date, tz, flags, N, P, M, precursor
_fm1nodesha1 = '20s'
_fm1nodesha256 = '32s'
_fm1nodesha1size = _calcsize(_fm1nodesha1)
_fm1nodesha256size = _calcsize(_fm1nodesha256)
_fm1fsize = _calcsize(_fm1fixed)
_fm1parentnone = 3
# the parent count lives in the top two bits of the 16-bit flag field
_fm1parentshift = 14
_fm1parentmask = (_fm1parentnone << _fm1parentshift)
_fm1metapair = 'BB'
_fm1metapairsize = _calcsize('BB')
295 316
def _fm1purereadmarkers(data, off):
    """Pure-python decoder for version-1 binary markers.

    Yields (prec, sucs, flags, metadata, date, parents) tuples, starting
    at offset 'off'. _fm1readmarkers prefers the native C parser and only
    falls back to this implementation.
    """
    # make some global constants local for performance
    noneflag = _fm1parentnone
    sha2flag = usingsha256
    sha1size = _fm1nodesha1size
    sha2size = _fm1nodesha256size
    sha1fmt = _fm1nodesha1
    sha2fmt = _fm1nodesha256
    metasize = _fm1metapairsize
    metafmt = _fm1metapair
    fsize = _fm1fsize
    unpack = _unpack

    # Loop on markers
    stop = len(data) - _fm1fsize
    ufixed = struct.Struct(_fm1fixed).unpack

    while off <= stop:
        # read fixed part
        o1 = off + fsize
        t, secs, tz, flags, numsuc, numpar, nummeta, prec = ufixed(data[off:o1])

        if flags & sha2flag:
            # FIXME: prec was read as a SHA1, needs to be amended

            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha2size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha2size * numsuc
                sucs = unpack(sha2fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha2size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha2size * numpar
                parents = unpack(sha2fmt * numpar, data[o2:o3])
        else:
            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha1size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha1size * numsuc
                sucs = unpack(sha1fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha1size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha1size * numpar
                parents = unpack(sha1fmt * numpar, data[o2:o3])

        # read metadata: first the M (key-size, value-size) byte pairs,
        # then each key/value sliced out of the trailing bytes
        off = o3 + metasize * nummeta
        metapairsize = unpack('>' + (metafmt * nummeta), data[o3:off])
        metadata = []
        for idx in xrange(0, len(metapairsize), 2):
            o1 = off + metapairsize[idx]
            o2 = o1 + metapairsize[idx + 1]
            metadata.append((data[off:o1], data[o1:o2]))
            off = o2

        # timezone offset is stored in minutes on disk, exposed in seconds
        yield (prec, sucs, flags, tuple(metadata), (secs, tz * 60), parents)
370 391
def _fm1encodeonemarker(marker):
    """Encode a single marker tuple into the version-1 binary format."""
    pre, sucs, flags, metadata, date, parents = marker
    # determine node size
    _fm1node = _fm1nodesha1
    if flags & usingsha256:
        _fm1node = _fm1nodesha256
    numsuc = len(sucs)
    numextranodes = numsuc
    if parents is None:
        numpar = _fm1parentnone
    else:
        numpar = len(parents)
        numextranodes += numpar
    formatnodes = _fm1node * numextranodes
    formatmeta = _fm1metapair * len(metadata)
    format = _fm1fixed + formatnodes + formatmeta
    # tz is stored in minutes so we divide by 60
    tz = date[1]//60
    data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
    data.extend(sucs)
    if parents is not None:
        data.extend(parents)
    totalsize = _calcsize(format)
    for key, value in metadata:
        lk = len(key)
        lv = len(value)
        data.append(lk)
        data.append(lv)
        totalsize += lk + lv
    data[0] = totalsize  # patch in the leading uint32 total-size field
    data = [_pack(format, *data)]
    for key, value in metadata:
        data.append(key)
        data.append(value)
    return ''.join(data)
406 427
def _fm1readmarkers(data, off):
    """Decode version-1 markers, using the native parser when available."""
    native = getattr(parsers, 'fm1readmarkers', None)
    if native:
        return native(data, off, len(data) - _fm1fsize)
    return _fm1purereadmarkers(data, off)
413 434
# mapping to read/write various marker formats
# <version> -> (decoder, encoder)
# decoder: callable(data, offset) -> iterable of marker tuples
# encoder: callable(marker) -> binary string for a single marker
formats = {_fm0version: (_fm0readmarkers, _fm0encodeonemarker),
           _fm1version: (_fm1readmarkers, _fm1encodeonemarker)}
418 439
@util.nogc
def _readmarkers(data):
    """Read and enumerate markers from raw data

    Returns a (version, marker-iterator) pair. Aborts on an unknown
    on-disk version.
    """
    # the first byte of the stream is the format version
    version = _unpack('>B', data[0:1])[0]
    if version not in formats:
        raise error.Abort(_('parsing obsolete marker: unknown version %r')
                          % version)
    return version, formats[version][0](data, 1)
429 450
def encodemarkers(markers, addheader=False, version=_fm0version):
    """Yield the binary encoding of markers, one chunk per marker.

    Kept separate from flushmarkers(), it will be reused for markers
    exchange. When addheader is true, the one-byte version header is
    yielded first.
    """
    encoder = formats[version][1]
    if addheader:
        yield _pack('>B', version)
    for m in markers:
        yield encoder(m)
438 459
439 460
class marker(object):
    """Wrap obsolete marker raw data"""

    def __init__(self, repo, data):
        # the repo argument will be used to create changectx in later version
        self._repo = repo
        self._data = data
        self._decodedmeta = None

    def __hash__(self):
        return hash(self._data)

    def __eq__(self, other):
        return type(self) == type(other) and self._data == other._data

    def precnode(self):
        """Precursor changeset node identifier"""
        return self._data[0]

    def succnodes(self):
        """List of successor changesets node identifiers"""
        return self._data[1]

    def parentnodes(self):
        """Parents of the precursors (None if not recorded)"""
        return self._data[5]

    def metadata(self):
        """Decoded metadata dictionary"""
        return dict(self._data[3])

    def date(self):
        """Creation date as (unixtime, offset)"""
        return self._data[4]

    def flags(self):
        """The flags field of the marker"""
        return self._data[2]
480 501
@util.nogc
def _addsuccessors(successors, markers):
    """Index every marker in 'markers' under its precursor node."""
    for m in markers:
        bucket = successors.setdefault(m[0], set())
        bucket.add(m)
485 506
@util.nogc
def _addprecursors(precursors, markers):
    """Index every marker in 'markers' under each of its successor nodes."""
    for m in markers:
        for successor in m[1]:
            precursors.setdefault(successor, set()).add(m)
491 512
@util.nogc
def _addchildren(children, markers):
    """Index every marker under each recorded parent of its precursor.

    Markers without recorded parent data (parents is None) are skipped.
    """
    for m in markers:
        recordedparents = m[5]
        if recordedparents is None:
            continue
        for parent in recordedparents:
            children.setdefault(parent, set()).add(m)
499 520
def _checkinvalidmarkers(markers):
    """Abort if any marker lists the null node among its successors.

    Kept as a separate function so the evolve extension can substitute a
    more subtle handling.
    """
    for m in markers:
        if node.nullid in m[1]:
            raise error.Abort(_('bad obsolescence marker detected: '
                                'invalid successors nullid'))
510 531
class obsstore(object):
    """Store obsolete markers

    Markers can be accessed with three mappings:
    - precursors[x] -> set(markers on precursors edges of x)
    - successors[x] -> set(markers on successors edges of x)
    - children[x] -> set(markers on precursors edges of children(x))
    """

    fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
    # prec: nodeid, precursor changesets
    # succs: tuple of nodeid, successor changesets (0-N length)
    # flag: integer, flag field carrying modifier for the markers (see doc)
    # meta: binary blob, encoded metadata dictionary
    # date: (float, int) tuple, date of marker creation
    # parents: (tuple of nodeid) or None, parents of precursors
    #          None is used when no data has been recorded

    def __init__(self, svfs, defaultformat=_fm1version, readonly=False):
        # caches for various obsolescence related cache
        self.caches = {}
        self.svfs = svfs
        self._version = defaultformat
        self._readonly = readonly

    def __iter__(self):
        return iter(self._all)

    def __len__(self):
        return len(self._all)

    def __nonzero__(self):
        if not self._cached('_all'):
            try:
                # cheap emptiness check: a file holding only the one-byte
                # version header contains no markers
                return self.svfs.stat('obsstore').st_size > 1
            except OSError as inst:
                if inst.errno != errno.ENOENT:
                    raise
                # just build an empty _all list if no obsstore exists, which
                # avoids further stat() syscalls
                pass
        return bool(self._all)

    __bool__ = __nonzero__

    @property
    def readonly(self):
        """True if marker creation is disabled

        Remove me in the future when obsolete marker is always on."""
        return self._readonly

    def create(self, transaction, prec, succs=(), flag=0, parents=None,
               date=None, metadata=None):
        """obsolete: add a new obsolete marker

        * ensuring it is hashable
        * check mandatory metadata
        * encode metadata

        If you are a human writing code creating marker you want to use the
        `createmarkers` function in this module instead.

        return True if a new marker have been added, False if the markers
        already existed (no op).
        """
        if metadata is None:
            metadata = {}
        if date is None:
            if 'date' in metadata:
                # as a courtesy for out-of-tree extensions
                date = util.parsedate(metadata.pop('date'))
            else:
                date = util.makedate()
        # nodes must be full 20-byte binary ids
        if len(prec) != 20:
            raise ValueError(prec)
        for succ in succs:
            if len(succ) != 20:
                raise ValueError(succ)
        if prec in succs:
            raise ValueError(_('in-marker cycle with %s') % node.hex(prec))

        metadata = tuple(sorted(metadata.iteritems()))

        marker = (str(prec), tuple(succs), int(flag), metadata, date, parents)
        return bool(self.add(transaction, [marker]))

    def add(self, transaction, markers):
        """Add new markers to the store

        Take care of filtering duplicate.
        Return the number of new marker."""
        if self._readonly:
            raise error.Abort(_('creating obsolete markers is not enabled on '
                                'this repo'))
        known = set(self._all)
        new = []
        for m in markers:
            if m not in known:
                known.add(m)
                new.append(m)
        if new:
            f = self.svfs('obsstore', 'ab')
            try:
                offset = f.tell()
                transaction.add('obsstore', offset)
                # offset == 0: new file - add the version header
                for bytes in encodemarkers(new, offset == 0, self._version):
                    f.write(bytes)
            finally:
                # XXX: f.close() == filecache invalidation == obsstore rebuilt.
                # call 'filecacheentry.refresh()' here
                f.close()
            self._addmarkers(new)
            # new marker *may* have changed several set. invalidate the cache.
            self.caches.clear()
        # records the number of new markers for the transaction hooks
        previous = int(transaction.hookargs.get('new_obsmarkers', '0'))
        transaction.hookargs['new_obsmarkers'] = str(previous + len(new))
        return len(new)

    def mergemarkers(self, transaction, data):
        """merge a binary stream of markers inside the obsstore

        Returns the number of new markers added."""
        version, markers = _readmarkers(data)
        return self.add(transaction, markers)

    @propertycache
    def _all(self):
        # lazily parsed list of every marker found in the on-disk obsstore
        data = self.svfs.tryread('obsstore')
        if not data:
            return []
        self._version, markers = _readmarkers(data)
        markers = list(markers)
        _checkinvalidmarkers(markers)
        return markers

    @propertycache
    def successors(self):
        successors = {}
        _addsuccessors(successors, self._all)
        return successors

    @propertycache
    def precursors(self):
        precursors = {}
        _addprecursors(precursors, self._all)
        return precursors

    @propertycache
    def children(self):
        children = {}
        _addchildren(children, self._all)
        return children

    def _cached(self, attr):
        # True when the named propertycache has already been computed
        return attr in self.__dict__

    def _addmarkers(self, markers):
        markers = list(markers) # to allow repeated iteration
        self._all.extend(markers)
        # only update the mapping caches that have already been computed
        if self._cached('successors'):
            _addsuccessors(self.successors, markers)
        if self._cached('precursors'):
            _addprecursors(self.precursors, markers)
        if self._cached('children'):
            _addchildren(self.children, markers)
        _checkinvalidmarkers(markers)

    def relevantmarkers(self, nodes):
        """return a set of all obsolescence markers relevant to a set of nodes.

        "relevant" to a set of nodes mean:

        - marker that use this changeset as successor
        - prune marker of direct children on this changeset
        - recursive application of the two rules on precursors of these markers

        It is a set so you cannot rely on order."""

        pendingnodes = set(nodes)
        seenmarkers = set()
        seennodes = set(pendingnodes)
        precursorsmarkers = self.precursors
        children = self.children
        while pendingnodes:
            direct = set()
            for current in pendingnodes:
                direct.update(precursorsmarkers.get(current, ()))
                # a prune marker has an empty successor tuple (m[1])
                pruned = [m for m in children.get(current, ()) if not m[1]]
                direct.update(pruned)
            direct -= seenmarkers
            pendingnodes = set([m[0] for m in direct])
            seenmarkers |= direct
            pendingnodes -= seennodes
            seennodes |= pendingnodes
        return seenmarkers
709 730
def commonversion(versions):
    """Return the newest version listed in both versions and our local formats.

    Returns None if no common version exists.

    Note: sorts the 'versions' list in place (newest first).
    """
    versions.sort(reverse=True)
    # walk from newest to oldest, returning the first format we support
    for candidate in versions:
        if candidate in formats:
            return candidate
    return None
721 742
# arbitrary picked to fit into 8K limit from HTTP server
# you have to take in account:
# - the version header
# - the base85 encoding
_maxpayload = 5300
727 748
def _pushkeyescape(markers):
    """encode markers into a dict suitable for pushkey exchange

    - binary data is base85 encoded
    - split in chunks smaller than 5300 bytes"""
    keys = {}
    parts = []
    # start "over budget" so the first marker always opens a new part
    currentlen = _maxpayload * 2
    for m in markers:
        encoded = _fm0encodeonemarker(m)
        if len(encoded) + currentlen > _maxpayload:
            parts.append([])
            currentlen = 0
        parts[-1].append(encoded)
        currentlen += len(encoded)
    for idx, part in enumerate(reversed(parts)):
        blob = ''.join([_pack('>B', _fm0version)] + part)
        keys['dump%i' % idx] = util.b85encode(blob)
    return keys
748 769
def listmarkers(repo):
    """List markers over pushkey"""
    store = repo.obsstore
    if not store:
        return {}
    return _pushkeyescape(sorted(store))
754 775
def pushmarker(repo, key, old, new):
    """Push markers over pushkey

    Returns 1 on success, 0 when the key is malformed or an unexpected old
    value is supplied (standard pushkey return convention). 'new' is a
    base85-encoded binary marker stream produced by _pushkeyescape.
    """
    if not key.startswith('dump'):
        repo.ui.warn(_('unknown key: %r') % key)
        return 0
    if old:
        repo.ui.warn(_('unexpected old value for %r') % key)
        return 0
    data = util.b85decode(new)
    # take the repo lock before opening a transaction
    lock = repo.lock()
    try:
        tr = repo.transaction('pushkey: obsolete markers')
        try:
            repo.obsstore.mergemarkers(tr, data)
            tr.close()
            return 1
        finally:
            tr.release()
    finally:
        lock.release()
775 796
def getmarkers(repo, nodes=None):
    """returns markers known in a repository

    If <nodes> is specified, only markers "relevant" to those nodes are
    returned"""
    store = repo.obsstore
    rawmarkers = store if nodes is None else store.relevantmarkers(nodes)
    for markerdata in rawmarkers:
        yield marker(repo, markerdata)
788 809
def relevantmarkers(repo, node):
    """all obsolete markers relevant to some revision"""
    store = repo.obsstore
    for rawdata in store.relevantmarkers(node):
        yield marker(repo, rawdata)
793 814
794 815
def precursormarkers(ctx):
    """obsolete marker marking this changeset as a successors"""
    repo = ctx.repo()
    for data in repo.obsstore.precursors.get(ctx.node(), ()):
        yield marker(repo, data)
799 820
def successormarkers(ctx):
    """obsolete marker making this changeset obsolete"""
    repo = ctx.repo()
    for data in repo.obsstore.successors.get(ctx.node(), ()):
        yield marker(repo, data)
804 825
def allsuccessors(obsstore, nodes, ignoreflags=0):
    """Yield node for every successor of <nodes>.

    Some successors may be unknown locally.

    This is a linear yield unsuited to detecting split changesets. It
    includes initial nodes too."""
    pending = set(nodes)
    seen = set(pending)
    while pending:
        current = pending.pop()
        yield current
        for mark in obsstore.successors.get(current, ()):
            # skip markers carrying any of the ignored flags
            if mark[2] & ignoreflags:
                continue
            fresh = [s for s in mark[1] if s not in seen]
            seen.update(fresh)
            pending.update(fresh)
825 846
def allprecursors(obsstore, nodes, ignoreflags=0):
    """Yield node for every precursors of <nodes>.

    Some precursors may be unknown locally.

    This is a linear yield unsuited to detecting folded changesets. It
    includes initial nodes too."""
    pending = set(nodes)
    seen = set(pending)
    while pending:
        current = pending.pop()
        yield current
        for mark in obsstore.precursors.get(current, ()):
            # skip markers carrying any of the ignored flags
            if mark[2] & ignoreflags:
                continue
            prec = mark[0]
            if prec not in seen:
                seen.add(prec)
                pending.add(prec)
847 868
def foreground(repo, nodes):
    """return all nodes in the "foreground" of other node

    The foreground of a revision is anything reachable using parent -> children
    or precursor -> successor relation. It is very similar to "descendant" but
    augmented with obsolescence information.

    Beware that possible obsolescence cycle may result if complex situation.
    """
    repo = repo.unfiltered()
    foreground = set(repo.set('%ln::', nodes))
    if repo.obsstore:
        # We only need this complicated logic if there is obsolescence
        # XXX will probably deserve an optimised revset.
        nm = repo.changelog.nodemap
        plen = -1
        # compute the whole set of successors or descendants
        # (fixed-point iteration: alternate descendant expansion and
        # successor expansion until the set stops growing)
        while len(foreground) != plen:
            plen = len(foreground)
            succs = set(c.node() for c in foreground)
            mutable = [c.node() for c in foreground if c.mutable()]
            succs.update(allsuccessors(repo.obsstore, mutable))
            # successors unknown locally are dropped before the revset query
            known = (n for n in succs if n in nm)
            foreground = set(repo.set('%ln::', known))
    return set(c.node() for c in foreground)
873 894
874 895
def successorssets(repo, initialnode, cache=None):
    """Return set of all latest successors of initial nodes

    The successors set of a changeset A are the group of revisions that succeed
    A. It succeeds A as a consistent whole, each revision being only a partial
    replacement. The successors set contains non-obsolete changesets only.

    This function returns the full list of successor sets which is why it
    returns a list of tuples and not just a single tuple. Each tuple is a valid
    successors set. Note that (A,) may be a valid successors set for changeset A
    (see below).

    In most cases, a changeset A will have a single element (e.g. the changeset
    A is replaced by A') in its successors set. Though, it is also common for a
    changeset A to have no elements in its successor set (e.g. the changeset
    has been pruned). Therefore, the returned list of successors sets will be
    [(A',)] or [], respectively.

    When a changeset A is split into A' and B', however, it will result in a
    successors set containing more than a single element, i.e. [(A',B')].
    Divergent changesets will result in multiple successors sets, i.e. [(A',),
    (A'')].

    If a changeset A is not obsolete, then it will conceptually have no
    successors set. To distinguish this from a pruned changeset, the successor
    set will contain itself only, i.e. [(A,)].

    Finally, successors unknown locally are considered to be pruned (obsoleted
    without any successors).

    The optional `cache` parameter is a dictionary that may contain precomputed
    successors sets. It is meant to reuse the computation of a previous call to
    `successorssets` when multiple calls are made at the same time. The cache
    dictionary is updated in place. The caller is responsible for its life
    span. Code that makes multiple calls to `successorssets` *must* use this
    cache mechanism or suffer terrible performance.
    """

    succmarkers = repo.obsstore.successors

    # Stack of nodes we search successors sets for
    toproceed = [initialnode]
    # set version of above list for fast loop detection
    # element added to "toproceed" must be added here
    stackedset = set(toproceed)
    if cache is None:
        cache = {}

    # This while loop is the flattened version of a recursive search for
    # successors sets
    #
    # def successorssets(x):
    #     successors = directsuccessors(x)
    #     ss = [[]]
    #     for succ in directsuccessors(x):
    #         # product as in itertools cartesian product
    #         ss = product(ss, successorssets(succ))
    #     return ss
    #
    # But we can not use plain recursive calls here:
    # - that would blow the python call stack
    # - obsolescence markers may have cycles, we need to handle them.
    #
    # The `toproceed` list act as our call stack. Every node we search
    # successors set for are stacked there.
    #
    # The `stackedset` is set version of this stack used to check if a node is
    # already stacked. This check is used to detect cycles and prevent infinite
    # loop.
    #
    # successors set of all nodes are stored in the `cache` dictionary.
    #
    # After this while loop ends we use the cache to return the successors sets
    # for the node requested by the caller.
    while toproceed:
        # Every iteration tries to compute the successors sets of the topmost
        # node of the stack: CURRENT.
        #
        # There are four possible outcomes:
        #
        #    1) We already know the successors sets of CURRENT:
        #       -> mission accomplished, pop it from the stack.
        #    2) Node is not obsolete:
        #       -> the node is its own successors sets. Add it to the cache.
        #    3) We do not know successors set of direct successors of CURRENT:
        #       -> We add those successors to the stack.
        #    4) We know successors sets of all direct successors of CURRENT:
        #       -> We can compute CURRENT successors set and add it to the
        #          cache.
        #
        current = toproceed[-1]
        if current in cache:
            # case (1): We already know the successors sets
            stackedset.remove(toproceed.pop())
        elif current not in succmarkers:
            # case (2): The node is not obsolete.
            if current in repo:
                # We have a valid last successors.
                cache[current] = [(current,)]
            else:
                # Final obsolete version is unknown locally.
                # Do not count that as a valid successors
                cache[current] = []
        else:
            # cases (3) and (4)
            #
            # We proceed in two phases. Phase 1 aims to distinguish case (3)
            # from case (4):
            #
            #     For each direct successors of CURRENT, we check whether its
            #     successors sets are known. If they are not, we stack the
            #     unknown node and proceed to the next iteration of the while
            #     loop. (case 3)
            #
            #     During this step, we may detect obsolescence cycles: a node
            #     with unknown successors sets but already in the call stack.
            #     In such a situation, we arbitrary set the successors sets of
            #     the node to nothing (node pruned) to break the cycle.
            #
            #     If no break was encountered we proceed to phase 2.
            #
            # Phase 2 computes successors sets of CURRENT (case 4); see details
            # in phase 2 itself.
            #
            # Note the two levels of iteration in each phase.
            # - The first one handles obsolescence markers using CURRENT as
            #   precursor (successors markers of CURRENT).
            #
            #   Having multiple entry here means divergence.
            #
            # - The second one handles successors defined in each marker.
            #
            #   Having none means pruned node, multiple successors means split,
            #   single successors are standard replacement.
            #
            for mark in sorted(succmarkers[current]):
                for suc in mark[1]:
                    if suc not in cache:
                        if suc in stackedset:
                            # cycle breaking
                            cache[suc] = []
                        else:
                            # case (3) If we have not computed successors sets
                            # of one of those successors we add it to the
                            # `toproceed` stack and stop all work for this
                            # iteration.
                            toproceed.append(suc)
                            stackedset.add(suc)
                            break
                else:
                    # inner for/else: no successor was stacked by this marker,
                    # move on to the next marker
                    continue
                # a successor was pushed on `toproceed`; abort phase 1 and let
                # the while loop process it first (propagate the inner break)
                break
            else:
                # case (4): we know all successors sets of all direct
                # successors
                #
                # Successors set contributed by each marker depends on the
                # successors sets of all its "successors" node.
                #
                # Each different marker is a divergence in the obsolescence
                # history. It contributes successors sets distinct from other
                # markers.
                #
                # Within a marker, a successor may have divergent successors
                # sets. In such a case, the marker will contribute multiple
                # divergent successors sets. If multiple successors have
                # divergent successors sets, a Cartesian product is used.
                #
                # At the end we post-process successors sets to remove
                # duplicated entry and successors set that are strict subset of
                # another one.
                succssets = []
                for mark in sorted(succmarkers[current]):
                    # successors sets contributed by this marker
                    markss = [[]]
                    for suc in mark[1]:
                        # cardinal product with previous successors
                        productresult = []
                        for prefix in markss:
                            for suffix in cache[suc]:
                                newss = list(prefix)
                                for part in suffix:
                                    # do not duplicated entry in successors set
                                    # first entry wins.
                                    if part not in newss:
                                        newss.append(part)
                                productresult.append(newss)
                        markss = productresult
                    succssets.extend(markss)
                # remove duplicated and subset
                seen = []
                final = []
                # process larger sets first so that subsets are seen after
                # their supersets and can be discarded
                candidate = sorted(((set(s), s) for s in succssets if s),
                                   key=lambda x: len(x[1]), reverse=True)
                for setversion, listversion in candidate:
                    for seenset in seen:
                        if setversion.issubset(seenset):
                            break
                    else:
                        final.append(listversion)
                        seen.append(setversion)
                final.reverse() # put small successors set first
                cache[current] = final
    return cache[initialnode]
1079 1100
# mapping of 'set-name' -> <function to compute this set>
cachefuncs = {}
def cachefor(name):
    """Decorator to register a function as computing the cache for a set"""
    def decorator(computer):
        # each named set must have exactly one registered compute function
        assert name not in cachefuncs
        cachefuncs[name] = computer
        return computer
    return decorator
1089 1110
def getrevs(repo, name):
    """Return the set of revision that belong to the <name> set

    Such access may compute the set and cache it for future use"""
    repo = repo.unfiltered()
    store = repo.obsstore
    if not store:
        # no obsolescence data at all: every named set is empty
        return frozenset()
    caches = store.caches
    if name not in caches:
        # lazily compute and memoize the requested set
        caches[name] = cachefuncs[name](repo)
    return caches[name]
1100 1121
# Obsolescence caches must be invalidated when:
#
# - a new changeset is added
# - a public phase is changed
# - obsolescence markers are added
# - strip is used on a repo
def clearobscaches(repo):
    """Remove all obsolescence related cache from a repo

    This clears all caches in obsstore if the obsstore already exists on the
    repo.

    (We could be smarter here given the exact event that trigger the cache
    clearing)"""
    # only clear the caches when there is obsstore data in this repo
    if 'obsstore' not in repo._filecache:
        return
    repo.obsstore.caches.clear()
1118 1139
@cachefor('obsolete')
def _computeobsoleteset(repo):
    """the set of obsolete revisions"""
    getnode = repo.changelog.node
    successors = repo.obsstore.successors
    # a revision is obsolete when some marker uses it as a precursor;
    # only non-public revisions can be obsolete, so restrict the scan
    notpublic = repo._phasecache.getrevset(repo, (phases.draft, phases.secret))
    return set(r for r in notpublic if getnode(r) in successors)
1129 1150
@cachefor('unstable')
def _computeunstableset(repo):
    """the set of non obsolete revisions with obsolete parents"""
    candidates = sorted(
        ((ctx.rev(), ctx) for ctx in
         repo.set('(not public()) and (not obsolete())')),
        key=lambda pair: pair[0])
    unstable = set()
    for rev, ctx in candidates:
        # A rev is unstable if one of its parent is obsolete or unstable
        # this works since we traverse following growing rev order
        if any(p.obsolete() or p.rev() in unstable for p in ctx.parents()):
            unstable.add(rev)
    return unstable
1144 1165
@cachefor('suspended')
def _computesuspendedset(repo):
    """the set of obsolete parents with non obsolete descendants"""
    # obsolete revisions that an unstable changeset still descends from
    ancestors = repo.changelog.ancestors(getrevs(repo, 'unstable'))
    return set(rev for rev in getrevs(repo, 'obsolete') if rev in ancestors)
1150 1171
@cachefor('extinct')
def _computeextinctset(repo):
    """the set of obsolete parents without non obsolete descendants"""
    # an obsolete revision is extinct unless it is merely suspended
    obsolete = getrevs(repo, 'obsolete')
    suspended = getrevs(repo, 'suspended')
    return obsolete - suspended
1155 1176
1156 1177
@cachefor('bumped')
def _computebumpedset(repo):
    """the set of revs trying to obsolete public revisions"""
    bumped = set()
    # hoist attribute lookups out of the loop
    getphase = repo._phasecache.phase # would be faster to grab the full list
    public = phases.public
    node2rev = repo.changelog.nodemap.get
    # We only evaluate mutable, non-obsolete revision
    for ctx in repo.set('(not public()) and (not obsolete())'):
        # (future) A cache of precursors may worth if split is very common
        for pnode in allprecursors(repo.obsstore, [ctx.node()],
                                   ignoreflags=bumpedfix):
            prev = node2rev(pnode) # unfiltered! but so is phasecache
            if prev is None or getphase(repo, prev) > public:
                continue
            # we have a public precursor
            bumped.add(ctx.rev())
            break # Next draft!
    return bumped
1179 1200
@cachefor('divergent')
def _computedivergentset(repo):
    """the set of rev that compete to be the final successors of some revision.
    """
    divergent = set()
    obsstore = repo.obsstore
    newermap = {}
    for ctx in repo.set('(not public()) - obsolete()'):
        # walk markers whose successors include this changeset, transitively
        toprocess = set(obsstore.precursors.get(ctx.node(), ()))
        seen = set()
        while toprocess:
            marker = toprocess.pop()
            prec = marker[0]
            if prec in seen:
                continue # emergency cycle hanging prevention
            seen.add(prec)
            if prec not in newermap:
                successorssets(repo, prec, newermap)
            nonempty = [ss for ss in newermap[prec] if ss]
            if len(nonempty) > 1:
                # several live successors sets compete for this precursor
                divergent.add(ctx.rev())
                break
            toprocess.update(obsstore.precursors.get(prec, ()))
    return divergent
1204 1225
1205 1226
def createmarkers(repo, relations, flag=0, date=None, metadata=None,
                  operation=None):
    """Add obsolete markers between changesets in a repo

    <relations> must be an iterable of (<old>, (<new>, ...)[,{metadata}])
    tuple. `old` and `news` are changectx. metadata is an optional dictionary
    containing metadata for this marker only. It is merged with the global
    metadata specified through the `metadata` argument of this function,

    Trying to obsolete a public changeset will raise an exception.

    Current user and date are used except if specified otherwise in the
    metadata attribute.

    This function operates within a transaction of its own, but does
    not take any lock on the repo.
    """
    # prepare the metadata shared by every marker
    if metadata is None:
        metadata = {}
    if 'user' not in metadata:
        metadata['user'] = repo.ui.username()
    if operation:
        metadata['operation'] = operation
    tr = repo.transaction('add-obsolescence-marker')
    try:
        # Creating a marker invalidates the hidden cache, which would make
        # prec.parents() below trigger a recomputation on every iteration
        # (n^2 behavior). So gather all of the argument tuples first, then
        # create the markers in a second pass.
        markerargs = []
        for rel in relations:
            prec, sucs = rel[0], rel[1]
            localmetadata = metadata.copy()
            if len(rel) > 2:
                localmetadata.update(rel[2])

            if not prec.mutable():
                raise error.Abort(_("cannot obsolete public changeset: %s")
                                  % prec,
                                  hint="see 'hg help phases' for details")
            nprec = prec.node()
            nsucs = tuple(s.node() for s in sucs)
            npare = None
            if not nsucs:
                # a prune marker records the parents of the pruned changeset
                npare = tuple(p.node() for p in prec.parents())
            if nprec in nsucs:
                raise error.Abort(_("changeset %s cannot obsolete itself")
                                  % prec)
            markerargs.append((nprec, nsucs, npare, localmetadata))

        for nprec, nsucs, npare, localmetadata in markerargs:
            repo.obsstore.create(tr, nprec, nsucs, flag, parents=npare,
                                 date=date, metadata=localmetadata)
        repo.filteredrevcache.clear()
        tr.close()
    finally:
        tr.release()
1267
def isenabled(repo, option):
    """Returns True if the given repository has the given obsolete option
    enabled.

    `option` is matched against the 'experimental.evolution' config list;
    the special value 'all' enables everything. Raises error.Abort when the
    configuration enables unstability or exchange without marker creation.
    """
    result = set(repo.ui.configlist('experimental', 'evolution'))
    if 'all' in result:
        return True

    # For migration purposes, temporarily return true if the config hasn't been
    # set but _enabled is true.
    if not result and _enabled:
        return True

    # createmarkers must be enabled if other options are enabled
    if ((allowunstableopt in result or exchangeopt in result) and
        createmarkersopt not in result):
        raise error.Abort(_("'createmarkers' obsolete option must be enabled "
                            "if other obsolete options are enabled"))

    return option in result
General Comments 0
You need to be logged in to leave comments. Login now