obsolete: use absolute_import
Gregory Szorc
r27332:04f346b8 default
@@ -1,1263 +1,1273 b''
1 1 # obsolete.py - obsolete markers handling
2 2 #
3 3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
4 4 # Logilab SA <contact@logilab.fr>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 """Obsolete marker handling
10 10
11 11 An obsolete marker maps an old changeset to a list of new
12 12 changesets. If the list of new changesets is empty, the old changeset
13 13 is said to be "killed". Otherwise, the old changeset is being
14 14 "replaced" by the new changesets.
15 15
16 16 Obsolete markers can be used to record and distribute changeset graph
17 17 transformations performed by history rewrite operations, and help
18 18 building new tools to reconcile conflicting rewrite actions. To
19 19 facilitate conflict resolution, markers include various annotations
20 20 besides the old and new changeset identifiers, such as creation date or
21 21 author name.
22 22
23 23 The old obsoleted changeset is called a "precursor" and possible
24 24 replacements are called "successors". Markers that use changeset X as
25 25 a precursor are called "successor markers of X" because they hold
26 26 information about the successors of X. Markers that use changeset Y as
27 27 a successor are called "precursor markers of Y" because they hold
28 28 information about the precursors of Y.
29 29
30 30 Examples:
31 31
32 32 - When changeset A is replaced by changeset A', one marker is stored:
33 33
34 34 (A, (A',))
35 35
36 36 - When changesets A and B are folded into a new changeset C, two markers are
37 37 stored:
38 38
39 39 (A, (C,)) and (B, (C,))
40 40
41 41 - When changeset A is simply "pruned" from the graph, a marker is created:
42 42
43 43 (A, ())
44 44
45 45 - When changeset A is split into B and C, a single marker is used:
46 46
47 47 (A, (B, C))
48 48
49 49 We use a single marker to distinguish the "split" case from the "divergence"
50 50 case. If two independent operations rewrite the same changeset A into A' and
51 51 A'', we have an error case: divergent rewriting. We can detect it because
52 52 two markers will be created independently:
53 53
54 54 (A, (B,)) and (A, (C,))
55 55
56 56 Format
57 57 ------
58 58
59 59 Markers are stored in an append-only file stored in
60 60 '.hg/store/obsstore'.
61 61
62 62 The file starts with a version header:
63 63
64 64 - 1 unsigned byte: version number, starting at zero.
65 65
66 66 The header is followed by the markers. The marker format depends on the
67 67 version. See the comments associated with each format for details.
68 68
69 69 """
70 import errno, struct
71 import util, base85, node, parsers, error
72 import phases
73 from i18n import _
70 from __future__ import absolute_import
71
72 import errno
73 import struct
74
75 from .i18n import _
76 from . import (
77 base85,
78 error,
79 node,
80 parsers,
81 phases,
82 util,
83 )
74 84
75 85 _pack = struct.pack
76 86 _unpack = struct.unpack
77 87 _calcsize = struct.calcsize
78 88 propertycache = util.propertycache
79 89
80 90 # the obsolete feature is not mature enough to be enabled by default.
81 91 # you have to rely on a third party extension to enable this.
82 92 _enabled = False
83 93
84 94 # Options for obsolescence
85 95 createmarkersopt = 'createmarkers'
86 96 allowunstableopt = 'allowunstable'
87 97 exchangeopt = 'exchange'
88 98
89 99 ### obsolescence marker flag
90 100
91 101 ## bumpedfix flag
92 102 #
93 103 # When a changeset A' succeeds a changeset A which became public, we call A'
94 104 # "bumped" because it is a successor of a public changeset
95 105 #
96 106 # o A' (bumped)
97 107 # |`:
98 108 # | o A
99 109 # |/
100 110 # o Z
101 111 #
102 112 # The way to solve this situation is to create a new changeset Ad as a child
103 113 # of A. This changeset has the same content as A'. So the diff from A to A'
104 114 # is the same as the diff from A to Ad. Ad is marked as a successor of A'
105 115 #
106 116 # o Ad
107 117 # |`:
108 118 # | x A'
109 119 # |'|
110 120 # o | A
111 121 # |/
112 122 # o Z
113 123 #
114 124 # But by transitivity Ad is also a successor of A. To avoid having Ad marked
115 125 # as bumped too, we add the `bumpedfix` flag to the marker <A', (Ad,)>.
116 126 # This flag means that the successor expresses the changes between the public
117 127 # and bumped versions and fixes the situation, breaking the transitivity of
118 128 # "bumped" here.
119 129 bumpedfix = 1
120 130 usingsha256 = 2
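A minimal sketch of testing these flag bits, assuming `mark` is a raw marker tuple whose third field is the flag integer (see `obsstore.fields` below):

    flags = mark[2]            # integer flag field of a raw marker tuple
    if flags & bumpedfix:      # marker fixes a "bumped" situation
        pass
    if flags & usingsha256:    # node fields are 32-byte SHA-256 ids
        pass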
121 131
122 132 ## Parsing and writing of version "0"
123 133 #
124 134 # The header is followed by the markers. Each marker is made of:
125 135 #
126 136 # - 1 uint8: number of new changesets "N", can be zero.
127 137 #
128 138 # - 1 uint32: metadata size "M" in bytes.
129 139 #
130 140 # - 1 byte: a bit field. It is reserved for flags used in common
131 141 # obsolete marker operations, to avoid repeated decoding of metadata
132 142 # entries.
133 143 #
134 144 # - 20 bytes: obsoleted changeset identifier.
135 145 #
136 146 # - N*20 bytes: new changesets identifiers.
137 147 #
138 148 # - M bytes: metadata as a sequence of nul-terminated strings. Each
139 149 # string contains a key and a value, separated by a colon ':', without
140 150 # additional encoding. Keys cannot contain '\0' or ':' and values
141 151 # cannot contain '\0'.
142 152 _fm0version = 0
143 153 _fm0fixed = '>BIB20s'
144 154 _fm0node = '20s'
145 155 _fm0fsize = _calcsize(_fm0fixed)
146 156 _fm0fnodesize = _calcsize(_fm0node)
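A quick sketch of how the v0 fixed part round-trips through `struct` using the constants above; the field values are made up:

    fixed = _pack(_fm0fixed, 1, 0, 0, b'\x11' * 20)  # numsuc, mdsize, flags, prec
    assert len(fixed) == _fm0fsize                   # 1 + 4 + 1 + 20 = 26 bytes
    numsuc, mdsize, flags, pre = _unpack(_fm0fixed, fixed)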
147 157
148 158 def _fm0readmarkers(data, off):
149 159 # Loop on markers
150 160 l = len(data)
151 161 while off + _fm0fsize <= l:
152 162 # read fixed part
153 163 cur = data[off:off + _fm0fsize]
154 164 off += _fm0fsize
155 165 numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
156 166 # read replacement
157 167 sucs = ()
158 168 if numsuc:
159 169 s = (_fm0fnodesize * numsuc)
160 170 cur = data[off:off + s]
161 171 sucs = _unpack(_fm0node * numsuc, cur)
162 172 off += s
163 173 # read metadata
164 174 # (metadata will be decoded on demand)
165 175 metadata = data[off:off + mdsize]
166 176 if len(metadata) != mdsize:
167 177 raise error.Abort(_('parsing obsolete marker: metadata is too '
168 178 'short, %d bytes expected, got %d')
169 179 % (mdsize, len(metadata)))
170 180 off += mdsize
171 181 metadata = _fm0decodemeta(metadata)
172 182 try:
173 183 when, offset = metadata.pop('date', '0 0').split(' ')
174 184 date = float(when), int(offset)
175 185 except ValueError:
176 186 date = (0., 0)
177 187 parents = None
178 188 if 'p2' in metadata:
179 189 parents = (metadata.pop('p1', None), metadata.pop('p2', None))
180 190 elif 'p1' in metadata:
181 191 parents = (metadata.pop('p1', None),)
182 192 elif 'p0' in metadata:
183 193 parents = ()
184 194 if parents is not None:
185 195 try:
186 196 parents = tuple(node.bin(p) for p in parents)
187 197 # if parent content is not a nodeid, drop the data
188 198 for p in parents:
189 199 if len(p) != 20:
190 200 parents = None
191 201 break
192 202 except TypeError:
193 203 # if content cannot be translated to nodeid drop the data.
194 204 parents = None
195 205
196 206 metadata = tuple(sorted(metadata.iteritems()))
197 207
198 208 yield (pre, sucs, flags, metadata, date, parents)
199 209
200 210 def _fm0encodeonemarker(marker):
201 211 pre, sucs, flags, metadata, date, parents = marker
202 212 if flags & usingsha256:
203 213 raise error.Abort(_('cannot handle sha256 with old obsstore format'))
204 214 metadata = dict(metadata)
205 215 time, tz = date
206 216 metadata['date'] = '%r %i' % (time, tz)
207 217 if parents is not None:
208 218 if not parents:
209 219 # mark that we explicitly recorded no parents
210 220 metadata['p0'] = ''
211 221 for i, p in enumerate(parents):
212 222 metadata['p%i' % (i + 1)] = node.hex(p)
213 223 metadata = _fm0encodemeta(metadata)
214 224 numsuc = len(sucs)
215 225 format = _fm0fixed + (_fm0node * numsuc)
216 226 data = [numsuc, len(metadata), flags, pre]
217 227 data.extend(sucs)
218 228 return _pack(format, *data) + metadata
219 229
220 230 def _fm0encodemeta(meta):
221 231 """Return encoded metadata string to string mapping.
222 232
223 233 Assume no ':' in key and no '\0' in both key and value."""
224 234 for key, value in meta.iteritems():
225 235 if ':' in key or '\0' in key:
226 236 raise ValueError("':' and '\0' are forbidden in metadata keys")
227 237 if '\0' in value:
228 238 raise ValueError("'\0' is forbidden in metadata values")
229 239 return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])
230 240
231 241 def _fm0decodemeta(data):
232 242 """Return string to string dictionary from encoded version."""
233 243 d = {}
234 244 for l in data.split('\0'):
235 245 if l:
236 246 key, value = l.split(':')
237 247 d[key] = value
238 248 return d
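For instance, the two v0 metadata helpers above should round-trip a simple dict, provided keys contain no ':' or '\0' and values no '\0':

    meta = {'user': 'alice', 'p1': '11' * 20}
    assert _fm0decodemeta(_fm0encodemeta(meta)) == meta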
239 249
240 250 ## Parsing and writing of version "1"
241 251 #
242 252 # The header is followed by the markers. Each marker is made of:
243 253 #
244 254 # - uint32: total size of the marker (including this field)
245 255 #
246 256 # - float64: date in seconds since epoch
247 257 #
248 258 # - int16: timezone offset in minutes
249 259 #
250 260 # - uint16: a bit field. It is reserved for flags used in common
251 261 # obsolete marker operations, to avoid repeated decoding of metadata
252 262 # entries.
253 263 #
254 264 # - uint8: number of successors "N", can be zero.
255 265 #
256 266 # - uint8: number of parents "P", can be zero.
257 267 #
258 268 # 0: parents data stored but no parent,
259 269 # 1: one parent stored,
260 270 # 2: two parents stored,
261 271 # 3: no parent data stored
262 272 #
263 273 # - uint8: number of metadata entries M
264 274 #
265 275 # - 20 or 32 bytes: precursor changeset identifier.
266 276 #
267 277 # - N*(20 or 32) bytes: successors changesets identifiers.
268 278 #
269 279 # - P*(20 or 32) bytes: parents of the precursors changesets.
270 280 #
271 281 # - M*(uint8, uint8): size of all metadata entries (key and value)
272 282 #
273 283 # - remaining bytes: the metadata, each (key, value) pair after the other.
274 284 _fm1version = 1
275 285 _fm1fixed = '>IdhHBBB20s'
276 286 _fm1nodesha1 = '20s'
277 287 _fm1nodesha256 = '32s'
278 288 _fm1nodesha1size = _calcsize(_fm1nodesha1)
279 289 _fm1nodesha256size = _calcsize(_fm1nodesha256)
280 290 _fm1fsize = _calcsize(_fm1fixed)
281 291 _fm1parentnone = 3
282 292 _fm1parentshift = 14
283 293 _fm1parentmask = (_fm1parentnone << _fm1parentshift)
284 294 _fm1metapair = 'BB'
285 295 _fm1metapairsize = _calcsize('BB')
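A sketch of decoding just the v1 fixed part with the constants above, mirroring the loop below; `data` is a hypothetical marker byte string:

    ufixed = struct.Struct(_fm1fixed).unpack
    tsize, secs, tz, flags, numsuc, numpar, nummeta, prec = ufixed(data[:_fm1fsize])
    if numpar == _fm1parentnone:   # 3 means "no parent data stored"
        parents = None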
286 296
287 297 def _fm1purereadmarkers(data, off):
288 298 # make some global constants local for performance
289 299 noneflag = _fm1parentnone
290 300 sha2flag = usingsha256
291 301 sha1size = _fm1nodesha1size
292 302 sha2size = _fm1nodesha256size
293 303 sha1fmt = _fm1nodesha1
294 304 sha2fmt = _fm1nodesha256
295 305 metasize = _fm1metapairsize
296 306 metafmt = _fm1metapair
297 307 fsize = _fm1fsize
298 308 unpack = _unpack
299 309
300 310 # Loop on markers
301 311 stop = len(data) - _fm1fsize
302 312 ufixed = struct.Struct(_fm1fixed).unpack
303 313
304 314 while off <= stop:
305 315 # read fixed part
306 316 o1 = off + fsize
307 317 t, secs, tz, flags, numsuc, numpar, nummeta, prec = ufixed(data[off:o1])
308 318
309 319 if flags & sha2flag:
310 320 # FIXME: prec was read as a SHA1, needs to be amended
311 321
312 322 # read 0 or more successors
313 323 if numsuc == 1:
314 324 o2 = o1 + sha2size
315 325 sucs = (data[o1:o2],)
316 326 else:
317 327 o2 = o1 + sha2size * numsuc
318 328 sucs = unpack(sha2fmt * numsuc, data[o1:o2])
319 329
320 330 # read parents
321 331 if numpar == noneflag:
322 332 o3 = o2
323 333 parents = None
324 334 elif numpar == 1:
325 335 o3 = o2 + sha2size
326 336 parents = (data[o2:o3],)
327 337 else:
328 338 o3 = o2 + sha2size * numpar
329 339 parents = unpack(sha2fmt * numpar, data[o2:o3])
330 340 else:
331 341 # read 0 or more successors
332 342 if numsuc == 1:
333 343 o2 = o1 + sha1size
334 344 sucs = (data[o1:o2],)
335 345 else:
336 346 o2 = o1 + sha1size * numsuc
337 347 sucs = unpack(sha1fmt * numsuc, data[o1:o2])
338 348
339 349 # read parents
340 350 if numpar == noneflag:
341 351 o3 = o2
342 352 parents = None
343 353 elif numpar == 1:
344 354 o3 = o2 + sha1size
345 355 parents = (data[o2:o3],)
346 356 else:
347 357 o3 = o2 + sha1size * numpar
348 358 parents = unpack(sha1fmt * numpar, data[o2:o3])
349 359
350 360 # read metadata
351 361 off = o3 + metasize * nummeta
352 362 metapairsize = unpack('>' + (metafmt * nummeta), data[o3:off])
353 363 metadata = []
354 364 for idx in xrange(0, len(metapairsize), 2):
355 365 o1 = off + metapairsize[idx]
356 366 o2 = o1 + metapairsize[idx + 1]
357 367 metadata.append((data[off:o1], data[o1:o2]))
358 368 off = o2
359 369
360 370 yield (prec, sucs, flags, tuple(metadata), (secs, tz * 60), parents)
361 371
362 372 def _fm1encodeonemarker(marker):
363 373 pre, sucs, flags, metadata, date, parents = marker
364 374 # determine node size
365 375 _fm1node = _fm1nodesha1
366 376 if flags & usingsha256:
367 377 _fm1node = _fm1nodesha256
368 378 numsuc = len(sucs)
369 379 numextranodes = numsuc
370 380 if parents is None:
371 381 numpar = _fm1parentnone
372 382 else:
373 383 numpar = len(parents)
374 384 numextranodes += numpar
375 385 formatnodes = _fm1node * numextranodes
376 386 formatmeta = _fm1metapair * len(metadata)
377 387 format = _fm1fixed + formatnodes + formatmeta
378 388 # tz is stored in minutes so we divide by 60
379 389 tz = date[1]//60
380 390 data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
381 391 data.extend(sucs)
382 392 if parents is not None:
383 393 data.extend(parents)
384 394 totalsize = _calcsize(format)
385 395 for key, value in metadata:
386 396 lk = len(key)
387 397 lv = len(value)
388 398 data.append(lk)
389 399 data.append(lv)
390 400 totalsize += lk + lv
391 401 data[0] = totalsize
392 402 data = [_pack(format, *data)]
393 403 for key, value in metadata:
394 404 data.append(key)
395 405 data.append(value)
396 406 return ''.join(data)
397 407
398 408 def _fm1readmarkers(data, off):
399 409 native = getattr(parsers, 'fm1readmarkers', None)
400 410 if not native:
401 411 return _fm1purereadmarkers(data, off)
402 412 stop = len(data) - _fm1fsize
403 413 return native(data, off, stop)
404 414
405 415 # mapping to read/write various marker formats
406 416 # <version> -> (decoder, encoder)
407 417 formats = {_fm0version: (_fm0readmarkers, _fm0encodeonemarker),
408 418 _fm1version: (_fm1readmarkers, _fm1encodeonemarker)}
409 419
410 420 @util.nogc
411 421 def _readmarkers(data):
412 422 """Read and enumerate markers from raw data"""
413 423 off = 0
414 424 diskversion = _unpack('>B', data[off:off + 1])[0]
415 425 off += 1
416 426 if diskversion not in formats:
417 427 raise error.Abort(_('parsing obsolete marker: unknown version %r')
418 428 % diskversion)
419 429 return diskversion, formats[diskversion][0](data, off)
420 430
421 431 def encodemarkers(markers, addheader=False, version=_fm0version):
422 432 # Kept separate from flushmarkers(), it will be reused for
423 433 # markers exchange.
424 434 encodeone = formats[version][1]
425 435 if addheader:
426 436 yield _pack('>B', version)
427 437 for marker in markers:
428 438 yield encodeone(marker)
429 439
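As a sketch, `encodemarkers` and `_readmarkers` should round-trip a marker list; `m` is a hypothetical fm1-style marker tuple:

    m = (b'\x11' * 20, (b'\x22' * 20,), 0, (('user', 'alice'),), (0.0, 0), None)
    data = ''.join(encodemarkers([m], addheader=True, version=_fm1version))
    version, markers = _readmarkers(data)
    assert version == _fm1version and list(markers) == [m]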
430 440
431 441 class marker(object):
432 442 """Wrap obsolete marker raw data"""
433 443
434 444 def __init__(self, repo, data):
435 445 # the repo argument will be used to create changectx in a later version
436 446 self._repo = repo
437 447 self._data = data
438 448 self._decodedmeta = None
439 449
440 450 def __hash__(self):
441 451 return hash(self._data)
442 452
443 453 def __eq__(self, other):
444 454 if type(other) != type(self):
445 455 return False
446 456 return self._data == other._data
447 457
448 458 def precnode(self):
449 459 """Precursor changeset node identifier"""
450 460 return self._data[0]
451 461
452 462 def succnodes(self):
453 463 """List of successor changesets node identifiers"""
454 464 return self._data[1]
455 465
456 466 def parentnodes(self):
457 467 """Parents of the precursors (None if not recorded)"""
458 468 return self._data[5]
459 469
460 470 def metadata(self):
461 471 """Decoded metadata dictionary"""
462 472 return dict(self._data[3])
463 473
464 474 def date(self):
465 475 """Creation date as (unixtime, offset)"""
466 476 return self._data[4]
467 477
468 478 def flags(self):
469 479 """The flags field of the marker"""
470 480 return self._data[2]
471 481
472 482 @util.nogc
473 483 def _addsuccessors(successors, markers):
474 484 for mark in markers:
475 485 successors.setdefault(mark[0], set()).add(mark)
476 486
477 487 @util.nogc
478 488 def _addprecursors(precursors, markers):
479 489 for mark in markers:
480 490 for suc in mark[1]:
481 491 precursors.setdefault(suc, set()).add(mark)
482 492
483 493 @util.nogc
484 494 def _addchildren(children, markers):
485 495 for mark in markers:
486 496 parents = mark[5]
487 497 if parents is not None:
488 498 for p in parents:
489 499 children.setdefault(p, set()).add(mark)
490 500
491 501 def _checkinvalidmarkers(markers):
492 502 """search for marker with invalid data and raise error if needed
493 503
494 504 Exist as a separated function to allow the evolve extension for a more
495 505 subtle handling.
496 506 """
497 507 for mark in markers:
498 508 if node.nullid in mark[1]:
499 509 raise error.Abort(_('bad obsolescence marker detected: '
500 510 'invalid successors nullid'))
501 511
502 512 class obsstore(object):
503 513 """Store obsolete markers
504 514
505 515 Markers can be accessed with three mappings:
506 516 - precursors[x] -> set(markers on precursors edges of x)
507 517 - successors[x] -> set(markers on successors edges of x)
508 518 - children[x] -> set(markers on precursors edges of children(x))
509 519 """
510 520
511 521 fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
512 522 # prec: nodeid, precursor changesets
513 523 # succs: tuple of nodeid, successor changesets (0-N length)
514 524 # flag: integer, flag field carrying modifier for the markers (see doc)
515 525 # meta: binary blob, encoded metadata dictionary
516 526 # date: (float, int) tuple, date of marker creation
517 527 # parents: (tuple of nodeid) or None, parents of precursors
518 528 # None is used when no data has been recorded
519 529
520 530 def __init__(self, svfs, defaultformat=_fm1version, readonly=False):
521 531 # caches for various obsolescence related computations
522 532 self.caches = {}
523 533 self.svfs = svfs
524 534 self._version = defaultformat
525 535 self._readonly = readonly
526 536
527 537 def __iter__(self):
528 538 return iter(self._all)
529 539
530 540 def __len__(self):
531 541 return len(self._all)
532 542
533 543 def __nonzero__(self):
534 544 if not self._cached('_all'):
535 545 try:
536 546 return self.svfs.stat('obsstore').st_size > 1
537 547 except OSError as inst:
538 548 if inst.errno != errno.ENOENT:
539 549 raise
540 550 # just build an empty _all list if no obsstore exists, which
541 551 # avoids further stat() syscalls
542 552 pass
543 553 return bool(self._all)
544 554
545 555 @property
546 556 def readonly(self):
547 557 """True if marker creation is disabled
548 558
549 559 Remove me in the future when obsolete markers are always on."""
550 560 return self._readonly
551 561
552 562 def create(self, transaction, prec, succs=(), flag=0, parents=None,
553 563 date=None, metadata=None):
554 564 """obsolete: add a new obsolete marker
555 565
556 566 * ensure it is hashable
557 567 * check mandatory metadata
558 568 * encode metadata
559 569
560 570 If you are a human writing code that creates markers, you want to use
561 571 the `createmarkers` function in this module instead.
562 572
563 573 Return True if a new marker has been added, False if the marker
564 574 already existed (no-op).
565 575 """
566 576 if metadata is None:
567 577 metadata = {}
568 578 if date is None:
569 579 if 'date' in metadata:
570 580 # as a courtesy for out-of-tree extensions
571 581 date = util.parsedate(metadata.pop('date'))
572 582 else:
573 583 date = util.makedate()
574 584 if len(prec) != 20:
575 585 raise ValueError(prec)
576 586 for succ in succs:
577 587 if len(succ) != 20:
578 588 raise ValueError(succ)
579 589 if prec in succs:
580 590 raise ValueError(_('in-marker cycle with %s') % node.hex(prec))
581 591
582 592 metadata = tuple(sorted(metadata.iteritems()))
583 593
584 594 marker = (str(prec), tuple(succs), int(flag), metadata, date, parents)
585 595 return bool(self.add(transaction, [marker]))
586 596
587 597 def add(self, transaction, markers):
588 598 """Add new markers to the store
589 599
590 600 Takes care of filtering out duplicates.
591 601 Returns the number of new markers."""
592 602 if self._readonly:
593 603 raise error.Abort('creating obsolete markers is not enabled on '
594 604 'this repo')
595 605 known = set(self._all)
596 606 new = []
597 607 for m in markers:
598 608 if m not in known:
599 609 known.add(m)
600 610 new.append(m)
601 611 if new:
602 612 f = self.svfs('obsstore', 'ab')
603 613 try:
604 614 offset = f.tell()
605 615 transaction.add('obsstore', offset)
606 616 # offset == 0: new file - add the version header
607 617 for bytes in encodemarkers(new, offset == 0, self._version):
608 618 f.write(bytes)
609 619 finally:
610 620 # XXX: f.close() == filecache invalidation == obsstore rebuilt.
611 621 # call 'filecacheentry.refresh()' here
612 622 f.close()
613 623 self._addmarkers(new)
614 624 # new markers *may* have changed several sets. invalidate the cache.
615 625 self.caches.clear()
616 626 # records the number of new markers for the transaction hooks
617 627 previous = int(transaction.hookargs.get('new_obsmarkers', '0'))
618 628 transaction.hookargs['new_obsmarkers'] = str(previous + len(new))
619 629 return len(new)
620 630
621 631 def mergemarkers(self, transaction, data):
622 632 """merge a binary stream of markers inside the obsstore
623 633
624 634 Returns the number of new markers added."""
625 635 version, markers = _readmarkers(data)
626 636 return self.add(transaction, markers)
627 637
628 638 @propertycache
629 639 def _all(self):
630 640 data = self.svfs.tryread('obsstore')
631 641 if not data:
632 642 return []
633 643 self._version, markers = _readmarkers(data)
634 644 markers = list(markers)
635 645 _checkinvalidmarkers(markers)
636 646 return markers
637 647
638 648 @propertycache
639 649 def successors(self):
640 650 successors = {}
641 651 _addsuccessors(successors, self._all)
642 652 return successors
643 653
644 654 @propertycache
645 655 def precursors(self):
646 656 precursors = {}
647 657 _addprecursors(precursors, self._all)
648 658 return precursors
649 659
650 660 @propertycache
651 661 def children(self):
652 662 children = {}
653 663 _addchildren(children, self._all)
654 664 return children
655 665
656 666 def _cached(self, attr):
657 667 return attr in self.__dict__
658 668
659 669 def _addmarkers(self, markers):
660 670 markers = list(markers) # to allow repeated iteration
661 671 self._all.extend(markers)
662 672 if self._cached('successors'):
663 673 _addsuccessors(self.successors, markers)
664 674 if self._cached('precursors'):
665 675 _addprecursors(self.precursors, markers)
666 676 if self._cached('children'):
667 677 _addchildren(self.children, markers)
668 678 _checkinvalidmarkers(markers)
669 679
670 680 def relevantmarkers(self, nodes):
671 681 """return a set of all obsolescence markers relevant to a set of nodes.
672 682
673 683 "relevant" to a set of nodes mean:
674 684
675 685 - marker that use this changeset as successor
676 686 - prune marker of direct children on this changeset
677 687 - recursive application of the two rules on precursors of these markers
678 688
679 689 It is a set so you cannot rely on order."""
680 690
681 691 pendingnodes = set(nodes)
682 692 seenmarkers = set()
683 693 seennodes = set(pendingnodes)
684 694 precursorsmarkers = self.precursors
685 695 children = self.children
686 696 while pendingnodes:
687 697 direct = set()
688 698 for current in pendingnodes:
689 699 direct.update(precursorsmarkers.get(current, ()))
690 700 pruned = [m for m in children.get(current, ()) if not m[1]]
691 701 direct.update(pruned)
692 702 direct -= seenmarkers
693 703 pendingnodes = set([m[0] for m in direct])
694 704 seenmarkers |= direct
695 705 pendingnodes -= seennodes
696 706 seennodes |= pendingnodes
697 707 return seenmarkers
698 708
699 709 def commonversion(versions):
700 710 """Return the newest version listed in both versions and our local formats.
701 711
702 712 Returns None if no common version exists.
703 713 """
704 714 versions.sort(reverse=True)
705 715 # search for the highest version known on both sides
706 716 for v in versions:
707 717 if v in formats:
708 718 return v
709 719 return None
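For example, with the local `formats` mapping above supporting versions 0 and 1, a remote advertising newer versions falls back to the newest shared one:

    assert commonversion([2, 1]) == 1   # 2 is unknown locally, 1 is shared
    assert commonversion([5]) is None   # no common version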
710 720
711 721 # arbitrarily picked to fit into the 8K limit of HTTP servers
712 722 # you have to take into account:
713 723 # - the version header
714 724 # - the base85 encoding
715 725 _maxpayload = 5300
716 726
717 727 def _pushkeyescape(markers):
718 728 """encode markers into a dict suitable for pushkey exchange
719 729
720 730 - binary data is base85 encoded
721 731 - split into chunks smaller than 5300 bytes"""
722 732 keys = {}
723 733 parts = []
724 734 currentlen = _maxpayload * 2 # ensure we create a new part
725 735 for marker in markers:
726 736 nextdata = _fm0encodeonemarker(marker)
727 737 if (len(nextdata) + currentlen > _maxpayload):
728 738 currentpart = []
729 739 currentlen = 0
730 740 parts.append(currentpart)
731 741 currentpart.append(nextdata)
732 742 currentlen += len(nextdata)
733 743 for idx, part in enumerate(reversed(parts)):
734 744 data = ''.join([_pack('>B', _fm0version)] + part)
735 745 keys['dump%i' % idx] = base85.b85encode(data)
736 746 return keys
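A usage sketch, assuming a hypothetical `repo` with an obsstore (this mirrors what `listmarkers` below does):

    keys = _pushkeyescape(sorted(repo.obsstore))
    for name in sorted(keys):
        assert name.startswith('dump')   # 'dump0', 'dump1', ... chunks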
737 747
738 748 def listmarkers(repo):
739 749 """List markers over pushkey"""
740 750 if not repo.obsstore:
741 751 return {}
742 752 return _pushkeyescape(sorted(repo.obsstore))
743 753
744 754 def pushmarker(repo, key, old, new):
745 755 """Push markers over pushkey"""
746 756 if not key.startswith('dump'):
747 757 repo.ui.warn(_('unknown key: %r') % key)
748 758 return 0
749 759 if old:
750 760 repo.ui.warn(_('unexpected old value for %r') % key)
751 761 return 0
752 762 data = base85.b85decode(new)
753 763 lock = repo.lock()
754 764 try:
755 765 tr = repo.transaction('pushkey: obsolete markers')
756 766 try:
757 767 repo.obsstore.mergemarkers(tr, data)
758 768 tr.close()
759 769 return 1
760 770 finally:
761 771 tr.release()
762 772 finally:
763 773 lock.release()
764 774
765 775 def getmarkers(repo, nodes=None):
766 776 """returns markers known in a repository
767 777
768 778 If <nodes> is specified, only markers "relevant" to those nodes are
769 779 returned"""
770 780 if nodes is None:
771 781 rawmarkers = repo.obsstore
772 782 else:
773 783 rawmarkers = repo.obsstore.relevantmarkers(nodes)
774 784
775 785 for markerdata in rawmarkers:
776 786 yield marker(repo, markerdata)
777 787
778 788 def relevantmarkers(repo, node):
779 789 """all obsolete markers relevant to some revision"""
780 790 for markerdata in repo.obsstore.relevantmarkers(node):
781 791 yield marker(repo, markerdata)
782 792
783 793
784 794 def precursormarkers(ctx):
785 795 """obsolete marker marking this changeset as a successors"""
786 796 for data in ctx.repo().obsstore.precursors.get(ctx.node(), ()):
787 797 yield marker(ctx.repo(), data)
788 798
789 799 def successormarkers(ctx):
790 800 """obsolete marker making this changeset obsolete"""
791 801 for data in ctx.repo().obsstore.successors.get(ctx.node(), ()):
792 802 yield marker(ctx.repo(), data)
793 803
794 804 def allsuccessors(obsstore, nodes, ignoreflags=0):
795 805 """Yield node for every successor of <nodes>.
796 806
797 807 Some successors may be unknown locally.
798 808
799 809 This is a linear yield unsuited to detecting split changesets. It includes
800 810 initial nodes too."""
801 811 remaining = set(nodes)
802 812 seen = set(remaining)
803 813 while remaining:
804 814 current = remaining.pop()
805 815 yield current
806 816 for mark in obsstore.successors.get(current, ()):
807 817 # ignore marker flagged with specified flag
808 818 if mark[2] & ignoreflags:
809 819 continue
810 820 for suc in mark[1]:
811 821 if suc not in seen:
812 822 seen.add(suc)
813 823 remaining.add(suc)
814 824
815 825 def allprecursors(obsstore, nodes, ignoreflags=0):
816 826 """Yield node for every precursors of <nodes>.
817 827
818 828 Some precursors may be unknown locally.
819 829
820 830 This is a linear yield unsuited to detecting folded changesets. It includes
821 831 initial nodes too."""
822 832
823 833 remaining = set(nodes)
824 834 seen = set(remaining)
825 835 while remaining:
826 836 current = remaining.pop()
827 837 yield current
828 838 for mark in obsstore.precursors.get(current, ()):
829 839 # ignore marker flagged with specified flag
830 840 if mark[2] & ignoreflags:
831 841 continue
832 842 suc = mark[0]
833 843 if suc not in seen:
834 844 seen.add(suc)
835 845 remaining.add(suc)
836 846
837 847 def foreground(repo, nodes):
838 848 """return all nodes in the "foreground" of other node
839 849
840 850 The foreground of a revision is anything reachable using parent -> children
841 851 or precursor -> successor relation. It is very similar to "descendant" but
842 852 augmented with obsolescence information.
843 853
844 854 Beware that obsolescence cycles may arise in complex situations.
845 855 """
846 856 repo = repo.unfiltered()
847 857 foreground = set(repo.set('%ln::', nodes))
848 858 if repo.obsstore:
849 859 # We only need this complicated logic if there is obsolescence
850 860 # XXX will probably deserve an optimised revset.
851 861 nm = repo.changelog.nodemap
852 862 plen = -1
853 863 # compute the whole set of successors or descendants
854 864 while len(foreground) != plen:
855 865 plen = len(foreground)
856 866 succs = set(c.node() for c in foreground)
857 867 mutable = [c.node() for c in foreground if c.mutable()]
858 868 succs.update(allsuccessors(repo.obsstore, mutable))
859 869 known = (n for n in succs if n in nm)
860 870 foreground = set(repo.set('%ln::', known))
861 871 return set(c.node() for c in foreground)
862 872
863 873
864 874 def successorssets(repo, initialnode, cache=None):
865 875 """Return set of all latest successors of initial nodes
866 876
867 877 The successors set of a changeset A are the group of revisions that succeed
868 878 A. It succeeds A as a consistent whole, each revision being only a partial
869 879 replacement. The successors set contains non-obsolete changesets only.
870 880
871 881 This function returns the full list of successor sets which is why it
872 882 returns a list of tuples and not just a single tuple. Each tuple is a valid
873 883 successors set. Note that (A,) may be a valid successors set for changeset A
874 884 (see below).
875 885
876 886 In most cases, a changeset A will have a single element (e.g. the changeset
877 887 A is replaced by A') in its successors set. Though, it is also common for a
878 888 changeset A to have no elements in its successor set (e.g. the changeset
879 889 has been pruned). Therefore, the returned list of successors sets will be
880 890 [(A',)] or [], respectively.
881 891
882 892 When a changeset A is split into A' and B', however, it will result in a
883 893 successors set containing more than a single element, i.e. [(A',B')].
884 894 Divergent changesets will result in multiple successors sets, i.e. [(A',),
885 895 (A'')].
886 896
887 897 If a changeset A is not obsolete, then it will conceptually have no
888 898 successors set. To distinguish this from a pruned changeset, the successor
889 899 set will contain itself only, i.e. [(A,)].
890 900
891 901 Finally, successors unknown locally are considered to be pruned (obsoleted
892 902 without any successors).
893 903
894 904 The optional `cache` parameter is a dictionary that may contain precomputed
895 905 successors sets. It is meant to reuse the computation of a previous call to
896 906 `successorssets` when multiple calls are made at the same time. The cache
897 907 dictionary is updated in place. The caller is responsible for its life
898 908 span. Code that makes multiple calls to `successorssets` *must* use this
899 909 cache mechanism or suffer terrible performance.
900 910 """
901 911
902 912 succmarkers = repo.obsstore.successors
903 913
904 914 # Stack of nodes we search successors sets for
905 915 toproceed = [initialnode]
906 916 # set version of above list for fast loop detection
907 917 # element added to "toproceed" must be added here
908 918 stackedset = set(toproceed)
909 919 if cache is None:
910 920 cache = {}
911 921
912 922 # This while loop is the flattened version of a recursive search for
913 923 # successors sets
914 924 #
915 925 # def successorssets(x):
916 926 # successors = directsuccessors(x)
917 927 # ss = [[]]
918 928 # for succ in directsuccessors(x):
919 929 # # product as in itertools cartesian product
920 930 # ss = product(ss, successorssets(succ))
921 931 # return ss
922 932 #
923 933 # But we can not use plain recursive calls here:
924 934 # - that would blow the python call stack
925 935 # - obsolescence markers may have cycles, we need to handle them.
926 936 #
927 937 # The `toproceed` list acts as our call stack. Every node whose
928 938 # successors sets we search for is stacked there.
929 939 #
930 940 # The `stackedset` is the set version of this stack, used to check if a
931 941 # node is already stacked. This check is used to detect cycles and
932 942 # prevent infinite loops.
933 943 #
934 944 # successors sets of all nodes are stored in the `cache` dictionary.
935 945 #
936 946 # After this while loop ends we use the cache to return the successors sets
937 947 # for the node requested by the caller.
938 948 while toproceed:
939 949 # Every iteration tries to compute the successors sets of the topmost
940 950 # node of the stack: CURRENT.
941 951 #
942 952 # There are four possible outcomes:
943 953 #
944 954 # 1) We already know the successors sets of CURRENT:
945 955 # -> mission accomplished, pop it from the stack.
946 956 # 2) Node is not obsolete:
947 957 # -> the node is its own successors sets. Add it to the cache.
948 958 # 3) We do not know successors set of direct successors of CURRENT:
949 959 # -> We add those successors to the stack.
950 960 # 4) We know successors sets of all direct successors of CURRENT:
951 961 # -> We can compute CURRENT successors set and add it to the
952 962 # cache.
953 963 #
954 964 current = toproceed[-1]
955 965 if current in cache:
956 966 # case (1): We already know the successors sets
957 967 stackedset.remove(toproceed.pop())
958 968 elif current not in succmarkers:
959 969 # case (2): The node is not obsolete.
960 970 if current in repo:
961 971 # We have a valid last successors.
962 972 cache[current] = [(current,)]
963 973 else:
964 974 # Final obsolete version is unknown locally.
965 975 # Do not count that as a valid successors
966 976 cache[current] = []
967 977 else:
968 978 # cases (3) and (4)
969 979 #
970 980 # We proceed in two phases. Phase 1 aims to distinguish case (3)
971 981 # from case (4):
972 982 #
973 983 # For each direct successors of CURRENT, we check whether its
974 984 # successors sets are known. If they are not, we stack the
975 985 # unknown node and proceed to the next iteration of the while
976 986 # loop. (case 3)
977 987 #
978 988 # During this step, we may detect obsolescence cycles: a node
979 989 # with unknown successors sets but already in the call stack.
980 990 # In such a situation, we arbitrarily set the successors sets of
981 991 # the node to nothing (node pruned) to break the cycle.
982 992 #
983 993 # If no break was encountered we proceed to phase 2.
984 994 #
985 995 # Phase 2 computes successors sets of CURRENT (case 4); see details
986 996 # in phase 2 itself.
987 997 #
988 998 # Note the two levels of iteration in each phase.
989 999 # - The first one handles obsolescence markers using CURRENT as
990 1000 # precursor (successors markers of CURRENT).
991 1001 #
992 1002 # Having multiple entries here means divergence.
993 1003 #
994 1004 # - The second one handles successors defined in each marker.
995 1005 #
996 1006 # Having none means a pruned node; multiple successors mean a split;
997 1007 # a single successor is a standard replacement.
998 1008 #
999 1009 for mark in sorted(succmarkers[current]):
1000 1010 for suc in mark[1]:
1001 1011 if suc not in cache:
1002 1012 if suc in stackedset:
1003 1013 # cycle breaking
1004 1014 cache[suc] = []
1005 1015 else:
1006 1016 # case (3) If we have not computed successors sets
1007 1017 # of one of those successors we add it to the
1008 1018 # `toproceed` stack and stop all work for this
1009 1019 # iteration.
1010 1020 toproceed.append(suc)
1011 1021 stackedset.add(suc)
1012 1022 break
1013 1023 else:
1014 1024 continue
1015 1025 break
1016 1026 else:
1017 1027 # case (4): we know all successors sets of all direct
1018 1028 # successors
1019 1029 #
1020 1030 # Successors set contributed by each marker depends on the
1021 1031 # successors sets of all its "successors" node.
1022 1032 #
1023 1033 # Each different marker is a divergence in the obsolescence
1024 1034 # history. It contributes successors sets distinct from other
1025 1035 # markers.
1026 1036 #
1027 1037 # Within a marker, a successor may have divergent successors
1028 1038 # sets. In such a case, the marker will contribute multiple
1029 1039 # divergent successors sets. If multiple successors have
1030 1040 # divergent successors sets, a Cartesian product is used.
1031 1041 #
1032 1042 # At the end we post-process successors sets to remove
1033 1043 # duplicated entry and successors set that are strict subset of
1034 1044 # another one.
1035 1045 succssets = []
1036 1046 for mark in sorted(succmarkers[current]):
1037 1047 # successors sets contributed by this marker
1038 1048 markss = [[]]
1039 1049 for suc in mark[1]:
1040 1050 # Cartesian product with previous successors
1041 1051 productresult = []
1042 1052 for prefix in markss:
1043 1053 for suffix in cache[suc]:
1044 1054 newss = list(prefix)
1045 1055 for part in suffix:
1046 1056 # do not duplicate entries in the successors set;
1047 1057 # the first entry wins.
1048 1058 if part not in newss:
1049 1059 newss.append(part)
1050 1060 productresult.append(newss)
1051 1061 markss = productresult
1052 1062 succssets.extend(markss)
1053 1063 # remove duplicates and subsets
1054 1064 seen = []
1055 1065 final = []
1056 1066 candidate = sorted(((set(s), s) for s in succssets if s),
1057 1067 key=lambda x: len(x[1]), reverse=True)
1058 1068 for setversion, listversion in candidate:
1059 1069 for seenset in seen:
1060 1070 if setversion.issubset(seenset):
1061 1071 break
1062 1072 else:
1063 1073 final.append(listversion)
1064 1074 seen.append(setversion)
1065 1075 final.reverse() # put small successors set first
1066 1076 cache[current] = final
1067 1077 return cache[initialnode]
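A usage sketch: when querying several nodes, share one cache dict across calls, as the docstring requires; `repo` and `nodes` are hypothetical:

    cache = {}
    for n in nodes:
        for sset in successorssets(repo, n, cache):
            pass   # each sset is a tuple of successor node ids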
1068 1078
1069 1079 # mapping of 'set-name' -> <function to compute this set>
1070 1080 cachefuncs = {}
1071 1081 def cachefor(name):
1072 1082 """Decorator to register a function as computing the cache for a set"""
1073 1083 def decorator(func):
1074 1084 assert name not in cachefuncs
1075 1085 cachefuncs[name] = func
1076 1086 return func
1077 1087 return decorator
1078 1088
1079 1089 def getrevs(repo, name):
1080 1090 """Return the set of revision that belong to the <name> set
1081 1091
1082 1092 Such access may compute the set and cache it for future use"""
1083 1093 repo = repo.unfiltered()
1084 1094 if not repo.obsstore:
1085 1095 return frozenset()
1086 1096 if name not in repo.obsstore.caches:
1087 1097 repo.obsstore.caches[name] = cachefuncs[name](repo)
1088 1098 return repo.obsstore.caches[name]
1089 1099
1090 1100 # To keep it simple we need to invalidate obsolescence caches when:
1091 1101 #
1092 1102 # - a new changeset is added
1093 1103 # - the public phase is changed
1094 1104 # - obsolescence markers are added
1095 1105 # - strip is used on a repo
1096 1106 def clearobscaches(repo):
1097 1107 """Remove all obsolescence related cache from a repo
1098 1108
1099 1109 This removes all caches in the obsstore if the obsstore already exists
1100 1110 on the repo.
1101 1111
1102 1112 (We could be smarter here given the exact event that triggered the cache
1103 1113 clearing)"""
1104 1114 # only clear caches if there is obsstore data in this repo
1105 1115 if 'obsstore' in repo._filecache:
1106 1116 repo.obsstore.caches.clear()
1107 1117
1108 1118 @cachefor('obsolete')
1109 1119 def _computeobsoleteset(repo):
1110 1120 """the set of obsolete revisions"""
1111 1121 obs = set()
1112 1122 getrev = repo.changelog.nodemap.get
1113 1123 getphase = repo._phasecache.phase
1114 1124 for n in repo.obsstore.successors:
1115 1125 rev = getrev(n)
1116 1126 if rev is not None and getphase(repo, rev):
1117 1127 obs.add(rev)
1118 1128 return obs
1119 1129
1120 1130 @cachefor('unstable')
1121 1131 def _computeunstableset(repo):
1122 1132 """the set of non obsolete revisions with obsolete parents"""
1123 1133 revs = [(ctx.rev(), ctx) for ctx in
1124 1134 repo.set('(not public()) and (not obsolete())')]
1125 1135 revs.sort(key=lambda x:x[0])
1126 1136 unstable = set()
1127 1137 for rev, ctx in revs:
1128 1138 # A rev is unstable if one of its parents is obsolete or unstable;
1129 1139 # this works since we traverse in growing rev order
1130 1140 if any((x.obsolete() or (x.rev() in unstable))
1131 1141 for x in ctx.parents()):
1132 1142 unstable.add(rev)
1133 1143 return unstable
1134 1144
1135 1145 @cachefor('suspended')
1136 1146 def _computesuspendedset(repo):
1137 1147 """the set of obsolete parents with non obsolete descendants"""
1138 1148 suspended = repo.changelog.ancestors(getrevs(repo, 'unstable'))
1139 1149 return set(r for r in getrevs(repo, 'obsolete') if r in suspended)
1140 1150
1141 1151 @cachefor('extinct')
1142 1152 def _computeextinctset(repo):
1143 1153 """the set of obsolete parents without non obsolete descendants"""
1144 1154 return getrevs(repo, 'obsolete') - getrevs(repo, 'suspended')
1145 1155
1146 1156
1147 1157 @cachefor('bumped')
1148 1158 def _computebumpedset(repo):
1149 1159 """the set of revs trying to obsolete public revisions"""
1150 1160 bumped = set()
1151 1161 # util function (avoid attribute lookup in the loop)
1152 1162 phase = repo._phasecache.phase # would be faster to grab the full list
1153 1163 public = phases.public
1154 1164 cl = repo.changelog
1155 1165 torev = cl.nodemap.get
1156 1166 for ctx in repo.set('(not public()) and (not obsolete())'):
1157 1167 rev = ctx.rev()
1158 1168 # We only evaluate mutable, non-obsolete revisions
1159 1169 node = ctx.node()
1160 1170 # (future) A cache of precursors may be worthwhile if splits are very common
1161 1171 for pnode in allprecursors(repo.obsstore, [node],
1162 1172 ignoreflags=bumpedfix):
1163 1173 prev = torev(pnode) # unfiltered! but so is phasecache
1164 1174 if (prev is not None) and (phase(repo, prev) <= public):
1165 1175 # we have a public precursor
1166 1176 bumped.add(rev)
1167 1177 break # Next draft!
1168 1178 return bumped
1169 1179
1170 1180 @cachefor('divergent')
1171 1181 def _computedivergentset(repo):
1172 1182 """the set of rev that compete to be the final successors of some revision.
1173 1183 """
1174 1184 divergent = set()
1175 1185 obsstore = repo.obsstore
1176 1186 newermap = {}
1177 1187 for ctx in repo.set('(not public()) - obsolete()'):
1178 1188 mark = obsstore.precursors.get(ctx.node(), ())
1179 1189 toprocess = set(mark)
1180 1190 seen = set()
1181 1191 while toprocess:
1182 1192 prec = toprocess.pop()[0]
1183 1193 if prec in seen:
1184 1194 continue # emergency prevention of cycle-induced hangs
1185 1195 seen.add(prec)
1186 1196 if prec not in newermap:
1187 1197 successorssets(repo, prec, newermap)
1188 1198 newer = [n for n in newermap[prec] if n]
1189 1199 if len(newer) > 1:
1190 1200 divergent.add(ctx.rev())
1191 1201 break
1192 1202 toprocess.update(obsstore.precursors.get(prec, ()))
1193 1203 return divergent
1194 1204
1195 1205
1196 1206 def createmarkers(repo, relations, flag=0, date=None, metadata=None):
1197 1207 """Add obsolete markers between changesets in a repo
1198 1208
1199 1209 <relations> must be an iterable of (<old>, (<new>, ...)[,{metadata}])
1200 1210 tuples. `old` and each `new` are changectx objects. metadata is an optional
1201 1211 dictionary containing metadata for this marker only. It is merged with the
1202 1212 global metadata specified through the `metadata` argument of this function.
1203 1213
1204 1214 Trying to obsolete a public changeset will raise an exception.
1205 1215
1206 1216 The current user and date are used unless specified otherwise in the
1207 1217 metadata attribute.
1208 1218
1209 1219 This function operates within a transaction of its own, but does
1210 1220 not take any lock on the repo.
1211 1221 """
1212 1222 # prepare metadata
1213 1223 if metadata is None:
1214 1224 metadata = {}
1215 1225 if 'user' not in metadata:
1216 1226 metadata['user'] = repo.ui.username()
1217 1227 tr = repo.transaction('add-obsolescence-marker')
1218 1228 try:
1219 1229 for rel in relations:
1220 1230 prec = rel[0]
1221 1231 sucs = rel[1]
1222 1232 localmetadata = metadata.copy()
1223 1233 if 2 < len(rel):
1224 1234 localmetadata.update(rel[2])
1225 1235
1226 1236 if not prec.mutable():
1227 1237 raise error.Abort("cannot obsolete public changeset: %s"
1228 1238 % prec,
1229 1239 hint='see "hg help phases" for details')
1230 1240 nprec = prec.node()
1231 1241 nsucs = tuple(s.node() for s in sucs)
1232 1242 npare = None
1233 1243 if not nsucs:
1234 1244 npare = tuple(p.node() for p in prec.parents())
1235 1245 if nprec in nsucs:
1236 1246 raise error.Abort("changeset %s cannot obsolete itself" % prec)
1237 1247 repo.obsstore.create(tr, nprec, nsucs, flag, parents=npare,
1238 1248 date=date, metadata=localmetadata)
1239 1249 repo.filteredrevcache.clear()
1240 1250 tr.close()
1241 1251 finally:
1242 1252 tr.release()
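A usage sketch, with hypothetical changectx objects `old` and `new`:

    createmarkers(repo, [(old, (new,))],
                  metadata={'user': 'alice <alice@example.org>'})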
1243 1253
1244 1254 def isenabled(repo, option):
1245 1255 """Returns True if the given repository has the given obsolete option
1246 1256 enabled.
1247 1257 """
1248 1258 result = set(repo.ui.configlist('experimental', 'evolution'))
1249 1259 if 'all' in result:
1250 1260 return True
1251 1261
1252 1262 # For migration purposes, temporarily return true if the config hasn't been
1253 1263 # set but _enabled is true.
1254 1264 if len(result) == 0 and _enabled:
1255 1265 return True
1256 1266
1257 1267 # createmarkers must be enabled if other options are enabled
1258 1268 if ((allowunstableopt in result or exchangeopt in result) and
1259 1269 not createmarkersopt in result):
1260 1270 raise error.Abort(_("'createmarkers' obsolete option must be enabled "
1261 1271 "if other obsolete options are enabled"))
1262 1272
1263 1273 return option in result
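A guard sketch showing how a caller might use the option constants defined at the top of this module; `repo`, `old`, and `new` are hypothetical:

    if isenabled(repo, createmarkersopt):
        createmarkers(repo, [(old, (new,))])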
@@ -1,221 +1,220 b''
1 1 #require test-repo
2 2
3 3 $ cd "$TESTDIR"/..
4 4
5 5 $ hg files 'set:(**.py)' | xargs python contrib/check-py3-compat.py
6 6 contrib/casesmash.py not using absolute_import
7 7 contrib/check-code.py not using absolute_import
8 8 contrib/check-code.py requires print_function
9 9 contrib/check-config.py not using absolute_import
10 10 contrib/check-config.py requires print_function
11 11 contrib/debugcmdserver.py not using absolute_import
12 12 contrib/debugcmdserver.py requires print_function
13 13 contrib/debugshell.py not using absolute_import
14 14 contrib/fixpax.py not using absolute_import
15 15 contrib/fixpax.py requires print_function
16 16 contrib/hgclient.py not using absolute_import
17 17 contrib/hgclient.py requires print_function
18 18 contrib/hgfixes/fix_bytes.py not using absolute_import
19 19 contrib/hgfixes/fix_bytesmod.py not using absolute_import
20 20 contrib/hgfixes/fix_leftover_imports.py not using absolute_import
21 21 contrib/import-checker.py not using absolute_import
22 22 contrib/import-checker.py requires print_function
23 23 contrib/memory.py not using absolute_import
24 24 contrib/perf.py not using absolute_import
25 25 contrib/python-hook-examples.py not using absolute_import
26 26 contrib/revsetbenchmarks.py not using absolute_import
27 27 contrib/revsetbenchmarks.py requires print_function
28 28 contrib/showstack.py not using absolute_import
29 29 contrib/synthrepo.py not using absolute_import
30 30 contrib/win32/hgwebdir_wsgi.py not using absolute_import
31 31 doc/check-seclevel.py not using absolute_import
32 32 doc/gendoc.py not using absolute_import
33 33 doc/hgmanpage.py not using absolute_import
34 34 hgext/__init__.py not using absolute_import
35 35 hgext/acl.py not using absolute_import
36 36 hgext/blackbox.py not using absolute_import
37 37 hgext/bugzilla.py not using absolute_import
38 38 hgext/censor.py not using absolute_import
39 39 hgext/children.py not using absolute_import
40 40 hgext/churn.py not using absolute_import
41 41 hgext/clonebundles.py not using absolute_import
42 42 hgext/color.py not using absolute_import
43 43 hgext/convert/__init__.py not using absolute_import
44 44 hgext/convert/bzr.py not using absolute_import
45 45 hgext/convert/common.py not using absolute_import
46 46 hgext/convert/convcmd.py not using absolute_import
47 47 hgext/convert/cvs.py not using absolute_import
48 48 hgext/convert/cvsps.py not using absolute_import
49 49 hgext/convert/darcs.py not using absolute_import
50 50 hgext/convert/filemap.py not using absolute_import
51 51 hgext/convert/git.py not using absolute_import
52 52 hgext/convert/gnuarch.py not using absolute_import
53 53 hgext/convert/hg.py not using absolute_import
54 54 hgext/convert/monotone.py not using absolute_import
55 55 hgext/convert/p4.py not using absolute_import
56 56 hgext/convert/subversion.py not using absolute_import
57 57 hgext/convert/transport.py not using absolute_import
58 58 hgext/eol.py not using absolute_import
59 59 hgext/extdiff.py not using absolute_import
60 60 hgext/factotum.py not using absolute_import
61 61 hgext/fetch.py not using absolute_import
62 62 hgext/gpg.py not using absolute_import
63 63 hgext/graphlog.py not using absolute_import
64 64 hgext/hgcia.py not using absolute_import
65 65 hgext/hgk.py not using absolute_import
66 66 hgext/highlight/__init__.py not using absolute_import
67 67 hgext/highlight/highlight.py not using absolute_import
68 68 hgext/histedit.py not using absolute_import
69 69 hgext/keyword.py not using absolute_import
70 70 hgext/largefiles/__init__.py not using absolute_import
71 71 hgext/largefiles/basestore.py not using absolute_import
72 72 hgext/largefiles/lfcommands.py not using absolute_import
73 73 hgext/largefiles/lfutil.py not using absolute_import
74 74 hgext/largefiles/localstore.py not using absolute_import
75 75 hgext/largefiles/overrides.py not using absolute_import
76 76 hgext/largefiles/proto.py not using absolute_import
77 77 hgext/largefiles/remotestore.py not using absolute_import
78 78 hgext/largefiles/reposetup.py not using absolute_import
79 79 hgext/largefiles/uisetup.py not using absolute_import
80 80 hgext/largefiles/wirestore.py not using absolute_import
81 81 hgext/mq.py not using absolute_import
82 82 hgext/notify.py not using absolute_import
83 83 hgext/pager.py not using absolute_import
84 84 hgext/patchbomb.py not using absolute_import
85 85 hgext/purge.py not using absolute_import
86 86 hgext/rebase.py not using absolute_import
87 87 hgext/record.py not using absolute_import
88 88 hgext/relink.py not using absolute_import
89 89 hgext/schemes.py not using absolute_import
90 90 hgext/share.py not using absolute_import
91 91 hgext/shelve.py not using absolute_import
92 92 hgext/strip.py not using absolute_import
93 93 hgext/transplant.py not using absolute_import
94 94 hgext/win32mbcs.py not using absolute_import
95 95 hgext/win32text.py not using absolute_import
96 96 hgext/zeroconf/Zeroconf.py not using absolute_import
97 97 hgext/zeroconf/Zeroconf.py requires print_function
98 98 hgext/zeroconf/__init__.py not using absolute_import
99 99 i18n/check-translation.py not using absolute_import
100 100 i18n/polib.py not using absolute_import
101 101 mercurial/byterange.py not using absolute_import
102 102 mercurial/cmdutil.py not using absolute_import
103 103 mercurial/commands.py not using absolute_import
104 104 mercurial/commandserver.py not using absolute_import
105 105 mercurial/context.py not using absolute_import
106 106 mercurial/destutil.py not using absolute_import
107 107 mercurial/dirstate.py not using absolute_import
108 108 mercurial/dispatch.py requires print_function
109 109 mercurial/encoding.py not using absolute_import
110 110 mercurial/exchange.py not using absolute_import
111 111 mercurial/help.py not using absolute_import
112 112 mercurial/httpclient/__init__.py not using absolute_import
113 113 mercurial/httpclient/_readers.py not using absolute_import
114 114 mercurial/httpclient/socketutil.py not using absolute_import
115 115 mercurial/httpconnection.py not using absolute_import
116 116 mercurial/keepalive.py not using absolute_import
117 117 mercurial/keepalive.py requires print_function
118 118 mercurial/localrepo.py not using absolute_import
119 119 mercurial/lsprof.py requires print_function
120 120 mercurial/lsprofcalltree.py not using absolute_import
121 121 mercurial/lsprofcalltree.py requires print_function
122 122 mercurial/mail.py requires print_function
123 123 mercurial/manifest.py not using absolute_import
124 124 mercurial/mdiff.py not using absolute_import
125 mercurial/obsolete.py not using absolute_import
126 125 mercurial/patch.py not using absolute_import
127 126 mercurial/pure/base85.py not using absolute_import
128 127 mercurial/pure/bdiff.py not using absolute_import
129 128 mercurial/pure/diffhelpers.py not using absolute_import
130 129 mercurial/pure/mpatch.py not using absolute_import
131 130 mercurial/pure/osutil.py not using absolute_import
132 131 mercurial/pure/parsers.py not using absolute_import
133 132 mercurial/pvec.py not using absolute_import
134 133 mercurial/py3kcompat.py not using absolute_import
135 134 mercurial/revlog.py not using absolute_import
136 135 mercurial/scmposix.py not using absolute_import
137 136 mercurial/scmutil.py not using absolute_import
138 137 mercurial/scmwindows.py not using absolute_import
139 138 mercurial/similar.py not using absolute_import
140 139 mercurial/store.py not using absolute_import
141 140 mercurial/util.py not using absolute_import
142 141 mercurial/windows.py not using absolute_import
143 142 setup.py not using absolute_import
144 143 tests/filterpyflakes.py requires print_function
145 144 tests/generate-working-copy-states.py requires print_function
146 145 tests/get-with-headers.py requires print_function
147 146 tests/heredoctest.py requires print_function
148 147 tests/hypothesishelpers.py not using absolute_import
149 148 tests/hypothesishelpers.py requires print_function
150 149 tests/killdaemons.py not using absolute_import
151 150 tests/md5sum.py not using absolute_import
152 151 tests/mockblackbox.py not using absolute_import
153 152 tests/printenv.py not using absolute_import
154 153 tests/readlink.py not using absolute_import
155 154 tests/readlink.py requires print_function
156 155 tests/revlog-formatv0.py not using absolute_import
157 156 tests/run-tests.py not using absolute_import
158 157 tests/seq.py not using absolute_import
159 158 tests/seq.py requires print_function
160 159 tests/silenttestrunner.py not using absolute_import
161 160 tests/silenttestrunner.py requires print_function
162 161 tests/sitecustomize.py not using absolute_import
163 162 tests/svn-safe-append.py not using absolute_import
164 163 tests/svnxml.py not using absolute_import
165 164 tests/test-ancestor.py requires print_function
166 165 tests/test-atomictempfile.py not using absolute_import
167 166 tests/test-batching.py not using absolute_import
168 167 tests/test-batching.py requires print_function
169 168 tests/test-bdiff.py not using absolute_import
170 169 tests/test-bdiff.py requires print_function
171 170 tests/test-context.py not using absolute_import
172 171 tests/test-context.py requires print_function
173 172 tests/test-demandimport.py not using absolute_import
174 173 tests/test-demandimport.py requires print_function
175 174 tests/test-dispatch.py not using absolute_import
176 175 tests/test-dispatch.py requires print_function
177 176 tests/test-doctest.py not using absolute_import
178 177 tests/test-duplicateoptions.py not using absolute_import
179 178 tests/test-duplicateoptions.py requires print_function
180 179 tests/test-filecache.py not using absolute_import
181 180 tests/test-filecache.py requires print_function
182 181 tests/test-filelog.py not using absolute_import
183 182 tests/test-filelog.py requires print_function
184 183 tests/test-hg-parseurl.py not using absolute_import
185 184 tests/test-hg-parseurl.py requires print_function
186 185 tests/test-hgweb-auth.py not using absolute_import
187 186 tests/test-hgweb-auth.py requires print_function
188 187 tests/test-hgwebdir-paths.py not using absolute_import
189 188 tests/test-hybridencode.py not using absolute_import
190 189 tests/test-hybridencode.py requires print_function
191 190 tests/test-lrucachedict.py not using absolute_import
192 191 tests/test-lrucachedict.py requires print_function
193 192 tests/test-manifest.py not using absolute_import
194 193 tests/test-minirst.py not using absolute_import
195 194 tests/test-minirst.py requires print_function
196 195 tests/test-parseindex2.py not using absolute_import
197 196 tests/test-parseindex2.py requires print_function
198 197 tests/test-pathencode.py not using absolute_import
199 198 tests/test-pathencode.py requires print_function
200 199 tests/test-propertycache.py not using absolute_import
201 200 tests/test-propertycache.py requires print_function
202 201 tests/test-revlog-ancestry.py not using absolute_import
203 202 tests/test-revlog-ancestry.py requires print_function
204 203 tests/test-run-tests.py not using absolute_import
205 204 tests/test-simplemerge.py not using absolute_import
206 205 tests/test-status-inprocess.py not using absolute_import
207 206 tests/test-status-inprocess.py requires print_function
208 207 tests/test-symlink-os-yes-fs-no.py not using absolute_import
209 208 tests/test-trusted.py not using absolute_import
210 209 tests/test-trusted.py requires print_function
211 210 tests/test-ui-color.py not using absolute_import
212 211 tests/test-ui-color.py requires print_function
213 212 tests/test-ui-config.py not using absolute_import
214 213 tests/test-ui-config.py requires print_function
215 214 tests/test-ui-verbosity.py not using absolute_import
216 215 tests/test-ui-verbosity.py requires print_function
217 216 tests/test-url.py not using absolute_import
218 217 tests/test-url.py requires print_function
219 218 tests/test-walkrepo.py requires print_function
220 219 tests/test-wireproto.py requires print_function
221 220 tests/tinyproxy.py requires print_function